Dataset columns (names, dtypes, and value statistics):

| Column | Dtype | Values |
|---|---|---|
| commit | stringlengths | 40 to 40 |
| old_file | stringlengths | 4 to 118 |
| new_file | stringlengths | 4 to 118 |
| old_contents | stringlengths | 0 to 2.94k |
| new_contents | stringlengths | 1 to 4.43k |
| subject | stringlengths | 15 to 444 |
| message | stringlengths | 16 to 3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5 to 43.2k |
| prompt | stringlengths | 17 to 4.58k |
| response | stringlengths | 1 to 4.43k |
| prompt_tagged | stringlengths | 58 to 4.62k |
| response_tagged | stringlengths | 1 to 4.43k |
| text | stringlengths | 132 to 7.29k |
| text_tagged | stringlengths | 173 to 7.33k |
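The rows that follow are sample records from this table. As a minimal sketch of how the columns above could be inspected with the `datasets` library (the repository id used here is a placeholder, not the dataset's real name):

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "user/commit-message-dataset" is a placeholder repository id, not the real one.
from datasets import load_dataset

ds = load_dataset("user/commit-message-dataset", split="train")
print(ds.column_names)           # commit, old_file, new_file, old_contents, ...

row = ds[0]
print(row["subject"])            # short commit subject line
print(row["text_tagged"][:120])  # commit message and code joined with <commit_before>/<commit_msg>/<commit_after> tags
```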
a4e98c9bdf1a16e4bcf4ec952452913b9812bb9e
|
fire_risk/models/DIST/providers/iaff.py
|
fire_risk/models/DIST/providers/iaff.py
|
# A dictionary where the key is an fdid-state and the values are lognormal fits generated
# from response time gis data.
response_time_distributions = {
'37140-CA': (0.20611095226322063, -4.7357748161635111, 8.6850997083002905),
'02072-FL': (0.34949627393777505, -2.1718657657665021, 6.6793162966144539),
'06001-GA': (0.15516182334170464, -6.3713354004369327, 9.9123471434677057),
'52000-HI': (0.30700916252422011, -2.4031329143522013, 6.1229794823087547),
'CS931-IL': (0.081475834493098712, -13.457565905124984, 17.056736130299502),
'25035-MA': (0.15505110429322835, -5.1587384739296791, 8.0864571571129122),
'10302-SC': (0.34438549165942622, -2.0800726636847711, 5.6805249239718378),
'51000-VA': (0.16621261785191671, -4.5177075520748833, 7.2698576034101752),
'25009-OH': (0.25995846388218091, -3.390284958787158, 7.0896026268171362),
}
|
Add response time distributions from GIS.
|
Add response time distributions from GIS.
|
Python
|
mit
|
FireCARES/fire-risk,FireCARES/fire-risk
|
Add response time distributions from GIS.
|
# A dictionary where the key is an fdid-state and the values are lognormal fits generated
# from response time gis data.
response_time_distributions = {
'37140-CA': (0.20611095226322063, -4.7357748161635111, 8.6850997083002905),
'02072-FL': (0.34949627393777505, -2.1718657657665021, 6.6793162966144539),
'06001-GA': (0.15516182334170464, -6.3713354004369327, 9.9123471434677057),
'52000-HI': (0.30700916252422011, -2.4031329143522013, 6.1229794823087547),
'CS931-IL': (0.081475834493098712, -13.457565905124984, 17.056736130299502),
'25035-MA': (0.15505110429322835, -5.1587384739296791, 8.0864571571129122),
'10302-SC': (0.34438549165942622, -2.0800726636847711, 5.6805249239718378),
'51000-VA': (0.16621261785191671, -4.5177075520748833, 7.2698576034101752),
'25009-OH': (0.25995846388218091, -3.390284958787158, 7.0896026268171362),
}
|
<commit_before><commit_msg>Add response time distributions from GIS.<commit_after>
|
# A dictionary where the key is an fdid-state and the values are lognormal fits generated
# from response time gis data.
response_time_distributions = {
'37140-CA': (0.20611095226322063, -4.7357748161635111, 8.6850997083002905),
'02072-FL': (0.34949627393777505, -2.1718657657665021, 6.6793162966144539),
'06001-GA': (0.15516182334170464, -6.3713354004369327, 9.9123471434677057),
'52000-HI': (0.30700916252422011, -2.4031329143522013, 6.1229794823087547),
'CS931-IL': (0.081475834493098712, -13.457565905124984, 17.056736130299502),
'25035-MA': (0.15505110429322835, -5.1587384739296791, 8.0864571571129122),
'10302-SC': (0.34438549165942622, -2.0800726636847711, 5.6805249239718378),
'51000-VA': (0.16621261785191671, -4.5177075520748833, 7.2698576034101752),
'25009-OH': (0.25995846388218091, -3.390284958787158, 7.0896026268171362),
}
|
Add response time distributions from GIS.# A dictionary where the key is an fdid-state and the values are lognormal fits generated
# from response time gis data.
response_time_distributions = {
'37140-CA': (0.20611095226322063, -4.7357748161635111, 8.6850997083002905),
'02072-FL': (0.34949627393777505, -2.1718657657665021, 6.6793162966144539),
'06001-GA': (0.15516182334170464, -6.3713354004369327, 9.9123471434677057),
'52000-HI': (0.30700916252422011, -2.4031329143522013, 6.1229794823087547),
'CS931-IL': (0.081475834493098712, -13.457565905124984, 17.056736130299502),
'25035-MA': (0.15505110429322835, -5.1587384739296791, 8.0864571571129122),
'10302-SC': (0.34438549165942622, -2.0800726636847711, 5.6805249239718378),
'51000-VA': (0.16621261785191671, -4.5177075520748833, 7.2698576034101752),
'25009-OH': (0.25995846388218091, -3.390284958787158, 7.0896026268171362),
}
|
<commit_before><commit_msg>Add response time distributions from GIS.<commit_after># A dictionary where the key is an fdid-state and the values are lognormal fits generated
# from response time gis data.
response_time_distributions = {
'37140-CA': (0.20611095226322063, -4.7357748161635111, 8.6850997083002905),
'02072-FL': (0.34949627393777505, -2.1718657657665021, 6.6793162966144539),
'06001-GA': (0.15516182334170464, -6.3713354004369327, 9.9123471434677057),
'52000-HI': (0.30700916252422011, -2.4031329143522013, 6.1229794823087547),
'CS931-IL': (0.081475834493098712, -13.457565905124984, 17.056736130299502),
'25035-MA': (0.15505110429322835, -5.1587384739296791, 8.0864571571129122),
'10302-SC': (0.34438549165942622, -2.0800726636847711, 5.6805249239718378),
'51000-VA': (0.16621261785191671, -4.5177075520748833, 7.2698576034101752),
'25009-OH': (0.25995846388218091, -3.390284958787158, 7.0896026268171362),
}
|
|
e9e9999ed9b3fe73bd132fd5dcbcef3bbee1bef6
|
csunplugged/utils/convert_heading_tree_to_dict.py
|
csunplugged/utils/convert_heading_tree_to_dict.py
|
"""Module used for converting Verto heading tree data.
The Verto result object is a tuple containing NamedTuples, however this
must be converted to a dictionary to be stored in a HStoreField Postgres
database field.
"""
def convert_heading_tree_to_dict(heading_tree_tuples):
"""Convert tuple heading tree to dictionary.
Args:
heading_tree_tuples: The heading tree from a Verto conversion.
Returns:
Dictionary of heading tree, or None if None provided as tree.
"""
if heading_tree_tuples is None:
return None
else:
root_nodes = []
for heading_node in heading_tree_tuples:
root_nodes.append(convert_heading_node(heading_node))
return root_nodes
def convert_heading_node(heading_node):
"""Convert a heading node to a dictionary.
Args:
heading_node: A tuple for a heading node.
Returns:
A dictionary of data for a heading node.
"""
children = []
for child in heading_node.children:
children.append(convert_heading_node(child))
heading_node_data = {
"text": heading_node.title,
"slug": heading_node.title_slug,
"level": heading_node.level,
"children": children,
}
return heading_node_data
|
Add module for converting heading node tuples to a dictionary
|
Add module for converting heading node tuples to a dictionary
This could be stored in BaseLoader.py directly if that
is a more logical location. It also could convert output
of every Verto conversion, but currently is only called
when needed.
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add module for converting heading node tuples to a dictionary
This could be stored in BaseLoader.py directly if that
is a more logical location. It also could convert output
of every Verto conversion, but currently is only called
when needed.
|
"""Module used for converting Verto heading tree data.
The Verto result object is a tuple containing NamedTuples, however this
must be converted to a dictionary to be stored in a HStoreField Postgres
database field.
"""
def convert_heading_tree_to_dict(heading_tree_tuples):
"""Convert tuple heading tree to dictionary.
Args:
heading_tree_tuples: The heading tree from a Verto conversion.
Returns:
Dictionary of heading tree, or None if None provided as tree.
"""
if heading_tree_tuples is None:
return None
else:
root_nodes = []
for heading_node in heading_tree_tuples:
root_nodes.append(convert_heading_node(heading_node))
return root_nodes
def convert_heading_node(heading_node):
"""Convert a heading node to a dictionary.
Args:
heading_node: A tuple for a heading node.
Returns:
A dictionary of data for a heading node.
"""
children = []
for child in heading_node.children:
children.append(convert_heading_node(child))
heading_node_data = {
"text": heading_node.title,
"slug": heading_node.title_slug,
"level": heading_node.level,
"children": children,
}
return heading_node_data
|
<commit_before><commit_msg>Add module for converting heading node tuples to a dictionary
This could be stored in BaseLoader.py directly if that
is a more logical location. It also could convert output
of every Verto conversion, but currently is only called
when needed.<commit_after>
|
"""Module used for converting Verto heading tree data.
The Verto result object is a tuple containing NamedTuples, however this
must be converted to a dictionary to be stored in a HStoreField Postgres
database field.
"""
def convert_heading_tree_to_dict(heading_tree_tuples):
"""Convert tuple heading tree to dictionary.
Args:
heading_tree_tuples: The heading tree from a Verto conversion.
Returns:
Dictionary of heading tree, or None if None provided as tree.
"""
if heading_tree_tuples is None:
return None
else:
root_nodes = []
for heading_node in heading_tree_tuples:
root_nodes.append(convert_heading_node(heading_node))
return root_nodes
def convert_heading_node(heading_node):
"""Convert a heading node to a dictionary.
Args:
heading_node: A tuple for a heading node.
Returns:
A dictionary of data for a heading node.
"""
children = []
for child in heading_node.children:
children.append(convert_heading_node(child))
heading_node_data = {
"text": heading_node.title,
"slug": heading_node.title_slug,
"level": heading_node.level,
"children": children,
}
return heading_node_data
|
Add module for converting heading node tuples to a dictionary
This could be stored in BaseLoader.py directly if that
is a more logical location. It also could convert output
of every Verto conversion, but currently is only called
when needed."""Module used for converting Verto heading tree data.
The Verto result object is a tuple containing NamedTuples, however this
must be converted to a dictionary to be stored in a HStoreField Postgres
database field.
"""
def convert_heading_tree_to_dict(heading_tree_tuples):
"""Convert tuple heading tree to dictionary.
Args:
heading_tree_tuples: The heading tree from a Verto conversion.
Returns:
Dictionary of heading tree, or None if None provided as tree.
"""
if heading_tree_tuples is None:
return None
else:
root_nodes = []
for heading_node in heading_tree_tuples:
root_nodes.append(convert_heading_node(heading_node))
return root_nodes
def convert_heading_node(heading_node):
"""Convert a heading node to a dictionary.
Args:
heading_node: A tuple for a heading node.
Returns:
A dictionary of data for a heading node.
"""
children = []
for child in heading_node.children:
children.append(convert_heading_node(child))
heading_node_data = {
"text": heading_node.title,
"slug": heading_node.title_slug,
"level": heading_node.level,
"children": children,
}
return heading_node_data
|
<commit_before><commit_msg>Add module for converting heading node tuples to a dictionary
This could be stored in BaseLoader.py directly if that
is a more logical location. It also could convert output
of every Verto conversion, but currently is only called
when needed.<commit_after>"""Module used for converting Verto heading tree data.
The Verto result object is a tuple containing NamedTuples, however this
must be converted to a dictionary to be stored in a HStoreField Postgres
database field.
"""
def convert_heading_tree_to_dict(heading_tree_tuples):
"""Convert tuple heading tree to dictionary.
Args:
heading_tree_tuples: The heading tree from a Verto conversion.
Returns:
Dictionary of heading tree, or None if None provided as tree.
"""
if heading_tree_tuples is None:
return None
else:
root_nodes = []
for heading_node in heading_tree_tuples:
root_nodes.append(convert_heading_node(heading_node))
return root_nodes
def convert_heading_node(heading_node):
"""Convert a heading node to a dictionary.
Args:
heading_node: A tuple for a heading node.
Returns:
A dictionary of data for a heading node.
"""
children = []
for child in heading_node.children:
children.append(convert_heading_node(child))
heading_node_data = {
"text": heading_node.title,
"slug": heading_node.title_slug,
"level": heading_node.level,
"children": children,
}
return heading_node_data
|
|
987a660f6adc86bd2eed176c5fa11cceebbcde9f
|
devicehive/api.py
|
devicehive/api.py
|
from devicehive.transport import Request
from devicehive.transport import Response
class Api(object):
"""Api class."""
def __init__(self, transport):
self._transport = transport
def _request(self, url, action, request, **params):
req = Request(url, action, request, **params)
response = self._transport.request(req.action(), req.request(),
**req.params())
return Response(response)
def refresh_token(self, refresh_token):
url = 'token/refresh'
action = url
request = {'refreshToken': refresh_token}
params = {'method': 'POST',
'merge_data': True}
return self._request(url, action, request, **params)
|
Add Api class with refresh token request
|
Add Api class with refresh token request
|
Python
|
apache-2.0
|
devicehive/devicehive-python
|
Add Api class with refresh token request
|
from devicehive.transport import Request
from devicehive.transport import Response
class Api(object):
"""Api class."""
def __init__(self, transport):
self._transport = transport
def _request(self, url, action, request, **params):
req = Request(url, action, request, **params)
response = self._transport.request(req.action(), req.request(),
**req.params())
return Response(response)
def refresh_token(self, refresh_token):
url = 'token/refresh'
action = url
request = {'refreshToken': refresh_token}
params = {'method': 'POST',
'merge_data': True}
return self._request(url, action, request, **params)
|
<commit_before><commit_msg>Add Api class with refresh token request<commit_after>
|
from devicehive.transport import Request
from devicehive.transport import Response
class Api(object):
"""Api class."""
def __init__(self, transport):
self._transport = transport
def _request(self, url, action, request, **params):
req = Request(url, action, request, **params)
response = self._transport.request(req.action(), req.request(),
**req.params())
return Response(response)
def refresh_token(self, refresh_token):
url = 'token/refresh'
action = url
request = {'refreshToken': refresh_token}
params = {'method': 'POST',
'merge_data': True}
return self._request(url, action, request, **params)
|
Add Api class with refresh token requestfrom devicehive.transport import Request
from devicehive.transport import Response
class Api(object):
"""Api class."""
def __init__(self, transport):
self._transport = transport
def _request(self, url, action, request, **params):
req = Request(url, action, request, **params)
response = self._transport.request(req.action(), req.request(),
**req.params())
return Response(response)
def refresh_token(self, refresh_token):
url = 'token/refresh'
action = url
request = {'refreshToken': refresh_token}
params = {'method': 'POST',
'merge_data': True}
return self._request(url, action, request, **params)
|
<commit_before><commit_msg>Add Api class with refresh token request<commit_after>from devicehive.transport import Request
from devicehive.transport import Response
class Api(object):
"""Api class."""
def __init__(self, transport):
self._transport = transport
def _request(self, url, action, request, **params):
req = Request(url, action, request, **params)
response = self._transport.request(req.action(), req.request(),
**req.params())
return Response(response)
def refresh_token(self, refresh_token):
url = 'token/refresh'
action = url
request = {'refreshToken': refresh_token}
params = {'method': 'POST',
'merge_data': True}
return self._request(url, action, request, **params)
|
|
c0bd56cffd87b00d6889cfd745212d8cd53860f5
|
books/CrackingCodesWithPython/Chapter02/PracticeQuestions.py
|
books/CrackingCodesWithPython/Chapter02/PracticeQuestions.py
|
# Chapter 2 Practice Questions
# To check these questions, they should be entered in IDLE; otherwise
# print statements would be needed.
# 1. Which is the operator for division, / or \?
4 / 2
4 \ 2
# 2. Which of the following is an integer value, and which is a floating-point value?
type(42)
type(3.141592)
# 3. Which of the following lines are not expressions?
4 x 10 + 2
3 * 7 + 1
2 +
42
2 + 2
spam = 42
# 4. If you enter the following lines of code into the interactive shell, what do
# lines (1) and (2) print?
spam = 20
spam + 20 # (1)
SPAM = 30
spam # (2)
|
Initialize Chapter 2 Practice Questions
|
Initialize Chapter 2 Practice Questions
|
Python
|
mit
|
JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials
|
Initialize Chapter 2 Practice Questions
|
# Chapter 2 Practice Questions
# To check these questions, they should be entered in IDLE; otherwise
# print statements would be needed.
# 1. Which is the operator for division, / or \?
4 / 2
4 \ 2
# 2. Which of the following is an integer value, and which is a floating-point value?
type(42)
type(3.141592)
# 3. Which of the following lines are not expressions?
4 x 10 + 2
3 * 7 + 1
2 +
42
2 + 2
spam = 42
# 4. If you enter the following lines of code into the interactive shell, what do
# lines (1) and (2) print?
spam = 20
spam + 20 # (1)
SPAM = 30
spam # (2)
|
<commit_before><commit_msg>Initialize Chapter 2 Practice Questions<commit_after>
|
# Chapter 2 Practice Questions
# To check these questions, they should be entered in IDLE; otherwise
# print statements would be needed.
# 1. Which is the operator for division, / or \?
4 / 2
4 \ 2
# 2. Which of the following is an integer value, and which is a floating-point value?
type(42)
type(3.141592)
# 3. Which of the following lines are not expressions?
4 x 10 + 2
3 * 7 + 1
2 +
42
2 + 2
spam = 42
# 4. If you enter the following lines of code into the interactive shell, what do
# lines (1) and (2) print?
spam = 20
spam + 20 # (1)
SPAM = 30
spam # (2)
|
Initialize Chapter 2 Practice Questions# Chapter 2 Practice Questions
# To check these questions, they should be entered in IDLE; otherwise
# print statements would be needed.
# 1. Which is the operator for division, / or \?
4 / 2
4 \ 2
# 2. Which of the following is an integer value, and which is a floating-point value?
type(42)
type(3.141592)
# 3. Which of the following lines are not expressions?
4 x 10 + 2
3 * 7 + 1
2 +
42
2 + 2
spam = 42
# 4. If you enter the following lines of code into the interactive shell, what do
# lines (1) and (2) print?
spam = 20
spam + 20 # (1)
SPAM = 30
spam # (2)
|
<commit_before><commit_msg>Initialize Chapter 2 Practice Questions<commit_after># Chapter 2 Practice Questions
# To check these questions, they should be entered in IDLE; otherwise
# print statements would be needed.
# 1. Which is the operator for division, / or \?
4 / 2
4 \ 2
# 2. Which of the following is an integer value, and which is a floating-point value?
type(42)
type(3.141592)
# 3. Which of the following lines are not expressions?
4 x 10 + 2
3 * 7 + 1
2 +
42
2 + 2
spam = 42
# 4. If you enter the following lines of code into the interactive shell, what do
# lines (1) and (2) print?
spam = 20
spam + 20 # (1)
SPAM = 30
spam # (2)
|
|
3c94cd8e6f7429f3a512a49a86eaa089a8f472c7
|
other/wrapping-cpp/swig/c++/multiplefiles/test_shapes.py
|
other/wrapping-cpp/swig/c++/multiplefiles/test_shapes.py
|
import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
|
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
|
Allow to be called from other directories.
|
Allow to be called from other directories.
|
Python
|
bsd-2-clause
|
ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python
|
import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
Allow to be called from other directories.
|
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
|
<commit_before>import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
<commit_msg>Allow to be called from other directories.<commit_after>
|
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
|
import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
Allow to be called from other directories.import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
|
<commit_before>import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
<commit_msg>Allow to be called from other directories.<commit_after>import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_square(setup):
import shapes
assert shapes.square(10).area() == 100
assert shapes.square(100).area() == 10000
assert shapes.square(5).area() == 25
def test_rectangle(setup):
import shapes
assert shapes.rectangle(10, 10).area() == 100
assert shapes.rectangle(100, 10).area() == 1000
assert shapes.rectangle(1, 2).area() == 2
|
8ea9b63224a33f73a49210a5cd6dd29f0eba41ce
|
app/test_programs.py
|
app/test_programs.py
|
import unittest
import multiprocessing
import time
class TestPrograms(unittest.TestCase):
def program_helper(self, func, params=dict()):
self.process = multiprocessing.Process(target=func, args=(params,))
self.process.start()
time.sleep(1)
self.process.terminate()
def test_program_ascii_text(self):
from programs.ascii_text import run
self.program_helper(run, { "text": "Hello World" })
def test_cheertree(self):
from programs.cheertree import run
self.program_helper(run)
def test_crss(self):
from programs.cross import run
self.program_helper(run)
def test_demo(self):
from programs.demo import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_game_of_life(self):
from programs.game_of_life import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_program_matrix(self):
from programs.matrix import run
self.program_helper(run)
def test_psychedelia(self):
from programs.psychedelia import run
self.program_helper(run)
def test_rain(self):
from programs.rain import run
self.program_helper(run)
def test_rainbow(self):
from programs.rainbow import run
self.program_helper(run)
def test_random_blinky(self):
from programs.random_blinky import run
self.program_helper(run)
def test_random_sparkles(self):
from programs.random_sparkles import run
self.program_helper(run)
def test_simple(self):
from programs.simple import run
self.program_helper(run)
def test_snow(self):
from programs.snow import run
self.program_helper(run)
def test_trig(self):
from programs.trig import run
self.program_helper(run)
if __name__ == '__main__':
unittest.main()
|
Add unit tests for programs
|
Add unit tests for programs
|
Python
|
mit
|
njbbaer/unicorn-remote,njbbaer/unicorn-remote,njbbaer/unicorn-remote
|
Add unit tests for programs
|
import unittest
import multiprocessing
import time
class TestPrograms(unittest.TestCase):
def program_helper(self, func, params=dict()):
self.process = multiprocessing.Process(target=func, args=(params,))
self.process.start()
time.sleep(1)
self.process.terminate()
def test_program_ascii_text(self):
from programs.ascii_text import run
self.program_helper(run, { "text": "Hello World" })
def test_cheertree(self):
from programs.cheertree import run
self.program_helper(run)
def test_crss(self):
from programs.cross import run
self.program_helper(run)
def test_demo(self):
from programs.demo import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_game_of_life(self):
from programs.game_of_life import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_program_matrix(self):
from programs.matrix import run
self.program_helper(run)
def test_psychedelia(self):
from programs.psychedelia import run
self.program_helper(run)
def test_rain(self):
from programs.rain import run
self.program_helper(run)
def test_rainbow(self):
from programs.rainbow import run
self.program_helper(run)
def test_random_blinky(self):
from programs.random_blinky import run
self.program_helper(run)
def test_random_sparkles(self):
from programs.random_sparkles import run
self.program_helper(run)
def test_simple(self):
from programs.simple import run
self.program_helper(run)
def test_snow(self):
from programs.snow import run
self.program_helper(run)
def test_trig(self):
from programs.trig import run
self.program_helper(run)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for programs<commit_after>
|
import unittest
import multiprocessing
import time
class TestPrograms(unittest.TestCase):
def program_helper(self, func, params=dict()):
self.process = multiprocessing.Process(target=func, args=(params,))
self.process.start()
time.sleep(1)
self.process.terminate()
def test_program_ascii_text(self):
from programs.ascii_text import run
self.program_helper(run, { "text": "Hello World" })
def test_cheertree(self):
from programs.cheertree import run
self.program_helper(run)
def test_crss(self):
from programs.cross import run
self.program_helper(run)
def test_demo(self):
from programs.demo import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_game_of_life(self):
from programs.game_of_life import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_program_matrix(self):
from programs.matrix import run
self.program_helper(run)
def test_psychedelia(self):
from programs.psychedelia import run
self.program_helper(run)
def test_rain(self):
from programs.rain import run
self.program_helper(run)
def test_rainbow(self):
from programs.rainbow import run
self.program_helper(run)
def test_random_blinky(self):
from programs.random_blinky import run
self.program_helper(run)
def test_random_sparkles(self):
from programs.random_sparkles import run
self.program_helper(run)
def test_simple(self):
from programs.simple import run
self.program_helper(run)
def test_snow(self):
from programs.snow import run
self.program_helper(run)
def test_trig(self):
from programs.trig import run
self.program_helper(run)
if __name__ == '__main__':
unittest.main()
|
Add unit tests for programsimport unittest
import multiprocessing
import time
class TestPrograms(unittest.TestCase):
def program_helper(self, func, params=dict()):
self.process = multiprocessing.Process(target=func, args=(params,))
self.process.start()
time.sleep(1)
self.process.terminate()
def test_program_ascii_text(self):
from programs.ascii_text import run
self.program_helper(run, { "text": "Hello World" })
def test_cheertree(self):
from programs.cheertree import run
self.program_helper(run)
def test_crss(self):
from programs.cross import run
self.program_helper(run)
def test_demo(self):
from programs.demo import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_game_of_life(self):
from programs.game_of_life import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_program_matrix(self):
from programs.matrix import run
self.program_helper(run)
def test_psychedelia(self):
from programs.psychedelia import run
self.program_helper(run)
def test_rain(self):
from programs.rain import run
self.program_helper(run)
def test_rainbow(self):
from programs.rainbow import run
self.program_helper(run)
def test_random_blinky(self):
from programs.random_blinky import run
self.program_helper(run)
def test_random_sparkles(self):
from programs.random_sparkles import run
self.program_helper(run)
def test_simple(self):
from programs.simple import run
self.program_helper(run)
def test_snow(self):
from programs.snow import run
self.program_helper(run)
def test_trig(self):
from programs.trig import run
self.program_helper(run)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for programs<commit_after>import unittest
import multiprocessing
import time
class TestPrograms(unittest.TestCase):
def program_helper(self, func, params=dict()):
self.process = multiprocessing.Process(target=func, args=(params,))
self.process.start()
time.sleep(1)
self.process.terminate()
def test_program_ascii_text(self):
from programs.ascii_text import run
self.program_helper(run, { "text": "Hello World" })
def test_cheertree(self):
from programs.cheertree import run
self.program_helper(run)
def test_crss(self):
from programs.cross import run
self.program_helper(run)
def test_demo(self):
from programs.demo import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_game_of_life(self):
from programs.game_of_life import run
self.program_helper(run)
def test_dna(self):
from programs.dna import run
self.program_helper(run)
def test_program_matrix(self):
from programs.matrix import run
self.program_helper(run)
def test_psychedelia(self):
from programs.psychedelia import run
self.program_helper(run)
def test_rain(self):
from programs.rain import run
self.program_helper(run)
def test_rainbow(self):
from programs.rainbow import run
self.program_helper(run)
def test_random_blinky(self):
from programs.random_blinky import run
self.program_helper(run)
def test_random_sparkles(self):
from programs.random_sparkles import run
self.program_helper(run)
def test_simple(self):
from programs.simple import run
self.program_helper(run)
def test_snow(self):
from programs.snow import run
self.program_helper(run)
def test_trig(self):
from programs.trig import run
self.program_helper(run)
if __name__ == '__main__':
unittest.main()
|
|
1cb01db57fe761b37852650f8acfd7a7707b721f
|
create_recipes.py
|
create_recipes.py
|
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
" recipies will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
recipe_log_file = open(log_dir + 'recipe_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
recipe_log_file.write(msg)
print(msg)
err = subprocess.call(['conda', 'skeleton', 'pypi', package,
'--output-dir', recipes_dir],
stdout=recipe_log_file, stderr=recipe_log_file)
if err is 0:
successes.append(package)
else:
failures.append(package)
recipe_log_file.close()
successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
Write Python Script for recipe creation
|
Write Python Script for recipe creation
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
|
Python
|
bsd-3-clause
|
ContinuumIO/pypi-conda-builds
|
Write Python Script for recipe creation
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
|
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
" recipies will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
recipe_log_file = open(log_dir + 'recipe_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
recipe_log_file.write(msg)
print(msg)
err = subprocess.call(['conda', 'skeleton', 'pypi', package,
'--output-dir', recipes_dir],
stdout=recipe_log_file, stderr=recipe_log_file)
if err is 0:
successes.append(package)
else:
failures.append(package)
recipe_log_file.close()
successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
<commit_before><commit_msg>Write Python Script for recipe creation
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com><commit_after>
|
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
" recipies will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
recipe_log_file = open(log_dir + 'recipe_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
recipe_log_file.write(msg)
print(msg)
err = subprocess.call(['conda', 'skeleton', 'pypi', package,
'--output-dir', recipes_dir],
stdout=recipe_log_file, stderr=recipe_log_file)
if err is 0:
successes.append(package)
else:
failures.append(package)
recipe_log_file.close()
successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
Write Python Script for recipe creation
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
" recipies will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
recipe_log_file = open(log_dir + 'recipe_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
recipe_log_file.write(msg)
print(msg)
err = subprocess.call(['conda', 'skeleton', 'pypi', package,
'--output-dir', recipes_dir],
stdout=recipe_log_file, stderr=recipe_log_file)
if err is 0:
successes.append(package)
else:
failures.append(package)
recipe_log_file.close()
successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
<commit_before><commit_msg>Write Python Script for recipe creation
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com><commit_after>import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
" recipies will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
recipe_log_file = open(log_dir + 'recipe_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
recipe_log_file.write(msg)
print(msg)
err = subprocess.call(['conda', 'skeleton', 'pypi', package,
'--output-dir', recipes_dir],
stdout=recipe_log_file, stderr=recipe_log_file)
if err is 0:
successes.append(package)
else:
failures.append(package)
recipe_log_file.close()
successful_recipes_file = open(log_dir + 'successful_recipes', 'w')
failed_recipes_file = open(log_dir + 'failed_recipes', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
|
9004162c264173beddb8308a2533fa08c8ca24ca
|
pkgs/applications/networking/browsers/chromium/get-commit-message.py
|
pkgs/applications/networking/browsers/chromium/get-commit-message.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3Packages.feedparser python3Packages.requests
# This script prints the Git commit message for stable channel updates.
import re
import textwrap
import feedparser
import requests
feed = feedparser.parse('https://chromereleases.googleblog.com/feeds/posts/default')
html_tags = re.compile(r'<[^>]+>')
for entry in feed.entries:
if entry.title != 'Stable Channel Update for Desktop':
continue
url = requests.get(entry.link).url.split('?')[0]
content = entry.content[0].value
if re.search(r'Linux', content) is None:
continue
#print(url) # For debugging purposes
version = re.search(r'\d+(\.\d+){3}', content).group(0)
fixes = re.search(r'This update includes .+ security fixes\.', content).group(0)
fixes = html_tags.sub('', fixes)
zero_days = re.search(r'Google is aware of reports that .+ in the wild\.', content)
if zero_days:
fixes += " " + zero_days.group(0)
cve_list = re.findall(r'CVE-[^: ]+', content)
cve_string = ' '.join(cve_list)
print('chromium: TODO -> ' + version + '\n')
print(url + '\n')
print('\n'.join(textwrap.wrap(fixes, width=72)) + '\n')
print("CVEs:\n" + '\n'.join(textwrap.wrap(cve_string, width=72)))
break # We only care about the most recent stable channel update
|
Add my script to generate the commit messages
|
chromium: Add my script to generate the commit messages
Might not be that readable and reliable but it should get the job done.
|
Python
|
mit
|
NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs,NixOS/nixpkgs
|
chromium: Add my script to generate the commit messages
Might not be that readable and reliable but it should get the job done.
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3Packages.feedparser python3Packages.requests
# This script prints the Git commit message for stable channel updates.
import re
import textwrap
import feedparser
import requests
feed = feedparser.parse('https://chromereleases.googleblog.com/feeds/posts/default')
html_tags = re.compile(r'<[^>]+>')
for entry in feed.entries:
if entry.title != 'Stable Channel Update for Desktop':
continue
url = requests.get(entry.link).url.split('?')[0]
content = entry.content[0].value
if re.search(r'Linux', content) is None:
continue
#print(url) # For debugging purposes
version = re.search(r'\d+(\.\d+){3}', content).group(0)
fixes = re.search(r'This update includes .+ security fixes\.', content).group(0)
fixes = html_tags.sub('', fixes)
zero_days = re.search(r'Google is aware of reports that .+ in the wild\.', content)
if zero_days:
fixes += " " + zero_days.group(0)
cve_list = re.findall(r'CVE-[^: ]+', content)
cve_string = ' '.join(cve_list)
print('chromium: TODO -> ' + version + '\n')
print(url + '\n')
print('\n'.join(textwrap.wrap(fixes, width=72)) + '\n')
print("CVEs:\n" + '\n'.join(textwrap.wrap(cve_string, width=72)))
break # We only care about the most recent stable channel update
|
<commit_before><commit_msg>chromium: Add my script to generate the commit messages
Might not be that readable and reliable but it should get the job done.<commit_after>
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3Packages.feedparser python3Packages.requests
# This script prints the Git commit message for stable channel updates.
import re
import textwrap
import feedparser
import requests
feed = feedparser.parse('https://chromereleases.googleblog.com/feeds/posts/default')
html_tags = re.compile(r'<[^>]+>')
for entry in feed.entries:
if entry.title != 'Stable Channel Update for Desktop':
continue
url = requests.get(entry.link).url.split('?')[0]
content = entry.content[0].value
if re.search(r'Linux', content) is None:
continue
#print(url) # For debugging purposes
version = re.search(r'\d+(\.\d+){3}', content).group(0)
fixes = re.search(r'This update includes .+ security fixes\.', content).group(0)
fixes = html_tags.sub('', fixes)
zero_days = re.search(r'Google is aware of reports that .+ in the wild\.', content)
if zero_days:
fixes += " " + zero_days.group(0)
cve_list = re.findall(r'CVE-[^: ]+', content)
cve_string = ' '.join(cve_list)
print('chromium: TODO -> ' + version + '\n')
print(url + '\n')
print('\n'.join(textwrap.wrap(fixes, width=72)) + '\n')
print("CVEs:\n" + '\n'.join(textwrap.wrap(cve_string, width=72)))
break # We only care about the most recent stable channel update
|
chromium: Add my script to generate the commit messages
Might not be that readable and reliable but it should get the job done.#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3Packages.feedparser python3Packages.requests
# This script prints the Git commit message for stable channel updates.
import re
import textwrap
import feedparser
import requests
feed = feedparser.parse('https://chromereleases.googleblog.com/feeds/posts/default')
html_tags = re.compile(r'<[^>]+>')
for entry in feed.entries:
if entry.title != 'Stable Channel Update for Desktop':
continue
url = requests.get(entry.link).url.split('?')[0]
content = entry.content[0].value
if re.search(r'Linux', content) is None:
continue
#print(url) # For debugging purposes
version = re.search(r'\d+(\.\d+){3}', content).group(0)
fixes = re.search(r'This update includes .+ security fixes\.', content).group(0)
fixes = html_tags.sub('', fixes)
zero_days = re.search(r'Google is aware of reports that .+ in the wild\.', content)
if zero_days:
fixes += " " + zero_days.group(0)
cve_list = re.findall(r'CVE-[^: ]+', content)
cve_string = ' '.join(cve_list)
print('chromium: TODO -> ' + version + '\n')
print(url + '\n')
print('\n'.join(textwrap.wrap(fixes, width=72)) + '\n')
print("CVEs:\n" + '\n'.join(textwrap.wrap(cve_string, width=72)))
break # We only care about the most recent stable channel update
|
<commit_before><commit_msg>chromium: Add my script to generate the commit messages
Might not be that readable and reliable but it should get the job done.<commit_after>#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3Packages.feedparser python3Packages.requests
# This script prints the Git commit message for stable channel updates.
import re
import textwrap
import feedparser
import requests
feed = feedparser.parse('https://chromereleases.googleblog.com/feeds/posts/default')
html_tags = re.compile(r'<[^>]+>')
for entry in feed.entries:
if entry.title != 'Stable Channel Update for Desktop':
continue
url = requests.get(entry.link).url.split('?')[0]
content = entry.content[0].value
if re.search(r'Linux', content) is None:
continue
#print(url) # For debugging purposes
version = re.search(r'\d+(\.\d+){3}', content).group(0)
fixes = re.search(r'This update includes .+ security fixes\.', content).group(0)
fixes = html_tags.sub('', fixes)
zero_days = re.search(r'Google is aware of reports that .+ in the wild\.', content)
if zero_days:
fixes += " " + zero_days.group(0)
cve_list = re.findall(r'CVE-[^: ]+', content)
cve_string = ' '.join(cve_list)
print('chromium: TODO -> ' + version + '\n')
print(url + '\n')
print('\n'.join(textwrap.wrap(fixes, width=72)) + '\n')
print("CVEs:\n" + '\n'.join(textwrap.wrap(cve_string, width=72)))
break # We only care about the most recent stable channel update
|
|
c750c371ce3315d7fa556c6143a1ea5ed3e5e488
|
examples/check_remote_upload.py
|
examples/check_remote_upload.py
|
from __future__ import print_function
from openload import OpenLoad
username = 'FTP Username/API Login'
key = 'FTP Password/API Key'
openload = OpenLoad(username, key)
resp = openload.remote_upload_status()
print(resp)
|
Add check remote upload example
|
Add check remote upload example
|
Python
|
mit
|
mohan3d/PyOpenload
|
Add check remote upload example
|
from __future__ import print_function
from openload import OpenLoad
username = 'FTP Username/API Login'
key = 'FTP Password/API Key'
openload = OpenLoad(username, key)
resp = openload.remote_upload_status()
print(resp)
|
<commit_before><commit_msg>Add check remote upload example<commit_after>
|
from __future__ import print_function
from openload import OpenLoad
username = 'FTP Username/API Login'
key = 'FTP Password/API Key'
openload = OpenLoad(username, key)
resp = openload.remote_upload_status()
print(resp)
|
Add check remote upload examplefrom __future__ import print_function
from openload import OpenLoad
username = 'FTP Username/API Login'
key = 'FTP Password/API Key'
openload = OpenLoad(username, key)
resp = openload.remote_upload_status()
print(resp)
|
<commit_before><commit_msg>Add check remote upload example<commit_after>from __future__ import print_function
from openload import OpenLoad
username = 'FTP Username/API Login'
key = 'FTP Password/API Key'
openload = OpenLoad(username, key)
resp = openload.remote_upload_status()
print(resp)
|
|
e3b49fe1ecbce68154de8f286507b99848d251d7
|
external_tools/src/main/python/images/move_corrupt_images.py
|
external_tools/src/main/python/images/move_corrupt_images.py
|
"""
Script to move corrupt images to 'dirty' directory
Reads list of images to move. Does not verify that images are corrupt -
Simply moves to 'dirty' directory of appropriate data-release creating
the required directory structure
"""
import os
import argparse
parser = argparse.ArgumentParser(
description="Move corrupt images to 'dirty' dir")
parser.add_argument('-i', dest='inputFiles', required=True,
help='File containing list of images to move'
)
parser.add_argument('-s', dest='splitString',
help='token to separate the basedir from input files'
)
parser.add_argument('-r', dest='replacementString',
help='String to replace the split string with'
)
parser.add_argument('-d', dest='destDirBase', required=True,
help='Path to the base of the destination dir'
)
args = parser.parse_args()
input_files = args.inputFiles
split_string = "" if args.splitString is None else args.splitString
replacement_string = "" if args.replacementString is None else args.replacementString
with open(input_files,'rt') as f:
fnames = [fname.strip('\n') for fname in f.readlines()]
for fname in fnames:
fname = fname.replace(' ','\ ')
fname2 = fname.replace(split_string, replacement_string)
if os.path.exists(fname2):
continue
out_dir = os.path.dirname(fname2)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = "mv " + fname + " " + fname2
print(command)
os.system(command)
|
Add script to move corrupt images
|
Add script to move corrupt images
|
Python
|
apache-2.0
|
mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData
|
Add script to move corrupt images
|
"""
Script to move corrupt images to 'dirty' directory
Reads list of images to move. Does not verify that images are corrupt -
Simply moves to 'dirty' directory of appropriate data-release creating
the required directory structure
"""
import os
import argparse
parser = argparse.ArgumentParser(
description="Move corrupt images to 'dirty' dir")
parser.add_argument('-i', dest='inputFiles', required=True,
help='File containing list of images to move'
)
parser.add_argument('-s', dest='splitString',
help='token to separate the basedir from input files'
)
parser.add_argument('-r', dest='replacementString',
help='String to replace the split string with'
)
parser.add_argument('-d', dest='destDirBase', required=True,
help='Path to the base of the destination dir'
)
args = parser.parse_args()
input_files = args.inputFiles
split_string = "" if args.splitString is None else args.splitString
replacement_string = "" if args.replacementString is None else args.replacementString
with open(input_files,'rt') as f:
fnames = [fname.strip('\n') for fname in f.readlines()]
for fname in fnames:
fname = fname.replace(' ','\ ')
fname2 = fname.replace(split_string, replacement_string)
if os.path.exists(fname2):
continue
out_dir = os.path.dirname(fname2)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = "mv " + fname + " " + fname2
print(command)
os.system(command)
|
<commit_before><commit_msg>Add script to move corrupt images<commit_after>
|
"""
Script to move corrupt images to 'dirty' directory
Reads list of images to move. Does not verify that images are corrupt -
Simply moves to 'dirty' directory of appropriate data-release creating
the required directory structure
"""
import os
import argparse
parser = argparse.ArgumentParser(
description="Move corrupt images to 'dirty' dir")
parser.add_argument('-i', dest='inputFiles', required=True,
help='File containing list of images to move'
)
parser.add_argument('-s', dest='splitString',
help='token to separate the basedir from input files'
)
parser.add_argument('-r', dest='replacementString',
help='String to replace the split string with'
)
parser.add_argument('-d', dest='destDirBase', required=True,
help='Path to the base of the destination dir'
)
args = parser.parse_args()
input_files = args.inputFiles
split_string = "" if args.splitString is None else args.splitString
replacement_string = "" if args.replacementString is None else args.replacementString
with open(input_files,'rt') as f:
fnames = [fname.strip('\n') for fname in f.readlines()]
for fname in fnames:
fname = fname.replace(' ','\ ')
fname2 = fname.replace(split_string, replacement_string)
if os.path.exists(fname2):
continue
out_dir = os.path.dirname(fname2)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = "mv " + fname + " " + fname2
print(command)
os.system(command)
|
Add script to move corrupt images"""
Script to move corrupt images to 'dirty' directory
Reads list of images to move. Does not verify that images are corrupt -
Simply moves to 'dirty' directory of appropriate data-release creating
the required directory structure
"""
import os
import argparse
parser = argparse.ArgumentParser(
description="Move corrupt images to 'dirty' dir")
parser.add_argument('-i', dest='inputFiles', required=True,
help='File containing list of images to move'
)
parser.add_argument('-s', dest='splitString',
help='token to separate the basedir from input files'
)
parser.add_argument('-r', dest='replacementString',
help='String to replace the split string with'
)
parser.add_argument('-d', dest='destDirBase', required=True,
help='Path to the base of the destination dir'
)
args = parser.parse_args()
input_files = args.inputFiles
split_string = "" if args.splitString is None else args.splitString
replacement_string = "" if args.replacementString is None else args.replacementString
with open(input_files,'rt') as f:
fnames = [fname.strip('\n') for fname in f.readlines()]
for fname in fnames:
fname = fname.replace(' ','\ ')
fname2 = fname.replace(split_string, replacement_string)
if os.path.exists(fname2):
continue
out_dir = os.path.dirname(fname2)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = "mv " + fname + " " + fname2
print(command)
os.system(command)
|
<commit_before><commit_msg>Add script to move corrupt images<commit_after>"""
Script to move corrupt images to 'dirty' directory
Reads list of images to move. Does not verify that images are corrupt -
Simply moves to 'dirty' directory of appropriate data-release creating
the required directory structure
"""
import os
import argparse
parser = argparse.ArgumentParser(
description="Move corrupt images to 'dirty' dir")
parser.add_argument('-i', dest='inputFiles', required=True,
help='File containing list of images to move'
)
parser.add_argument('-s', dest='splitString',
help='token to separate the basedir from input files'
)
parser.add_argument('-r', dest='replacementString',
help='String to replace the split string with'
)
parser.add_argument('-d', dest='destDirBase', required=True,
help='Path to the base of the destination dir'
)
args = parser.parse_args()
input_files = args.inputFiles
split_string = "" if args.splitString is None else args.splitString
replacement_string = "" if args.replacementString is None else args.replacementString
with open(input_files,'rt') as f:
fnames = [fname.strip('\n') for fname in f.readlines()]
for fname in fnames:
fname = fname.replace(' ','\ ')
fname2 = fname.replace(split_string, replacement_string)
if os.path.exists(fname2):
continue
out_dir = os.path.dirname(fname2)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
command = "mv " + fname + " " + fname2
print(command)
os.system(command)
|
|
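For illustration, a minimal sketch of the path rewrite performed by the move-corrupt-images script above; the example path, split string, and replacement are assumptions, not values taken from the record:

fname = "/data/dr1/images/field 01/img_001.fits"          # hypothetical source path
split_string = "/data/dr1"                                 # hypothetical -s value
replacement_string = "/data/dr1/dirty"                     # hypothetical -r value
escaped = fname.replace(' ', '\\ ')                        # escape spaces for the shell, as the script does
print(escaped.replace(split_string, replacement_string))   # /data/dr1/dirty/images/field\ 01/img_001.fits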
29f3f84fbd90e5d43ba5d6c621d19021a45d184d
|
examples/hwapi/hwconfig_dragonboard410c.py
|
examples/hwapi/hwconfig_dragonboard410c.py
|
from machine import Pin
# 96Boards/Qualcomm DragonBoard 410c
# By default, on-board LEDs are controlled by kernel LED driver.
# To make corresponding pins be available as normal GPIO,
# corresponding driver needs to be unbound first (as root):
# echo -n "soc:leds" >/sys/class/leds/apq8016-sbc:green:user1/device/driver/unbind
# Note that application also either should be run as root, or
# /sys/class/gpio ownership needs to be changed.
# User LED 1 on gpio21
LED = Pin(21, Pin.OUT)
|
Add hwconfig for DragonBoard 410c.
|
examples/hwapi: Add hwconfig for DragonBoard 410c.
This requires recently added implementation of machine.Pin from
micropython-lib.
|
Python
|
mit
|
pfalcon/micropython,adafruit/micropython,pozetroninc/micropython,mhoffma/micropython,MrSurly/micropython,infinnovation/micropython,pramasoul/micropython,oopy/micropython,infinnovation/micropython,matthewelse/micropython,MrSurly/micropython,torwag/micropython,Timmenem/micropython,lowRISC/micropython,lowRISC/micropython,HenrikSolver/micropython,lowRISC/micropython,bvernoux/micropython,alex-robbins/micropython,blazewicz/micropython,dmazzella/micropython,oopy/micropython,pfalcon/micropython,pozetroninc/micropython,deshipu/micropython,bvernoux/micropython,ryannathans/micropython,blazewicz/micropython,swegener/micropython,blazewicz/micropython,trezor/micropython,toolmacher/micropython,kerneltask/micropython,MrSurly/micropython-esp32,matthewelse/micropython,tuc-osg/micropython,micropython/micropython-esp32,hiway/micropython,matthewelse/micropython,AriZuu/micropython,matthewelse/micropython,Peetz0r/micropython-esp32,henriknelson/micropython,toolmacher/micropython,henriknelson/micropython,tralamazza/micropython,TDAbboud/micropython,cwyark/micropython,torwag/micropython,toolmacher/micropython,chrisdearman/micropython,hiway/micropython,kerneltask/micropython,AriZuu/micropython,PappaPeppar/micropython,ryannathans/micropython,adafruit/micropython,MrSurly/micropython-esp32,swegener/micropython,henriknelson/micropython,adafruit/micropython,torwag/micropython,toolmacher/micropython,kerneltask/micropython,tobbad/micropython,AriZuu/micropython,TDAbboud/micropython,trezor/micropython,adafruit/circuitpython,AriZuu/micropython,selste/micropython,adafruit/circuitpython,mhoffma/micropython,MrSurly/micropython,Peetz0r/micropython-esp32,deshipu/micropython,dmazzella/micropython,blazewicz/micropython,pfalcon/micropython,tuc-osg/micropython,bvernoux/micropython,Timmenem/micropython,AriZuu/micropython,MrSurly/micropython,deshipu/micropython,henriknelson/micropython,mhoffma/micropython,micropython/micropython-esp32,chrisdearman/micropython,pramasoul/micropython,Peetz0r/micropython-esp32,Timmenem/micropython,tuc-osg/micropython,hiway/micropython,PappaPeppar/micropython,adafruit/circuitpython,lowRISC/micropython,trezor/micropython,pramasoul/micropython,ryannathans/micropython,tralamazza/micropython,selste/micropython,SHA2017-badge/micropython-esp32,infinnovation/micropython,HenrikSolver/micropython,HenrikSolver/micropython,kerneltask/micropython,micropython/micropython-esp32,alex-robbins/micropython,cwyark/micropython,trezor/micropython,torwag/micropython,matthewelse/micropython,Peetz0r/micropython-esp32,PappaPeppar/micropython,cwyark/micropython,deshipu/micropython,alex-robbins/micropython,dmazzella/micropython,ryannathans/micropython,PappaPeppar/micropython,MrSurly/micropython,puuu/micropython,pozetroninc/micropython,TDAbboud/micropython,pozetroninc/micropython,jmarcelino/pycom-micropython,micropython/micropython-esp32,adafruit/circuitpython,tralamazza/micropython,oopy/micropython,trezor/micropython,pozetroninc/micropython,MrSurly/micropython-esp32,dmazzella/micropython,mhoffma/micropython,mhoffma/micropython,selste/micropython,oopy/micropython,TDAbboud/micropython,swegener/micropython,tobbad/micropython,micropython/micropython-esp32,alex-robbins/micropython,adafruit/micropython,tobbad/micropython,puuu/micropython,Timmenem/micropython,bvernoux/micropython,puuu/micropython,tuc-osg/micropython,matthewelse/micropython,pramasoul/micropython,HenrikSolver/micropython,puuu/micropython,SHA2017-badge/micropython-esp32,infinnovation/micropython,adafruit/circuitpython,jmarcelino/pycom-micropython,TDAbboud/micropython,SHA2017-badg
e/micropython-esp32,swegener/micropython,tobbad/micropython,selste/micropython,toolmacher/micropython,tuc-osg/micropython,MrSurly/micropython-esp32,MrSurly/micropython-esp32,chrisdearman/micropython,cwyark/micropython,jmarcelino/pycom-micropython,chrisdearman/micropython,PappaPeppar/micropython,Peetz0r/micropython-esp32,SHA2017-badge/micropython-esp32,hiway/micropython,Timmenem/micropython,tobbad/micropython,bvernoux/micropython,SHA2017-badge/micropython-esp32,infinnovation/micropython,HenrikSolver/micropython,adafruit/circuitpython,jmarcelino/pycom-micropython,pfalcon/micropython,cwyark/micropython,selste/micropython,kerneltask/micropython,deshipu/micropython,ryannathans/micropython,pfalcon/micropython,jmarcelino/pycom-micropython,alex-robbins/micropython,pramasoul/micropython,tralamazza/micropython,torwag/micropython,oopy/micropython,chrisdearman/micropython,adafruit/micropython,swegener/micropython,puuu/micropython,henriknelson/micropython,lowRISC/micropython,hiway/micropython,blazewicz/micropython
|
examples/hwapi: Add hwconfig for DragonBoard 410c.
This requires recently added implementation of machine.Pin from
micropython-lib.
|
from machine import Pin
# 96Boards/Qualcomm DragonBoard 410c
# By default, on-board LEDs are controlled by kernel LED driver.
# To make corresponding pins be available as normal GPIO,
# corresponding driver needs to be unbound first (as root):
# echo -n "soc:leds" >/sys/class/leds/apq8016-sbc:green:user1/device/driver/unbind
# Note that application also either should be run as root, or
# /sys/class/gpio ownership needs to be changed.
# User LED 1 on gpio21
LED = Pin(21, Pin.OUT)
|
<commit_before><commit_msg>examples/hwapi: Add hwconfig for DragonBoard 410c.
This requires recently added implementation of machine.Pin from
micropython-lib.<commit_after>
|
from machine import Pin
# 96Boards/Qualcomm DragonBoard 410c
# By default, on-board LEDs are controlled by kernel LED driver.
# To make corresponding pins be available as normal GPIO,
# corresponding driver needs to be unbound first (as root):
# echo -n "soc:leds" >/sys/class/leds/apq8016-sbc:green:user1/device/driver/unbind
# Note that application also either should be run as root, or
# /sys/class/gpio ownership needs to be changed.
# User LED 1 on gpio21
LED = Pin(21, Pin.OUT)
|
examples/hwapi: Add hwconfig for DragonBoard 410c.
This requires recently added implementation of machine.Pin from
micropython-lib.from machine import Pin
# 96Boards/Qualcomm DragonBoard 410c
# By default, on-board LEDs are controlled by kernel LED driver.
# To make corresponding pins be available as normal GPIO,
# corresponding driver needs to be unbound first (as root):
# echo -n "soc:leds" >/sys/class/leds/apq8016-sbc:green:user1/device/driver/unbind
# Note that application also either should be run as root, or
# /sys/class/gpio ownership needs to be changed.
# User LED 1 on gpio21
LED = Pin(21, Pin.OUT)
|
<commit_before><commit_msg>examples/hwapi: Add hwconfig for DragonBoard 410c.
This requires recently added implementation of machine.Pin from
micropython-lib.<commit_after>from machine import Pin
# 96Boards/Qualcomm DragonBoard 410c
# By default, on-board LEDs are controlled by kernel LED driver.
# To make corresponding pins be available as normal GPIO,
# corresponding driver needs to be unbound first (as root):
# echo -n "soc:leds" >/sys/class/leds/apq8016-sbc:green:user1/device/driver/unbind
# Note that application also either should be run as root, or
# /sys/class/gpio ownership needs to be changed.
# User LED 1 on gpio21
LED = Pin(21, Pin.OUT)
|
|
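As a usage sketch for the pin definition above (assuming the file is installed as hwconfig.py and that this machine.Pin implementation exposes value(), as the other hwapi examples expect), the user LED could be blinked like this:

import time
from hwconfig import LED

# Toggle user LED 1 once per second; requires the driver unbind step described above.
while True:
    LED.value(1)
    time.sleep(0.5)
    LED.value(0)
    time.sleep(0.5)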
e5c35523744bb10c8207200784f7f4e66ed5ea0c
|
openstackclient/common/parseractions.py
|
openstackclient/common/parseractions.py
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""argparse Custom Actions"""
import argparse
class KeyValueAction(argparse.Action):
"""A custom action to parse arguments as key=value pairs.
Ensures that dest is a dict
"""
def __call__(self, parser, namespace, values, option_string=None):
# Make sure we have an empty dict rather than None
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
# Add value if an assignment else remove it
if '=' in values:
getattr(namespace, self.dest, {}).update([values.split('=', 1)])
else:
getattr(namespace, self.dest, {}).pop(values, None)
|
Tweak volume commands and add k=v argparse action
|
Tweak volume commands and add k=v argparse action
Basic cleanups:
* change metadata to property
* add new KeyValueAction to parse the property options
* multiple properties can be set using multiple --property args
* consistent formatting
* do lookups for volume args
Change-Id: Ib6c43f01ad46b395aee8c61e886f42e2a5f5573e
|
Python
|
apache-2.0
|
dtroyer/osc-lib
|
Tweak volume commands and add k=v argparse action
Basic cleanups:
* change metadata to property
* add new KeyValueAction to parse the property options
* multiple properties can be set using multiple --property args
* consistent formatting
* do lookups for volume args
Change-Id: Ib6c43f01ad46b395aee8c61e886f42e2a5f5573e
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""argparse Custom Actions"""
import argparse
class KeyValueAction(argparse.Action):
"""A custom action to parse arguments as key=value pairs.
Ensures that dest is a dict
"""
def __call__(self, parser, namespace, values, option_string=None):
# Make sure we have an empty dict rather than None
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
# Add value if an assignment else remove it
if '=' in values:
getattr(namespace, self.dest, {}).update([values.split('=', 1)])
else:
getattr(namespace, self.dest, {}).pop(values, None)
|
<commit_before><commit_msg>Tweak volume commands and add k=v argparse action
Basic cleanups:
* change metadata to property
* add new KeyValueAction to parse the property options
* multiple properties can be set using multiple --property args
* consistent formatting
* do lookups for volume args
Change-Id: Ib6c43f01ad46b395aee8c61e886f42e2a5f5573e<commit_after>
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""argparse Custom Actions"""
import argparse
class KeyValueAction(argparse.Action):
"""A custom action to parse arguments as key=value pairs.
Ensures that dest is a dict
"""
def __call__(self, parser, namespace, values, option_string=None):
# Make sure we have an empty dict rather than None
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
# Add value if an assignment else remove it
if '=' in values:
getattr(namespace, self.dest, {}).update([values.split('=', 1)])
else:
getattr(namespace, self.dest, {}).pop(values, None)
|
Tweak volume commands and add k=v argparse action
Basic cleanups:
* change metadata to property
* add new KeyValueAction to parse the property options
* multiple properties can be set using multiple --property args
* consistent formatting
* do lookups for volume args
Change-Id: Ib6c43f01ad46b395aee8c61e886f42e2a5f5573e# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""argparse Custom Actions"""
import argparse
class KeyValueAction(argparse.Action):
"""A custom action to parse arguments as key=value pairs.
Ensures that dest is a dict
"""
def __call__(self, parser, namespace, values, option_string=None):
# Make sure we have an empty dict rather than None
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
# Add value if an assignment else remove it
if '=' in values:
getattr(namespace, self.dest, {}).update([values.split('=', 1)])
else:
getattr(namespace, self.dest, {}).pop(values, None)
|
<commit_before><commit_msg>Tweak volume commands and add k=v argparse action
Basic cleanups:
* change metadata to property
* add new KeyValueAction to parse the property options
* multiple properties can be set using multiple --property args
* consistent formatting
* do lookups for volume args
Change-Id: Ib6c43f01ad46b395aee8c61e886f42e2a5f5573e<commit_after># Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""argparse Custom Actions"""
import argparse
class KeyValueAction(argparse.Action):
"""A custom action to parse arguments as key=value pairs.
Ensures that dest is a dict
"""
def __call__(self, parser, namespace, values, option_string=None):
# Make sure we have an empty dict rather than None
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
# Add value if an assignment else remove it
if '=' in values:
getattr(namespace, self.dest, {}).update([values.split('=', 1)])
else:
getattr(namespace, self.dest, {}).pop(values, None)
|
|
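A minimal usage sketch of the KeyValueAction defined above; the option name '--property' and the sample values are assumptions for illustration:

import argparse

parser = argparse.ArgumentParser()
# Repeated --property options accumulate into a single dict on the namespace.
parser.add_argument('--property', dest='properties', metavar='<key=value>', action=KeyValueAction)
ns = parser.parse_args(['--property', 'size=10', '--property', 'type=ssd', '--property', 'size'])
print(ns.properties)  # {'type': 'ssd'} -- a bare key without '=' removes an earlier assignment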
cdf3a6273ad13ed4c269e221edc33a0abb4ba116
|
next_pr_number.py
|
next_pr_number.py
|
import sys
from github import Github
if len(sys.argv) == 2:
repository = sys.argv[1]
elif len(sys.argv) > 2:
print("Usage: next_pr_number.py <repository>")
sys.exit(1)
else:
repository = 'astropy/astropy'
gh = Github()
repo = gh.get_repo('astropy/astropy')
pl = repo.get_issues(sort='created', state='all')
print("Next PR number: {0}".format(pl.get_page(0)[0].number))
|
Add a script to find the next PR number for the changelog
|
Add a script to find the next PR number for the changelog
|
Python
|
bsd-3-clause
|
astropy/astropy-tools,astropy/astropy-tools
|
Add a script to find the next PR number for the changelog
|
import sys
from github import Github
if len(sys.argv) == 2:
repository = sys.argv[1]
elif len(sys.argv) > 2:
print("Usage: next_pr_number.py <repository>")
sys.exit(1)
else:
repository = 'astropy/astropy'
gh = Github()
repo = gh.get_repo('astropy/astropy')
pl = repo.get_issues(sort='created', state='all')
print("Next PR number: {0}".format(pl.get_page(0)[0].number))
|
<commit_before><commit_msg>Add a script to find the next PR number for the changelog<commit_after>
|
import sys
from github import Github
if len(sys.argv) == 2:
repository = sys.argv[1]
elif len(sys.argv) > 2:
print("Usage: next_pr_number.py <repository>")
sys.exit(1)
else:
repository = 'astropy/astropy'
gh = Github()
repo = gh.get_repo('astropy/astropy')
pl = repo.get_issues(sort='created', state='all')
print("Next PR number: {0}".format(pl.get_page(0)[0].number))
|
Add a script to find the next PR number for the changelogimport sys
from github import Github
if len(sys.argv) == 2:
repository = sys.argv[1]
elif len(sys.argv) > 2:
print("Usage: next_pr_number.py <repository>")
sys.exit(1)
else:
repository = 'astropy/astropy'
gh = Github()
repo = gh.get_repo('astropy/astropy')
pl = repo.get_issues(sort='created', state='all')
print("Next PR number: {0}".format(pl.get_page(0)[0].number))
|
<commit_before><commit_msg>Add a script to find the next PR number for the changelog<commit_after>import sys
from github import Github
if len(sys.argv) == 2:
repository = sys.argv[1]
elif len(sys.argv) > 2:
print("Usage: next_pr_number.py <repository>")
sys.exit(1)
else:
repository = 'astropy/astropy'
gh = Github()
repo = gh.get_repo('astropy/astropy')
pl = repo.get_issues(sort='created', state='all')
print("Next PR number: {0}".format(pl.get_page(0)[0].number))
|
|
46d2c36e02605ba5dbe63df6ea75d7c00cf28993
|
readthedocs/projects/migrations/0013_add-container-limits.py
|
readthedocs/projects/migrations/0013_add-container-limits.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0012_proper-name-for-install-project'),
]
operations = [
migrations.AddField(
model_name='project',
name='build_queue',
field=models.CharField(max_length=32, null=True, verbose_name='Alternate build queue id', blank=True),
),
migrations.AddField(
model_name='project',
name='container_mem_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container memory limit in MB', blank=True),
),
migrations.AddField(
model_name='project',
name='container_time_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container time limit', blank=True),
),
]
|
Include the migration this time :fireworks:
|
Include the migration this time :fireworks:
|
Python
|
mit
|
espdev/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,tddv/readthedocs.org,tddv/readthedocs.org,davidfischer/readthedocs.org,davidfischer/readthedocs.org,stevepiercy/readthedocs.org,davidfischer/readthedocs.org,espdev/readthedocs.org,stevepiercy/readthedocs.org,rtfd/readthedocs.org,espdev/readthedocs.org,rtfd/readthedocs.org,tddv/readthedocs.org,safwanrahman/readthedocs.org,pombredanne/readthedocs.org,safwanrahman/readthedocs.org,safwanrahman/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,stevepiercy/readthedocs.org,espdev/readthedocs.org,stevepiercy/readthedocs.org,pombredanne/readthedocs.org,espdev/readthedocs.org
|
Include the migration this time :fireworks:
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0012_proper-name-for-install-project'),
]
operations = [
migrations.AddField(
model_name='project',
name='build_queue',
field=models.CharField(max_length=32, null=True, verbose_name='Alternate build queue id', blank=True),
),
migrations.AddField(
model_name='project',
name='container_mem_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container memory limit in MB', blank=True),
),
migrations.AddField(
model_name='project',
name='container_time_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container time limit', blank=True),
),
]
|
<commit_before><commit_msg>Include the migration this time :fireworks:<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0012_proper-name-for-install-project'),
]
operations = [
migrations.AddField(
model_name='project',
name='build_queue',
field=models.CharField(max_length=32, null=True, verbose_name='Alternate build queue id', blank=True),
),
migrations.AddField(
model_name='project',
name='container_mem_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container memory limit in MB', blank=True),
),
migrations.AddField(
model_name='project',
name='container_time_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container time limit', blank=True),
),
]
|
Include the migration this time :fireworks:# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0012_proper-name-for-install-project'),
]
operations = [
migrations.AddField(
model_name='project',
name='build_queue',
field=models.CharField(max_length=32, null=True, verbose_name='Alternate build queue id', blank=True),
),
migrations.AddField(
model_name='project',
name='container_mem_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container memory limit in MB', blank=True),
),
migrations.AddField(
model_name='project',
name='container_time_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container time limit', blank=True),
),
]
|
<commit_before><commit_msg>Include the migration this time :fireworks:<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0012_proper-name-for-install-project'),
]
operations = [
migrations.AddField(
model_name='project',
name='build_queue',
field=models.CharField(max_length=32, null=True, verbose_name='Alternate build queue id', blank=True),
),
migrations.AddField(
model_name='project',
name='container_mem_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container memory limit in MB', blank=True),
),
migrations.AddField(
model_name='project',
name='container_time_limit',
field=models.CharField(max_length=10, null=True, verbose_name='Container time limit', blank=True),
),
]
|
|
41fae7b73867e09b9c3965b7b3cbb911b67a26c5
|
homedisplay/info_ext_pages/migrations/0002_auto_20150315_2143.py
|
homedisplay/info_ext_pages/migrations/0002_auto_20150315_2143.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_ext_pages', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='extpage',
options={'ordering': ('timestamp',), 'get_latest_by': 'timestamp', 'verbose_name': 'Osoite', 'verbose_name_plural': 'Osoitteet'},
),
]
|
Add missing (meta) migration from previous commit
|
Add missing (meta) migration from previous commit
|
Python
|
bsd-3-clause
|
ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display
|
Add missing (meta) migration from previous commit
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_ext_pages', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='extpage',
options={'ordering': ('timestamp',), 'get_latest_by': 'timestamp', 'verbose_name': 'Osoite', 'verbose_name_plural': 'Osoitteet'},
),
]
|
<commit_before><commit_msg>Add missing (meta) migration from previous commit<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_ext_pages', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='extpage',
options={'ordering': ('timestamp',), 'get_latest_by': 'timestamp', 'verbose_name': 'Osoite', 'verbose_name_plural': 'Osoitteet'},
),
]
|
Add missing (meta) migration from previous commit# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_ext_pages', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='extpage',
options={'ordering': ('timestamp',), 'get_latest_by': 'timestamp', 'verbose_name': 'Osoite', 'verbose_name_plural': 'Osoitteet'},
),
]
|
<commit_before><commit_msg>Add missing (meta) migration from previous commit<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_ext_pages', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='extpage',
options={'ordering': ('timestamp',), 'get_latest_by': 'timestamp', 'verbose_name': 'Osoite', 'verbose_name_plural': 'Osoitteet'},
),
]
|
|
c95428c04460774d4b2c3eb247c1d7c431c13dd6
|
tests/pifont_test.py
|
tests/pifont_test.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013,2017 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import unittest
import os
from fontbakery.pifont import PiFont
class PiFontTest(unittest.TestCase):
def setUp(self):
self.font = PiFont('data/test/cousine/Cousine-Bold.ttf')
def test_get_contours_count(self):
self.assertEqual(
self.font.get_contours_count('a'),
2
)
self.assertEqual(
self.font.get_contours_count('aacute'),
3
)
self.assertEqual(
self.font.get_contours_count('adieresis'),
4
)
if __name__ == '__main__':
unittest.main()
|
Add test for pifont.PiFontFontTools.get_contours_count method
|
Add test for pifont.PiFontFontTools.get_contours_count method
|
Python
|
apache-2.0
|
moyogo/fontbakery,graphicore/fontbakery,googlefonts/fontbakery,moyogo/fontbakery,googlefonts/fontbakery,graphicore/fontbakery,graphicore/fontbakery,moyogo/fontbakery,googlefonts/fontbakery
|
Add test for pifont.PiFontFontTools.get_contours_count method
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013,2017 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import unittest
import os
from fontbakery.pifont import PiFont
class PiFontTest(unittest.TestCase):
def setUp(self):
self.font = PiFont('data/test/cousine/Cousine-Bold.ttf')
def test_get_contours_count(self):
self.assertEqual(
self.font.get_contours_count('a'),
2
)
self.assertEqual(
self.font.get_contours_count('aacute'),
3
)
self.assertEqual(
self.font.get_contours_count('adieresis'),
4
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for pifont.PiFontFontTools.get_contours_count method<commit_after>
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013,2017 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import unittest
import os
from fontbakery.pifont import PiFont
class PiFontTest(unittest.TestCase):
def setUp(self):
self.font = PiFont('data/test/cousine/Cousine-Bold.ttf')
def test_get_contours_count(self):
self.assertEqual(
self.font.get_contours_count('a'),
2
)
self.assertEqual(
self.font.get_contours_count('aacute'),
3
)
self.assertEqual(
self.font.get_contours_count('adieresis'),
4
)
if __name__ == '__main__':
unittest.main()
|
Add test for pifont.PiFontFontTools.get_contours_count method#!/usr/bin/env python
# coding: utf-8
# Copyright 2013,2017 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import unittest
import os
from fontbakery.pifont import PiFont
class PiFontTest(unittest.TestCase):
def setUp(self):
self.font = PiFont('data/test/cousine/Cousine-Bold.ttf')
def test_get_contours_count(self):
self.assertEqual(
self.font.get_contours_count('a'),
2
)
self.assertEqual(
self.font.get_contours_count('aacute'),
3
)
self.assertEqual(
self.font.get_contours_count('adieresis'),
4
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for pifont.PiFontFontTools.get_contours_count method<commit_after>#!/usr/bin/env python
# coding: utf-8
# Copyright 2013,2017 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import unittest
import os
from fontbakery.pifont import PiFont
class PiFontTest(unittest.TestCase):
def setUp(self):
self.font = PiFont('data/test/cousine/Cousine-Bold.ttf')
def test_get_contours_count(self):
self.assertEqual(
self.font.get_contours_count('a'),
2
)
self.assertEqual(
self.font.get_contours_count('aacute'),
3
)
self.assertEqual(
self.font.get_contours_count('adieresis'),
4
)
if __name__ == '__main__':
unittest.main()
|
|
0d3294d8831d87fdda605c24b7ddda82b9879a73
|
set_spider_env.py
|
set_spider_env.py
|
#!/usr/bin/env python3
import os
os.system('mv report_spider report_spider_temp')
os.system('scrapy startproject report_spider')
os.chdir('./report_spider_temp')
for f in os.listdir():
if os.path.isdir(f):
os.system('cp -r ./{} ../report_spider'.format(f))
else:
os.system('cp ./{} ../report_spider'.format(f))
os.chdir('../')
os.system('rm -r -f report_spider_temp')
os.chdir('./report_spider')
os.system('bash setup.sh')
|
Add script for set spider env
|
Add script for set spider env
|
Python
|
mit
|
HeadCow/ARPS,HeadCow/ARPS,AnselCmy/ARPS,hbtech-ai/ARPS,AnselCmy/ARPS,AnselCmy/ARPS,hbtech-ai/ARPS
|
Add script for set spider env
|
#!/usr/bin/env python3
import os
os.system('mv report_spider report_spider_temp')
os.system('scrapy startproject report_spider')
os.chdir('./report_spider_temp')
for f in os.listdir():
if os.path.isdir(f):
os.system('cp -r ./{} ../report_spider'.format(f))
else:
os.system('cp ./{} ../report_spider'.format(f))
os.chdir('../')
os.system('rm -r -f report_spider_temp')
os.chdir('./report_spider')
os.system('bash setup.sh')
|
<commit_before><commit_msg>Add script for set spider env<commit_after>
|
#!/usr/bin/env python3
import os
os.system('mv report_spider report_spider_temp')
os.system('scrapy startproject report_spider')
os.chdir('./report_spider_temp')
for f in os.listdir():
if os.path.isdir(f):
os.system('cp -r ./{} ../report_spider'.format(f))
else:
os.system('cp ./{} ../report_spider'.format(f))
os.chdir('../')
os.system('rm -r -f report_spider_temp')
os.chdir('./report_spider')
os.system('bash setup.sh')
|
Add script for set spider env#!/usr/bin/env python3
import os
os.system('mv report_spider report_spider_temp')
os.system('scrapy startproject report_spider')
os.chdir('./report_spider_temp')
for f in os.listdir():
if os.path.isdir(f):
os.system('cp -r ./{} ../report_spider'.format(f))
else:
os.system('cp ./{} ../report_spider'.format(f))
os.chdir('../')
os.system('rm -r -f report_spider_temp')
os.chdir('./report_spider')
os.system('bash setup.sh')
|
<commit_before><commit_msg>Add script for set spider env<commit_after>#!/usr/bin/env python3
import os
os.system('mv report_spider report_spider_temp')
os.system('scrapy startproject report_spider')
os.chdir('./report_spider_temp')
for f in os.listdir():
if os.path.isdir(f):
os.system('cp -r ./{} ../report_spider'.format(f))
else:
os.system('cp ./{} ../report_spider'.format(f))
os.chdir('../')
os.system('rm -r -f report_spider_temp')
os.chdir('./report_spider')
os.system('bash setup.sh')
|
|
28215cb8244c6e6bd7a6951a1380fbf78aa77fdf
|
games/management/commands/fix_wine_arch.py
|
games/management/commands/fix_wine_arch.py
|
import logging
import yaml
from django.core.management.base import BaseCommand
from games.models import Installer
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = "My shiny new management command."
def add_arch_to_non_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
for step in [step for step in script['installer'] if 'task' in step]:
task = step['task']
if task['name'] == 'wine.wineexec' and 'arch' not in task:
step['task']['arch'] = 'win32'
script_updated = True
if script_updated:
installer.content = yaml.safe_dump(script, default_flow_style=False)
return script_updated
def add_arch_to_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
try:
game_config = script.get('game', {})
except AttributeError:
LOGGER.error("The script %s is invalid", installer.slug)
return False
# Installer has arch, we're good
if game_config.get('arch') in ('win32', 'win64'):
# Game has architecture already set
return False
if game_config.get('arch'):
raise ValueError("Weird value for arch: %s", game_config['arch'])
# Set a prefix so the game doesn't use ~/.wine
if 'prefix' not in game_config:
LOGGER.warning("No prefix found for %s", installer.slug)
detected_prefix = None
for task in [step for step in script.get('installer', []) if 'task' in step]:
if 'prefix' in task:
if detected_prefix and detected_prefix != task['prefix']:
raise ValueError("Different values of prefixes found")
detected_prefix = task['prefix']
if not detected_prefix:
detected_prefix = '$GAMEDIR'
LOGGER.info("Setting prefix to %s", detected_prefix)
game_config['prefix'] = detected_prefix
script_updated = True
if 'Program Files (x86)' in installer.content:
LOGGER.info("%s is a 64bit game?", installer.slug)
detected_arch = 'win64'
else:
detected_arch = 'win32'
LOGGER.info("Setting arch for %s to %s", installer.slug, detected_arch)
game_config['arch'] = detected_arch
script_updated = True
if script_updated:
script['game'] = game_config
installer.content = yaml.safe_dump(script, default_flow_style=False)
return True
def handle(self, *args, **options):
"""Change install scripts to specify wine prefix architecture"""
installers = Installer.objects.filter(content__icontains="Program Files")
for installer in installers:
if installer.runner.slug != "wine":
script_updated = self.add_arch_to_non_wine_installers(installer)
else:
script_updated = self.add_arch_to_wine_installers(installer)
if script_updated:
LOGGER.info("Updating installer %s", installer)
installer.save()
|
Add script to add missing arch to Wine scripts
|
Add script to add missing arch to Wine scripts
|
Python
|
agpl-3.0
|
lutris/website,lutris/website,lutris/website,lutris/website
|
Add script to add missing arch to Wine scripts
|
import logging
import yaml
from django.core.management.base import BaseCommand
from games.models import Installer
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = "My shiny new management command."
def add_arch_to_non_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
for step in [step for step in script['installer'] if 'task' in step]:
task = step['task']
if task['name'] == 'wine.wineexec' and 'arch' not in task:
step['task']['arch'] = 'win32'
script_updated = True
if script_updated:
installer.content = yaml.safe_dump(script, default_flow_style=False)
return script_updated
def add_arch_to_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
try:
game_config = script.get('game', {})
except AttributeError:
LOGGER.error("The script %s is invalid", installer.slug)
return False
# Installer has arch, we're good
if game_config.get('arch') in ('win32', 'win64'):
# Game has architecture already set
return False
if game_config.get('arch'):
raise ValueError("Weird value for arch: %s", game_config['arch'])
# Set a prefix so the game doesn't use ~/.wine
if 'prefix' not in game_config:
LOGGER.warning("No prefix found for %s", installer.slug)
detected_prefix = None
for task in [step for step in script.get('installer', []) if 'task' in step]:
if 'prefix' in task:
if detected_prefix and detected_prefix != task['prefix']:
raise ValueError("Different values of prefixes found")
detected_prefix = task['prefix']
if not detected_prefix:
detected_prefix = '$GAMEDIR'
LOGGER.info("Setting prefix to %s", detected_prefix)
game_config['prefix'] = detected_prefix
script_updated = True
if 'Program Files (x86)' in installer.content:
LOGGER.info("%s is a 64bit game?", installer.slug)
detected_arch = 'win64'
else:
detected_arch = 'win32'
LOGGER.info("Setting arch for %s to %s", installer.slug, detected_arch)
game_config['arch'] = detected_arch
script_updated = True
if script_updated:
script['game'] = game_config
installer.content = yaml.safe_dump(script, default_flow_style=False)
return True
def handle(self, *args, **options):
"""Change install scripts to specify wine prefix architecture"""
installers = Installer.objects.filter(content__icontains="Program Files")
for installer in installers:
if installer.runner.slug != "wine":
script_updated = self.add_arch_to_non_wine_installers(installer)
else:
script_updated = self.add_arch_to_wine_installers(installer)
if script_updated:
LOGGER.info("Updating installer %s", installer)
installer.save()
|
<commit_before><commit_msg>Add script to add missing arch to Wine scripts<commit_after>
|
import logging
import yaml
from django.core.management.base import BaseCommand
from games.models import Installer
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = "My shiny new management command."
def add_arch_to_non_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
for step in [step for step in script['installer'] if 'task' in step]:
task = step['task']
if task['name'] == 'wine.wineexec' and 'arch' not in task:
step['task']['arch'] = 'win32'
script_updated = True
if script_updated:
installer.content = yaml.safe_dump(script, default_flow_style=False)
return script_updated
def add_arch_to_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
try:
game_config = script.get('game', {})
except AttributeError:
LOGGER.error("The script %s is invalid", installer.slug)
return False
# Installer has arch, we're good
if game_config.get('arch') in ('win32', 'win64'):
# Game has architecture already set
return False
if game_config.get('arch'):
raise ValueError("Weird value for arch: %s", game_config['arch'])
# Set a prefix so the game doesn't use ~/.wine
if 'prefix' not in game_config:
LOGGER.warning("No prefix found for %s", installer.slug)
detected_prefix = None
for task in [step for step in script.get('installer', []) if 'task' in step]:
if 'prefix' in task:
if detected_prefix and detected_prefix != task['prefix']:
raise ValueError("Different values of prefixes found")
detected_prefix = task['prefix']
if not detected_prefix:
detected_prefix = '$GAMEDIR'
LOGGER.info("Setting prefix to %s", detected_prefix)
game_config['prefix'] = detected_prefix
script_updated = True
if 'Program Files (x86)' in installer.content:
LOGGER.info("%s is a 64bit game?", installer.slug)
detected_arch = 'win64'
else:
detected_arch = 'win32'
LOGGER.info("Setting arch for %s to %s", installer.slug, detected_arch)
game_config['arch'] = detected_arch
script_updated = True
if script_updated:
script['game'] = game_config
installer.content = yaml.safe_dump(script, default_flow_style=False)
return True
def handle(self, *args, **options):
"""Change install scripts to specify wine prefix architecture"""
installers = Installer.objects.filter(content__icontains="Program Files")
for installer in installers:
if installer.runner.slug != "wine":
script_updated = self.add_arch_to_non_wine_installers(installer)
else:
script_updated = self.add_arch_to_wine_installers(installer)
if script_updated:
LOGGER.info("Updating installer %s", installer)
installer.save()
|
Add script to add missing arch to Wine scriptsimport logging
import yaml
from django.core.management.base import BaseCommand
from games.models import Installer
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = "My shiny new management command."
def add_arch_to_non_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
for step in [step for step in script['installer'] if 'task' in step]:
task = step['task']
if task['name'] == 'wine.wineexec' and 'arch' not in task:
step['task']['arch'] = 'win32'
script_updated = True
if script_updated:
installer.content = yaml.safe_dump(script, default_flow_style=False)
return script_updated
def add_arch_to_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
try:
game_config = script.get('game', {})
except AttributeError:
LOGGER.error("The script %s is invalid", installer.slug)
return False
# Installer has arch, we're good
if game_config.get('arch') in ('win32', 'win64'):
# Game has architecture already set
return False
if game_config.get('arch'):
raise ValueError("Weird value for arch: %s", game_config['arch'])
# Set a prefix so the game doesn't use ~/.wine
if 'prefix' not in game_config:
LOGGER.warning("No prefix found for %s", installer.slug)
detected_prefix = None
for task in [step for step in script.get('installer', []) if 'task' in step]:
if 'prefix' in task:
if detected_prefix and detected_prefix != task['prefix']:
raise ValueError("Different values of prefixes found")
detected_prefix = task['prefix']
if not detected_prefix:
detected_prefix = '$GAMEDIR'
LOGGER.info("Setting prefix to %s", detected_prefix)
game_config['prefix'] = detected_prefix
script_updated = True
if 'Program Files (x86)' in installer.content:
LOGGER.info("%s is a 64bit game?", installer.slug)
detected_arch = 'win64'
else:
detected_arch = 'win32'
LOGGER.info("Setting arch for %s to %s", installer.slug, detected_arch)
game_config['arch'] = detected_arch
script_updated = True
if script_updated:
script['game'] = game_config
installer.content = yaml.safe_dump(script, default_flow_style=False)
return True
def handle(self, *args, **options):
"""Change install scripts to specify wine prefix architecture"""
installers = Installer.objects.filter(content__icontains="Program Files")
for installer in installers:
if installer.runner.slug != "wine":
script_updated = self.add_arch_to_non_wine_installers(installer)
else:
script_updated = self.add_arch_to_wine_installers(installer)
if script_updated:
LOGGER.info("Updating installer %s", installer)
installer.save()
|
<commit_before><commit_msg>Add script to add missing arch to Wine scripts<commit_after>import logging
import yaml
from django.core.management.base import BaseCommand
from games.models import Installer
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
help = "My shiny new management command."
def add_arch_to_non_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
for step in [step for step in script['installer'] if 'task' in step]:
task = step['task']
if task['name'] == 'wine.wineexec' and 'arch' not in task:
step['task']['arch'] = 'win32'
script_updated = True
if script_updated:
installer.content = yaml.safe_dump(script, default_flow_style=False)
return script_updated
def add_arch_to_wine_installers(self, installer):
script_updated = False
script = yaml.safe_load(installer.content)
try:
game_config = script.get('game', {})
except AttributeError:
LOGGER.error("The script %s is invalid", installer.slug)
return False
# Installer has arch, we're good
if game_config.get('arch') in ('win32', 'win64'):
# Game has architecture already set
return False
if game_config.get('arch'):
raise ValueError("Weird value for arch: %s", game_config['arch'])
# Set a prefix so the game doesn't use ~/.wine
if 'prefix' not in game_config:
LOGGER.warning("No prefix found for %s", installer.slug)
detected_prefix = None
for task in [step for step in script.get('installer', []) if 'task' in step]:
if 'prefix' in task:
if detected_prefix and detected_prefix != task['prefix']:
raise ValueError("Different values of prefixes found")
detected_prefix = task['prefix']
if not detected_prefix:
detected_prefix = '$GAMEDIR'
LOGGER.info("Setting prefix to %s", detected_prefix)
game_config['prefix'] = detected_prefix
script_updated = True
if 'Program Files (x86)' in installer.content:
LOGGER.info("%s is a 64bit game?", installer.slug)
detected_arch = 'win64'
else:
detected_arch = 'win32'
LOGGER.info("Setting arch for %s to %s", installer.slug, detected_arch)
game_config['arch'] = detected_arch
script_updated = True
if script_updated:
script['game'] = game_config
installer.content = yaml.safe_dump(script, default_flow_style=False)
return True
def handle(self, *args, **options):
"""Change install scripts to specify wine prefix architecture"""
installers = Installer.objects.filter(content__icontains="Program Files")
for installer in installers:
if installer.runner.slug != "wine":
script_updated = self.add_arch_to_non_wine_installers(installer)
else:
script_updated = self.add_arch_to_wine_installers(installer)
if script_updated:
LOGGER.info("Updating installer %s", installer)
installer.save()
|
|
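For illustration, a sketch of the change the management command above makes to a Wine installer script; the YAML content is invented and the update mirrors, rather than calls, the command's logic:

import yaml

script = yaml.safe_load("game:\n  exe: drive_c/Game/game.exe\n")
# No prefix found in any task and no 'Program Files (x86)' in the content,
# so the command would fall back to $GAMEDIR and win32.
script['game'].update({'prefix': '$GAMEDIR', 'arch': 'win32'})
print(yaml.safe_dump(script, default_flow_style=False))
# game:
#   arch: win32
#   exe: drive_c/Game/game.exe
#   prefix: $GAMEDIR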
d9c9ba576fda54e22c9292ce43f0fb895d8eb371
|
py/longest-increasing-path-in-a-matrix.py
|
py/longest-increasing-path-in-a-matrix.py
|
from collections import defaultdict, Counter
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
neighbors = defaultdict(list)
in_deg = Counter()
longest_length = Counter()
ds = [(0, -1), (0, 1), (1, 0), (-1, 0)]
starts = set(xrange(h * w))
for x, row in enumerate(matrix):
for y, v in enumerate(row):
for dx, dy in ds:
nx, ny = x + dx, y + dy
if 0 <= nx < h and 0 <= ny < w:
if matrix[nx][ny] > v:
neighbors[x * w + y].append(nx * w + ny)
in_deg[nx * w + ny] += 1
starts.discard(nx * w + ny)
for start in starts:
longest_length[start] = 1
q = list(starts)
ans = 1
for v in q:
for neighbor in neighbors[v]:
longest_length[neighbor] = max(longest_length[neighbor], longest_length[v] + 1)
ans = max(longest_length[neighbor], ans)
in_deg[neighbor] -= 1
if in_deg[neighbor] == 0:
q.append(neighbor)
return ans
|
Add py solution for 329. Longest Increasing Path in a Matrix
|
Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
Approach 1:
Topological sort and find the maximum chain length
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
Approach 1:
Topological sort and find the maximum chain length
|
from collections import defaultdict, Counter
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
neighbors = defaultdict(list)
in_deg = Counter()
longest_length = Counter()
ds = [(0, -1), (0, 1), (1, 0), (-1, 0)]
starts = set(xrange(h * w))
for x, row in enumerate(matrix):
for y, v in enumerate(row):
for dx, dy in ds:
nx, ny = x + dx, y + dy
if 0 <= nx < h and 0 <= ny < w:
if matrix[nx][ny] > v:
neighbors[x * w + y].append(nx * w + ny)
in_deg[nx * w + ny] += 1
starts.discard(nx * w + ny)
for start in starts:
longest_length[start] = 1
q = list(starts)
ans = 1
for v in q:
for neighbor in neighbors[v]:
longest_length[neighbor] = max(longest_length[neighbor], longest_length[v] + 1)
ans = max(longest_length[neighbor], ans)
in_deg[neighbor] -= 1
if in_deg[neighbor] == 0:
q.append(neighbor)
return ans
|
<commit_before><commit_msg>Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
Approach 1:
Topological sort and find the maximum chain length<commit_after>
|
from collections import defaultdict, Counter
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
neighbors = defaultdict(list)
in_deg = Counter()
longest_length = Counter()
ds = [(0, -1), (0, 1), (1, 0), (-1, 0)]
starts = set(xrange(h * w))
for x, row in enumerate(matrix):
for y, v in enumerate(row):
for dx, dy in ds:
nx, ny = x + dx, y + dy
if 0 <= nx < h and 0 <= ny < w:
if matrix[nx][ny] > v:
neighbors[x * w + y].append(nx * w + ny)
in_deg[nx * w + ny] += 1
starts.discard(nx * w + ny)
for start in starts:
longest_length[start] = 1
q = list(starts)
ans = 1
for v in q:
for neighbor in neighbors[v]:
longest_length[neighbor] = max(longest_length[neighbor], longest_length[v] + 1)
ans = max(longest_length[neighbor], ans)
in_deg[neighbor] -= 1
if in_deg[neighbor] == 0:
q.append(neighbor)
return ans
|
Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
Approach 1:
Topological sort and find the maximum chain lengthfrom collections import defaultdict, Counter
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
neighbors = defaultdict(list)
in_deg = Counter()
longest_length = Counter()
ds = [(0, -1), (0, 1), (1, 0), (-1, 0)]
starts = set(xrange(h * w))
for x, row in enumerate(matrix):
for y, v in enumerate(row):
for dx, dy in ds:
nx, ny = x + dx, y + dy
if 0 <= nx < h and 0 <= ny < w:
if matrix[nx][ny] > v:
neighbors[x * w + y].append(nx * w + ny)
in_deg[nx * w + ny] += 1
starts.discard(nx * w + ny)
for start in starts:
longest_length[start] = 1
q = list(starts)
ans = 1
for v in q:
for neighbor in neighbors[v]:
longest_length[neighbor] = max(longest_length[neighbor], longest_length[v] + 1)
ans = max(longest_length[neighbor], ans)
in_deg[neighbor] -= 1
if in_deg[neighbor] == 0:
q.append(neighbor)
return ans
|
<commit_before><commit_msg>Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
Approach 1:
Topological sort and find the maximum chain length<commit_after>from collections import defaultdict, Counter
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
neighbors = defaultdict(list)
in_deg = Counter()
longest_length = Counter()
ds = [(0, -1), (0, 1), (1, 0), (-1, 0)]
starts = set(xrange(h * w))
for x, row in enumerate(matrix):
for y, v in enumerate(row):
for dx, dy in ds:
nx, ny = x + dx, y + dy
if 0 <= nx < h and 0 <= ny < w:
if matrix[nx][ny] > v:
neighbors[x * w + y].append(nx * w + ny)
in_deg[nx * w + ny] += 1
starts.discard(nx * w + ny)
for start in starts:
longest_length[start] = 1
q = list(starts)
ans = 1
for v in q:
for neighbor in neighbors[v]:
longest_length[neighbor] = max(longest_length[neighbor], longest_length[v] + 1)
ans = max(longest_length[neighbor], ans)
in_deg[neighbor] -= 1
if in_deg[neighbor] == 0:
q.append(neighbor)
return ans
|
|
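As a quick check of the solution above (run under Python 2, given the xrange call), the canonical example matrix yields 4 via the increasing path 1 -> 2 -> 6 -> 9:

matrix = [[9, 9, 4],
          [6, 6, 8],
          [2, 1, 1]]
print(Solution().longestIncreasingPath(matrix))  # expected output: 4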
b8bd5fc044d3dd3b273cba4443c771e60036b6c0
|
corehq/apps/importer/base.py
|
corehq/apps/importer/base.py
|
from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext as _
class ImportCases(DataInterface):
name = _("Import Cases from Excel")
slug = "import_cases"
description = _("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
|
from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext_lazy
class ImportCases(DataInterface):
name = ugettext_lazy("Import Cases from Excel")
slug = "import_cases"
description = ugettext_lazy("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
|
Use lazy translation for importer strings
|
Use lazy translation for importer strings
|
Python
|
bsd-3-clause
|
SEL-Columbia/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,gmimano/commcaretest,puttarajubr/commcare-hq
|
from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext as _
class ImportCases(DataInterface):
name = _("Import Cases from Excel")
slug = "import_cases"
description = _("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
Use lazy translation for importer strings
|
from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext_lazy
class ImportCases(DataInterface):
name = ugettext_lazy("Import Cases from Excel")
slug = "import_cases"
description = ugettext_lazy("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
|
<commit_before>from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext as _
class ImportCases(DataInterface):
name = _("Import Cases from Excel")
slug = "import_cases"
description = _("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
<commit_msg>Use lazy translation for importer strings<commit_after>
|
from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext_lazy
class ImportCases(DataInterface):
name = ugettext_lazy("Import Cases from Excel")
slug = "import_cases"
description = ugettext_lazy("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
|
from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext as _
class ImportCases(DataInterface):
name = _("Import Cases from Excel")
slug = "import_cases"
description = _("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
Use lazy translation for importer strings
from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext_lazy
class ImportCases(DataInterface):
name = ugettext_lazy("Import Cases from Excel")
slug = "import_cases"
description = ugettext_lazy("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
|
<commit_before>from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext as _
class ImportCases(DataInterface):
name = _("Import Cases from Excel")
slug = "import_cases"
description = _("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
<commit_msg>Use lazy translation for importer strings<commit_after>from corehq.apps.data_interfaces.interfaces import DataInterface
from django.utils.translation import ugettext_lazy
class ImportCases(DataInterface):
name = ugettext_lazy("Import Cases from Excel")
slug = "import_cases"
description = ugettext_lazy("Import case data from an external Excel file")
report_template_path = "importer/import_cases.html"
gide_filters = True
asynchronous = False
|
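The switch from ugettext to ugettext_lazy above matters because these strings sit in class attributes and are therefore evaluated at import time, before any request language is active. A minimal sketch of the difference, assuming a configured Django project with translations loaded:

from django.utils.translation import ugettext, ugettext_lazy

eager = ugettext("Import Cases from Excel")      # translated immediately, with whatever language is active at import time
lazy = ugettext_lazy("Import Cases from Excel")  # a lazy promise, translated only when rendered
print(str(lazy))                                 # resolved against the language active right now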
6d73e22ed9d885d9678b821b1c46d4a4567fb63f
|
parsl/tests/manual_tests/plain_executor.py
|
parsl/tests/manual_tests/plain_executor.py
|
import time
# from requests import get
import argparse
import parsl
# from parsl.config import Config
# from parsl.app.app import python_app
# from parsl.executors import HighThroughputExecutor
# from parsl.providers.slurm.slurm import SlurmProvider
# from parsl.launchers import SimpleLauncher
def sleep(seconds):
import time
time.sleep(seconds)
def double(x):
return x * 2
def call_sleep(size):
print("Priming ....")
start = time.time()
primers = [double(i) for i in range(0, 2)]
[p.result() for p in primers]
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
start = time.time()
tasks = [sleep(0) for _ in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
def call_double(size, executor):
print("Priming ....")
start = time.time()
primers = [executor.submit(double, i) for i in range(0, 2)]
print("Got results : ", [p.result() for p in primers])
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
print("Launching tasks: {}".format(size))
start = time.time()
tasks = [executor.submit(double, i) for i in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="1000",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
from htex_local import config
# parsl.set_stream_logger()
# config.executors[0].worker_debug = True
dfk = parsl.load(config)
executor = dfk.executors["htex_Local"]
call_double(int(args.count), executor)
|
Test case using executors without Parsl
|
Test case using executors without Parsl
|
Python
|
apache-2.0
|
Parsl/parsl,swift-lang/swift-e-lab,swift-lang/swift-e-lab,Parsl/parsl,Parsl/parsl,Parsl/parsl
|
Test case using executors without Parsl
|
import time
# from requests import get
import argparse
import parsl
# from parsl.config import Config
# from parsl.app.app import python_app
# from parsl.executors import HighThroughputExecutor
# from parsl.providers.slurm.slurm import SlurmProvider
# from parsl.launchers import SimpleLauncher
def sleep(seconds):
import time
time.sleep(seconds)
def double(x):
return x * 2
def call_sleep(size):
print("Priming ....")
start = time.time()
primers = [double(i) for i in range(0, 2)]
[p.result() for p in primers]
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
start = time.time()
tasks = [sleep(0) for _ in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
def call_double(size, executor):
print("Priming ....")
start = time.time()
primers = [executor.submit(double, i) for i in range(0, 2)]
print("Got results : ", [p.result() for p in primers])
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
print("Launching tasks: {}".format(size))
start = time.time()
tasks = [executor.submit(double, i) for i in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="1000",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
from htex_local import config
# parsl.set_stream_logger()
# config.executors[0].worker_debug = True
dfk = parsl.load(config)
executor = dfk.executors["htex_Local"]
call_double(int(args.count), executor)
|
<commit_before><commit_msg>Test case using executors without Parsl<commit_after>
|
import time
# from requests import get
import argparse
import parsl
# from parsl.config import Config
# from parsl.app.app import python_app
# from parsl.executors import HighThroughputExecutor
# from parsl.providers.slurm.slurm import SlurmProvider
# from parsl.launchers import SimpleLauncher
def sleep(seconds):
import time
time.sleep(seconds)
def double(x):
return x * 2
def call_sleep(size):
print("Priming ....")
start = time.time()
primers = [double(i) for i in range(0, 2)]
[p.result() for p in primers]
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
start = time.time()
tasks = [sleep(0) for _ in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
def call_double(size, executor):
print("Priming ....")
start = time.time()
primers = [executor.submit(double, i) for i in range(0, 2)]
print("Got results : ", [p.result() for p in primers])
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
print("Launching tasks: {}".format(size))
start = time.time()
tasks = [executor.submit(double, i) for i in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="1000",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
from htex_local import config
# parsl.set_stream_logger()
# config.executors[0].worker_debug = True
dfk = parsl.load(config)
executor = dfk.executors["htex_Local"]
call_double(int(args.count), executor)
|
Test case using executors without Parsl
import time
# from requests import get
import argparse
import parsl
# from parsl.config import Config
# from parsl.app.app import python_app
# from parsl.executors import HighThroughputExecutor
# from parsl.providers.slurm.slurm import SlurmProvider
# from parsl.launchers import SimpleLauncher
def sleep(seconds):
import time
time.sleep(seconds)
def double(x):
return x * 2
def call_sleep(size):
print("Priming ....")
start = time.time()
primers = [double(i) for i in range(0, 2)]
[p.result() for p in primers]
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
start = time.time()
tasks = [sleep(0) for _ in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
def call_double(size, executor):
print("Priming ....")
start = time.time()
primers = [executor.submit(double, i) for i in range(0, 2)]
print("Got results : ", [p.result() for p in primers])
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
print("Launching tasks: {}".format(size))
start = time.time()
tasks = [executor.submit(double, i) for i in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="1000",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
from htex_local import config
# parsl.set_stream_logger()
# config.executors[0].worker_debug = True
dfk = parsl.load(config)
executor = dfk.executors["htex_Local"]
call_double(int(args.count), executor)
|
<commit_before><commit_msg>Test case using executors without Parsl<commit_after>import time
# from requests import get
import argparse
import parsl
# from parsl.config import Config
# from parsl.app.app import python_app
# from parsl.executors import HighThroughputExecutor
# from parsl.providers.slurm.slurm import SlurmProvider
# from parsl.launchers import SimpleLauncher
def sleep(seconds):
import time
time.sleep(seconds)
def double(x):
return x * 2
def call_sleep(size):
print("Priming ....")
start = time.time()
primers = [double(i) for i in range(0, 2)]
[p.result() for p in primers]
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
start = time.time()
tasks = [sleep(0) for _ in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
def call_double(size, executor):
print("Priming ....")
start = time.time()
primers = [executor.submit(double, i) for i in range(0, 2)]
print("Got results : ", [p.result() for p in primers])
delta = time.time() - start
print("Priming done in {:10.4f} s".format(delta))
print("Launching tasks: {}".format(size))
start = time.time()
tasks = [executor.submit(double, i) for i in range(0, size)]
for task in tasks:
task.result()
delta = time.time() - start
print("Time to complete {} tasks: {:8.3f} s".format(args.count, delta))
print("Throughput : {:8.3f} Tasks/s".format(int(args.count) / delta))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="1000",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
from htex_local import config
# parsl.set_stream_logger()
# config.executors[0].worker_debug = True
dfk = parsl.load(config)
executor = dfk.executors["htex_Local"]
call_double(int(args.count), executor)
|
|
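Going by the argparse flags in the script above, a typical manual run would be something like `python plain_executor.py -c 10000`, assuming an importable htex_local.py config module in the working directory, which the script loads to obtain its HighThroughputExecutor configuration.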
07dde7603367cae9cc52dd8c0b1bdd1d567aa307
|
nodeconductor/core/tests/test_management.py
|
nodeconductor/core/tests/test_management.py
|
import StringIO
from django.core.management import call_command
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_no_missing_migrations(self):
result = StringIO.StringIO()
call_command('makemigrations', dry_run=True, stdout=result)
result_string = result.getvalue()
self.assertEqual(result_string, 'No changes detected\n')
|
Add a test for missing migrations
|
Add a test for missing migrations
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Add a test for missing migrations
|
import StringIO
from django.core.management import call_command
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_no_missing_migrations(self):
result = StringIO.StringIO()
call_command('makemigrations', dry_run=True, stdout=result)
result_string = result.getvalue()
self.assertEqual(result_string, 'No changes detected\n')
|
<commit_before><commit_msg>Add a test for missing migrations<commit_after>
|
import StringIO
from django.core.management import call_command
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_no_missing_migrations(self):
result = StringIO.StringIO()
call_command('makemigrations', dry_run=True, stdout=result)
result_string = result.getvalue()
self.assertEqual(result_string, 'No changes detected\n')
|
Add a test for missing migrations
import StringIO
from django.core.management import call_command
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_no_missing_migrations(self):
result = StringIO.StringIO()
call_command('makemigrations', dry_run=True, stdout=result)
result_string = result.getvalue()
self.assertEqual(result_string, 'No changes detected\n')
|
<commit_before><commit_msg>Add a test for missing migrations<commit_after>import StringIO
from django.core.management import call_command
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_no_missing_migrations(self):
result = StringIO.StringIO()
call_command('makemigrations', dry_run=True, stdout=result)
result_string = result.getvalue()
self.assertEqual(result_string, 'No changes detected\n')
|
|
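The migration test above is written for Python 2's StringIO module; an equivalent sketch for Python 3 and a current Django release (hypothetical, not part of the record) would be:

import io
from django.core.management import call_command
from django.test import TestCase

class CommandsTestCase(TestCase):
    def test_no_missing_migrations(self):
        out = io.StringIO()
        call_command('makemigrations', dry_run=True, stdout=out)
        self.assertEqual(out.getvalue(), 'No changes detected\n')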
fac9d34d6c514f33bb97760fcc391ffa17cb7b5d
|
zerver/management/commands/queue_digest_emails.py
|
zerver/management/commands/queue_digest_emails.py
|
from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
from zerver.models import UserActivity, get_user_profile_by_email
VALID_DIGEST_DAYS = (1, 2, 3)
def inactive_since(user_profile, cutoff):
# Hasn't used the app in the last 24 business-day hours.
most_recent_visit = [row.last_visit for row in \
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def last_business_day():
one_day = datetime.timedelta(hours=24)
previous_day = datetime.datetime.now(tz=pytz.utc) - one_day
while previous_day.weekday() not in VALID_DIGEST_DAYS:
previous_day -= one_day
return previous_day
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile, cutoff):
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event, lambda event: None)
class Command(BaseCommand):
help = """Enqueue digest emails for users that haven't checked the app
in a while.
"""
def handle(self, *args, **options):
# To be really conservative while we don't have user timezones or
# special-casing for companies with non-standard workweeks, only
# try to send mail on Tuesdays, Wednesdays, and Thursdays.
if datetime.datetime.utcnow().weekday() not in VALID_DIGEST_DAYS:
return
for email in ["jesstess@zulip.com", "jessica.mckellar@gmail.com",
"sipbtest@mit.edu", "jesstess+si@zulip.com"]:
user_profile = get_user_profile_by_email(email)
cutoff = last_business_day()
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
|
Add a management command to enqueue digest email recipients.
|
digest: Add a management command to enqueue digest email recipients.
(imported from commit 70ff2c7a4dae654f4077041c45e2154b3ac7afb7)
|
Python
|
apache-2.0
|
jonesgithub/zulip,Drooids/zulip,littledogboy/zulip,zorojean/zulip,andersk/zulip,hj3938/zulip,MariaFaBella85/zulip,ashwinirudrappa/zulip,krtkmj/zulip,AZtheAsian/zulip,hustlzp/zulip,Diptanshu8/zulip,dxq-git/zulip,itnihao/zulip,mdavid/zulip,so0k/zulip,zhaoweigg/zulip,susansls/zulip,aps-sids/zulip,verma-varsha/zulip,ApsOps/zulip,peguin40/zulip,kokoar/zulip,ashwinirudrappa/zulip,atomic-labs/zulip,moria/zulip,m1ssou/zulip,eeshangarg/zulip,bluesea/zulip,krtkmj/zulip,itnihao/zulip,tiansiyuan/zulip,DazWorrall/zulip,johnny9/zulip,dwrpayne/zulip,vabs22/zulip,jrowan/zulip,SmartPeople/zulip,kokoar/zulip,RobotCaleb/zulip,andersk/zulip,bluesea/zulip,zacps/zulip,sup95/zulip,wavelets/zulip,DazWorrall/zulip,jonesgithub/zulip,jackrzhang/zulip,souravbadami/zulip,paxapy/zulip,kaiyuanheshang/zulip,brockwhittaker/zulip,kokoar/zulip,tdr130/zulip,yuvipanda/zulip,krtkmj/zulip,hackerkid/zulip,KingxBanana/zulip,brockwhittaker/zulip,udxxabp/zulip,amanharitsh123/zulip,seapasulli/zulip,so0k/zulip,showell/zulip,aliceriot/zulip,ryanbackman/zulip,joyhchen/zulip,grave-w-grave/zulip,tiansiyuan/zulip,MayB/zulip,tommyip/zulip,sup95/zulip,noroot/zulip,wweiradio/zulip,ikasumiwt/zulip,huangkebo/zulip,arpitpanwar/zulip,PaulPetring/zulip,mansilladev/zulip,natanovia/zulip,bitemyapp/zulip,vikas-parashar/zulip,armooo/zulip,thomasboyt/zulip,bitemyapp/zulip,dhcrzf/zulip,udxxabp/zulip,aakash-cr7/zulip,verma-varsha/zulip,hafeez3000/zulip,johnnygaddarr/zulip,JanzTam/zulip,amallia/zulip,souravbadami/zulip,ufosky-server/zulip,hayderimran7/zulip,jeffcao/zulip,Diptanshu8/zulip,jackrzhang/zulip,j831/zulip,johnny9/zulip,jessedhillon/zulip,ikasumiwt/zulip,zulip/zulip,jonesgithub/zulip,johnnygaddarr/zulip,sonali0901/zulip,Galexrt/zulip,udxxabp/zulip,kaiyuanheshang/zulip,wdaher/zulip,zulip/zulip,bowlofstew/zulip,vakila/zulip,grave-w-grave/zulip,Drooids/zulip,Juanvulcano/zulip,developerfm/zulip,itnihao/zulip,LeeRisk/zulip,Galexrt/zulip,dxq-git/zulip,jainayush975/zulip,codeKonami/zulip,pradiptad/zulip,jrowan/zulip,wweiradio/zulip,bluesea/zulip,glovebx/zulip,xuxiao/zulip,Jianchun1/zulip,wangdeshui/zulip,brockwhittaker/zulip,easyfmxu/zulip,bowlofstew/zulip,gigawhitlocks/zulip,deer-hope/zulip,Galexrt/zulip,jimmy54/zulip,brainwane/zulip,eeshangarg/zulip,firstblade/zulip,nicholasbs/zulip,JPJPJPOPOP/zulip,xuanhan863/zulip,JanzTam/zulip,tbutter/zulip,vakila/zulip,jrowan/zulip,isht3/zulip,lfranchi/zulip,dotcool/zulip,vaidap/zulip,kou/zulip,ericzhou2008/zulip,bssrdf/zulip,hj3938/zulip,gkotian/zulip,codeKonami/zulip,avastu/zulip,andersk/zulip,SmartPeople/zulip,punchagan/zulip,levixie/zulip,arpith/zulip,AZtheAsian/zulip,firstblade/zulip,voidException/zulip,swinghu/zulip,bssrdf/zulip,blaze225/zulip,Suninus/zulip,KJin99/zulip,cosmicAsymmetry/zulip,easyfmxu/zulip,andersk/zulip,johnny9/zulip,MariaFaBella85/zulip,proliming/zulip,hackerkid/zulip,punchagan/zulip,themass/zulip,zulip/zulip,seapasulli/zulip,proliming/zulip,paxapy/zulip,xuxiao/zulip,ApsOps/zulip,LeeRisk/zulip,Gabriel0402/zulip,ApsOps/zulip,PaulPetring/zulip,suxinde2009/zulip,pradiptad/zulip,susansls/zulip,samatdav/zulip,hengqujushi/zulip,dattatreya303/zulip,jphilipsen05/zulip,amyliu345/zulip,qq1012803704/zulip,moria/zulip,m1ssou/zulip,developerfm/zulip,Gabriel0402/zulip,developerfm/zulip,dawran6/zulip,blaze225/zulip,rht/zulip,jimmy54/zulip,JPJPJPOPOP/zulip,gigawhitlocks/zulip,MariaFaBella85/zulip,zhaoweigg/zulip,arpith/zulip,tiansiyuan/zulip,umkay/zulip,ericzhou2008/zulip,peiwei/zulip,akuseru/zulip,schatt/zulip,jonesgithub/zulip,LAndreas/zulip,wavelets/zulip,moria/zulip,timabbott/zulip,suxinde2009/zulip,ud
xxabp/zulip,atomic-labs/zulip,LAndreas/zulip,SmartPeople/zulip,guiquanz/zulip,souravbadami/zulip,synicalsyntax/zulip,bowlofstew/zulip,hayderimran7/zulip,vabs22/zulip,PaulPetring/zulip,vakila/zulip,arpitpanwar/zulip,fw1121/zulip,LeeRisk/zulip,arpith/zulip,rht/zulip,avastu/zulip,wdaher/zulip,jainayush975/zulip,DazWorrall/zulip,zhaoweigg/zulip,kokoar/zulip,synicalsyntax/zulip,Galexrt/zulip,shubhamdhama/zulip,zhaoweigg/zulip,reyha/zulip,vikas-parashar/zulip,shubhamdhama/zulip,deer-hope/zulip,ipernet/zulip,zhaoweigg/zulip,Qgap/zulip,aps-sids/zulip,xuanhan863/zulip,shaunstanislaus/zulip,jessedhillon/zulip,ufosky-server/zulip,Vallher/zulip,verma-varsha/zulip,johnny9/zulip,zachallaun/zulip,tommyip/zulip,mohsenSy/zulip,Batterfii/zulip,mansilladev/zulip,deer-hope/zulip,codeKonami/zulip,xuanhan863/zulip,umkay/zulip,dxq-git/zulip,suxinde2009/zulip,hj3938/zulip,calvinleenyc/zulip,jerryge/zulip,grave-w-grave/zulip,calvinleenyc/zulip,tiansiyuan/zulip,hackerkid/zulip,natanovia/zulip,ikasumiwt/zulip,ikasumiwt/zulip,itnihao/zulip,he15his/zulip,johnny9/zulip,shaunstanislaus/zulip,Drooids/zulip,zacps/zulip,amyliu345/zulip,synicalsyntax/zulip,Batterfii/zulip,MayB/zulip,he15his/zulip,avastu/zulip,dnmfarrell/zulip,vabs22/zulip,AZtheAsian/zulip,SmartPeople/zulip,gkotian/zulip,jessedhillon/zulip,ikasumiwt/zulip,swinghu/zulip,hustlzp/zulip,zwily/zulip,christi3k/zulip,natanovia/zulip,rht/zulip,reyha/zulip,aliceriot/zulip,huangkebo/zulip,natanovia/zulip,hafeez3000/zulip,johnny9/zulip,cosmicAsymmetry/zulip,shaunstanislaus/zulip,blaze225/zulip,dattatreya303/zulip,brainwane/zulip,thomasboyt/zulip,sup95/zulip,karamcnair/zulip,aakash-cr7/zulip,MariaFaBella85/zulip,timabbott/zulip,adnanh/zulip,littledogboy/zulip,paxapy/zulip,arpitpanwar/zulip,bowlofstew/zulip,dotcool/zulip,mahim97/zulip,dwrpayne/zulip,jackrzhang/zulip,johnnygaddarr/zulip,Cheppers/zulip,jimmy54/zulip,LAndreas/zulip,ryanbackman/zulip,shrikrishnaholla/zulip,peguin40/zulip,huangkebo/zulip,yocome/zulip,babbage/zulip,avastu/zulip,Juanvulcano/zulip,AZtheAsian/zulip,brainwane/zulip,sup95/zulip,jrowan/zulip,bastianh/zulip,dattatreya303/zulip,samatdav/zulip,shrikrishnaholla/zulip,peiwei/zulip,ipernet/zulip,tdr130/zulip,ipernet/zulip,luyifan/zulip,Juanvulcano/zulip,cosmicAsymmetry/zulip,amallia/zulip,jainayush975/zulip,shaunstanislaus/zulip,shrikrishnaholla/zulip,tbutter/zulip,schatt/zulip,dhcrzf/zulip,susansls/zulip,aakash-cr7/zulip,KingxBanana/zulip,akuseru/zulip,Galexrt/zulip,vabs22/zulip,dawran6/zulip,grave-w-grave/zulip,TigorC/zulip,zwily/zulip,nicholasbs/zulip,zulip/zulip,Gabriel0402/zulip,zachallaun/zulip,zulip/zulip,stamhe/zulip,Frouk/zulip,vikas-parashar/zulip,glovebx/zulip,bluesea/zulip,bastianh/zulip,voidException/zulip,tommyip/zulip,zacps/zulip,shubhamdhama/zulip,fw1121/zulip,dotcool/zulip,sharmaeklavya2/zulip,joyhchen/zulip,xuxiao/zulip,zachallaun/zulip,Qgap/zulip,wweiradio/zulip,eeshangarg/zulip,brainwane/zulip,grave-w-grave/zulip,ipernet/zulip,karamcnair/zulip,deer-hope/zulip,Drooids/zulip,andersk/zulip,guiquanz/zulip,isht3/zulip,Vallher/zulip,Jianchun1/zulip,peiwei/zulip,kaiyuanheshang/zulip,saitodisse/zulip,noroot/zulip,Jianchun1/zulip,LAndreas/zulip,PhilSk/zulip,technicalpickles/zulip,karamcnair/zulip,noroot/zulip,Qgap/zulip,arpitpanwar/zulip,voidException/zulip,hengqujushi/zulip,rht/zulip,amanharitsh123/zulip,itnihao/zulip,voidException/zulip,hengqujushi/zulip,xuanhan863/zulip,ApsOps/zulip,dawran6/zulip,zulip/zulip,mdavid/zulip,kaiyuanheshang/zulip,dxq-git/zulip,ryanbackman/zulip,Frouk/zulip,thomasboyt/zulip,zofuthan/zulip,kou/zulip,christi3k/zulip,
bssrdf/zulip,LeeRisk/zulip,PhilSk/zulip,RobotCaleb/zulip,vabs22/zulip,EasonYi/zulip,voidException/zulip,hustlzp/zulip,AZtheAsian/zulip,eeshangarg/zulip,rht/zulip,samatdav/zulip,ufosky-server/zulip,easyfmxu/zulip,bitemyapp/zulip,nicholasbs/zulip,zacps/zulip,joshisa/zulip,shrikrishnaholla/zulip,qq1012803704/zulip,he15his/zulip,arpith/zulip,alliejones/zulip,hackerkid/zulip,Diptanshu8/zulip,johnnygaddarr/zulip,ashwinirudrappa/zulip,jeffcao/zulip,luyifan/zulip,jackrzhang/zulip,sup95/zulip,PhilSk/zulip,AZtheAsian/zulip,zorojean/zulip,KJin99/zulip,udxxabp/zulip,Frouk/zulip,DazWorrall/zulip,thomasboyt/zulip,jessedhillon/zulip,technicalpickles/zulip,armooo/zulip,dotcool/zulip,babbage/zulip,johnnygaddarr/zulip,MayB/zulip,brockwhittaker/zulip,grave-w-grave/zulip,Juanvulcano/zulip,proliming/zulip,adnanh/zulip,reyha/zulip,Jianchun1/zulip,souravbadami/zulip,shrikrishnaholla/zulip,mansilladev/zulip,saitodisse/zulip,jackrzhang/zulip,adnanh/zulip,pradiptad/zulip,wdaher/zulip,aps-sids/zulip,jerryge/zulip,firstblade/zulip,dotcool/zulip,zofuthan/zulip,dhcrzf/zulip,dxq-git/zulip,joshisa/zulip,KingxBanana/zulip,christi3k/zulip,LeeRisk/zulip,ApsOps/zulip,kou/zulip,Frouk/zulip,levixie/zulip,jphilipsen05/zulip,paxapy/zulip,amyliu345/zulip,codeKonami/zulip,rishig/zulip,umkay/zulip,noroot/zulip,akuseru/zulip,hj3938/zulip,wweiradio/zulip,dnmfarrell/zulip,vakila/zulip,dattatreya303/zulip,lfranchi/zulip,luyifan/zulip,developerfm/zulip,MayB/zulip,themass/zulip,hayderimran7/zulip,brainwane/zulip,samatdav/zulip,jackrzhang/zulip,rishig/zulip,ahmadassaf/zulip,Batterfii/zulip,udxxabp/zulip,zacps/zulip,easyfmxu/zulip,ikasumiwt/zulip,nicholasbs/zulip,hafeez3000/zulip,stamhe/zulip,mansilladev/zulip,adnanh/zulip,armooo/zulip,PaulPetring/zulip,showell/zulip,MariaFaBella85/zulip,gkotian/zulip,Qgap/zulip,vaidap/zulip,EasonYi/zulip,shubhamdhama/zulip,glovebx/zulip,levixie/zulip,tommyip/zulip,nicholasbs/zulip,EasonYi/zulip,armooo/zulip,eeshangarg/zulip,calvinleenyc/zulip,hj3938/zulip,akuseru/zulip,j831/zulip,amanharitsh123/zulip,jimmy54/zulip,Gabriel0402/zulip,wweiradio/zulip,bastianh/zulip,EasonYi/zulip,Juanvulcano/zulip,willingc/zulip,ipernet/zulip,itnihao/zulip,bastianh/zulip,tdr130/zulip,udxxabp/zulip,glovebx/zulip,ipernet/zulip,krtkmj/zulip,zachallaun/zulip,umkay/zulip,yuvipanda/zulip,amallia/zulip,xuxiao/zulip,alliejones/zulip,esander91/zulip,arpith/zulip,MariaFaBella85/zulip,seapasulli/zulip,jrowan/zulip,m1ssou/zulip,schatt/zulip,Cheppers/zulip,shaunstanislaus/zulip,aliceriot/zulip,qq1012803704/zulip,Suninus/zulip,mohsenSy/zulip,EasonYi/zulip,m1ssou/zulip,peguin40/zulip,developerfm/zulip,bluesea/zulip,dnmfarrell/zulip,schatt/zulip,Cheppers/zulip,glovebx/zulip,esander91/zulip,adnanh/zulip,alliejones/zulip,Vallher/zulip,joshisa/zulip,Diptanshu8/zulip,ericzhou2008/zulip,Diptanshu8/zulip,aps-sids/zulip,guiquanz/zulip,dawran6/zulip,ericzhou2008/zulip,zwily/zulip,lfranchi/zulip,willingc/zulip,JPJPJPOPOP/zulip,qq1012803704/zulip,sonali0901/zulip,PaulPetring/zulip,tiansiyuan/zulip,isht3/zulip,wavelets/zulip,dwrpayne/zulip,zorojean/zulip,gigawhitlocks/zulip,showell/zulip,lfranchi/zulip,hackerkid/zulip,zofuthan/zulip,amanharitsh123/zulip,zwily/zulip,RobotCaleb/zulip,technicalpickles/zulip,themass/zulip,so0k/zulip,tommyip/zulip,eeshangarg/zulip,zhaoweigg/zulip,timabbott/zulip,he15his/zulip,yuvipanda/zulip,JanzTam/zulip,Vallher/zulip,luyifan/zulip,babbage/zulip,sonali0901/zulip,saitodisse/zulip,themass/zulip,SmartPeople/zulip,developerfm/zulip,avastu/zulip,yocome/zulip,ahmadassaf/zulip,sonali0901/zulip,praveenaki/zulip,aliceriot/zulip,mans
illadev/zulip,mahim97/zulip,kaiyuanheshang/zulip,technicalpickles/zulip,amallia/zulip,eastlhu/zulip,ryansnowboarder/zulip,zachallaun/zulip,niftynei/zulip,avastu/zulip,zachallaun/zulip,kou/zulip,amyliu345/zulip,jimmy54/zulip,alliejones/zulip,technicalpickles/zulip,stamhe/zulip,ufosky-server/zulip,esander91/zulip,vikas-parashar/zulip,arpith/zulip,deer-hope/zulip,isht3/zulip,hayderimran7/zulip,dwrpayne/zulip,KJin99/zulip,fw1121/zulip,zwily/zulip,samatdav/zulip,jeffcao/zulip,armooo/zulip,Frouk/zulip,dxq-git/zulip,EasonYi/zulip,niftynei/zulip,alliejones/zulip,littledogboy/zulip,zorojean/zulip,saitodisse/zulip,jessedhillon/zulip,jeffcao/zulip,hafeez3000/zulip,paxapy/zulip,wavelets/zulip,adnanh/zulip,akuseru/zulip,zorojean/zulip,aps-sids/zulip,suxinde2009/zulip,proliming/zulip,umkay/zulip,j831/zulip,gigawhitlocks/zulip,ApsOps/zulip,esander91/zulip,firstblade/zulip,JanzTam/zulip,Jianchun1/zulip,praveenaki/zulip,zofuthan/zulip,ericzhou2008/zulip,timabbott/zulip,aakash-cr7/zulip,fw1121/zulip,moria/zulip,jainayush975/zulip,stamhe/zulip,littledogboy/zulip,bowlofstew/zulip,timabbott/zulip,karamcnair/zulip,jackrzhang/zulip,wweiradio/zulip,sharmaeklavya2/zulip,peguin40/zulip,yuvipanda/zulip,SmartPeople/zulip,krtkmj/zulip,avastu/zulip,levixie/zulip,peguin40/zulip,kou/zulip,hackerkid/zulip,luyifan/zulip,Gabriel0402/zulip,shubhamdhama/zulip,deer-hope/zulip,esander91/zulip,vikas-parashar/zulip,akuseru/zulip,praveenaki/zulip,babbage/zulip,codeKonami/zulip,luyifan/zulip,eeshangarg/zulip,joshisa/zulip,pradiptad/zulip,tbutter/zulip,synicalsyntax/zulip,niftynei/zulip,jonesgithub/zulip,amanharitsh123/zulip,nicholasbs/zulip,seapasulli/zulip,jimmy54/zulip,Gabriel0402/zulip,glovebx/zulip,punchagan/zulip,verma-varsha/zulip,firstblade/zulip,TigorC/zulip,eastlhu/zulip,Galexrt/zulip,wangdeshui/zulip,susansls/zulip,zwily/zulip,kokoar/zulip,gigawhitlocks/zulip,yuvipanda/zulip,fw1121/zulip,yocome/zulip,JanzTam/zulip,KJin99/zulip,Qgap/zulip,aliceriot/zulip,pradiptad/zulip,littledogboy/zulip,paxapy/zulip,kou/zulip,Batterfii/zulip,bitemyapp/zulip,atomic-labs/zulip,ryansnowboarder/zulip,eastlhu/zulip,pradiptad/zulip,levixie/zulip,zulip/zulip,ryansnowboarder/zulip,guiquanz/zulip,umkay/zulip,dhcrzf/zulip,Batterfii/zulip,TigorC/zulip,natanovia/zulip,swinghu/zulip,synicalsyntax/zulip,wdaher/zulip,hj3938/zulip,praveenaki/zulip,gkotian/zulip,ahmadassaf/zulip,hengqujushi/zulip,TigorC/zulip,rishig/zulip,KJin99/zulip,wweiradio/zulip,tbutter/zulip,ipernet/zulip,jerryge/zulip,mansilladev/zulip,Gabriel0402/zulip,ryansnowboarder/zulip,wdaher/zulip,he15his/zulip,mohsenSy/zulip,yuvipanda/zulip,verma-varsha/zulip,dotcool/zulip,j831/zulip,jainayush975/zulip,tiansiyuan/zulip,moria/zulip,thomasboyt/zulip,adnanh/zulip,zacps/zulip,punchagan/zulip,mohsenSy/zulip,KingxBanana/zulip,jphilipsen05/zulip,so0k/zulip,willingc/zulip,ericzhou2008/zulip,vakila/zulip,pradiptad/zulip,themass/zulip,PhilSk/zulip,Cheppers/zulip,hengqujushi/zulip,blaze225/zulip,joyhchen/zulip,Suninus/zulip,guiquanz/zulip,eastlhu/zulip,amanharitsh123/zulip,noroot/zulip,krtkmj/zulip,dnmfarrell/zulip,hengqujushi/zulip,ashwinirudrappa/zulip,proliming/zulip,ryansnowboarder/zulip,dawran6/zulip,qq1012803704/zulip,willingc/zulip,Vallher/zulip,umkay/zulip,atomic-labs/zulip,jonesgithub/zulip,littledogboy/zulip,dnmfarrell/zulip,jimmy54/zulip,dotcool/zulip,ahmadassaf/zulip,timabbott/zulip,xuanhan863/zulip,hj3938/zulip,wavelets/zulip,ahmadassaf/zulip,souravbadami/zulip,cosmicAsymmetry/zulip,punchagan/zulip,babbage/zulip,amallia/zulip,Cheppers/zulip,ericzhou2008/zulip,susansls/zulip,shaunstanislau
s/zulip,jphilipsen05/zulip,Frouk/zulip,themass/zulip,sup95/zulip,praveenaki/zulip,so0k/zulip,armooo/zulip,ashwinirudrappa/zulip,Batterfii/zulip,MayB/zulip,peiwei/zulip,dattatreya303/zulip,suxinde2009/zulip,sharmaeklavya2/zulip,seapasulli/zulip,easyfmxu/zulip,schatt/zulip,ApsOps/zulip,Batterfii/zulip,eastlhu/zulip,MayB/zulip,jrowan/zulip,mahim97/zulip,natanovia/zulip,blaze225/zulip,mahim97/zulip,bowlofstew/zulip,swinghu/zulip,ashwinirudrappa/zulip,atomic-labs/zulip,DazWorrall/zulip,technicalpickles/zulip,andersk/zulip,karamcnair/zulip,JPJPJPOPOP/zulip,JPJPJPOPOP/zulip,gigawhitlocks/zulip,rishig/zulip,TigorC/zulip,xuxiao/zulip,johnnygaddarr/zulip,RobotCaleb/zulip,yocome/zulip,amallia/zulip,Drooids/zulip,zorojean/zulip,TigorC/zulip,tbutter/zulip,sharmaeklavya2/zulip,arpitpanwar/zulip,babbage/zulip,firstblade/zulip,hustlzp/zulip,atomic-labs/zulip,fw1121/zulip,amyliu345/zulip,he15his/zulip,zofuthan/zulip,mdavid/zulip,akuseru/zulip,johnny9/zulip,isht3/zulip,kou/zulip,alliejones/zulip,eastlhu/zulip,kaiyuanheshang/zulip,Suninus/zulip,so0k/zulip,shrikrishnaholla/zulip,rishig/zulip,easyfmxu/zulip,technicalpickles/zulip,he15his/zulip,tdr130/zulip,peiwei/zulip,huangkebo/zulip,esander91/zulip,wdaher/zulip,mdavid/zulip,proliming/zulip,andersk/zulip,Jianchun1/zulip,codeKonami/zulip,qq1012803704/zulip,vaidap/zulip,niftynei/zulip,Suninus/zulip,joshisa/zulip,peguin40/zulip,ufosky-server/zulip,bastianh/zulip,hayderimran7/zulip,dhcrzf/zulip,calvinleenyc/zulip,susansls/zulip,hafeez3000/zulip,developerfm/zulip,wavelets/zulip,aakash-cr7/zulip,jerryge/zulip,JanzTam/zulip,vaidap/zulip,bluesea/zulip,zachallaun/zulip,vakila/zulip,bitemyapp/zulip,dxq-git/zulip,themass/zulip,PhilSk/zulip,hustlzp/zulip,Vallher/zulip,showell/zulip,seapasulli/zulip,LeeRisk/zulip,ufosky-server/zulip,seapasulli/zulip,christi3k/zulip,Drooids/zulip,timabbott/zulip,dawran6/zulip,calvinleenyc/zulip,yocome/zulip,hayderimran7/zulip,peiwei/zulip,ashwinirudrappa/zulip,wangdeshui/zulip,peiwei/zulip,Drooids/zulip,bssrdf/zulip,j831/zulip,babbage/zulip,souravbadami/zulip,wavelets/zulip,wangdeshui/zulip,jerryge/zulip,gkotian/zulip,christi3k/zulip,jeffcao/zulip,mdavid/zulip,praveenaki/zulip,tommyip/zulip,jessedhillon/zulip,deer-hope/zulip,swinghu/zulip,gkotian/zulip,jerryge/zulip,dwrpayne/zulip,synicalsyntax/zulip,aliceriot/zulip,willingc/zulip,vikas-parashar/zulip,suxinde2009/zulip,thomasboyt/zulip,huangkebo/zulip,RobotCaleb/zulip,wdaher/zulip,jeffcao/zulip,JPJPJPOPOP/zulip,zofuthan/zulip,vaidap/zulip,bssrdf/zulip,luyifan/zulip,praveenaki/zulip,brainwane/zulip,sharmaeklavya2/zulip,niftynei/zulip,LAndreas/zulip,showell/zulip,sharmaeklavya2/zulip,yocome/zulip,vaidap/zulip,tiansiyuan/zulip,ahmadassaf/zulip,moria/zulip,bluesea/zulip,yuvipanda/zulip,JanzTam/zulip,karamcnair/zulip,LAndreas/zulip,tdr130/zulip,brockwhittaker/zulip,jessedhillon/zulip,swinghu/zulip,christi3k/zulip,bowlofstew/zulip,tdr130/zulip,stamhe/zulip,armooo/zulip,ikasumiwt/zulip,showell/zulip,levixie/zulip,esander91/zulip,tbutter/zulip,wangdeshui/zulip,johnnygaddarr/zulip,ryanbackman/zulip,proliming/zulip,KJin99/zulip,samatdav/zulip,m1ssou/zulip,lfranchi/zulip,rishig/zulip,so0k/zulip,huangkebo/zulip,xuxiao/zulip,hackerkid/zulip,sonali0901/zulip,zorojean/zulip,hafeez3000/zulip,willingc/zulip,suxinde2009/zulip,m1ssou/zulip,reyha/zulip,tbutter/zulip,mansilladev/zulip,punchagan/zulip,kokoar/zulip,RobotCaleb/zulip,alliejones/zulip,calvinleenyc/zulip,xuanhan863/zulip,reyha/zulip,m1ssou/zulip,noroot/zulip,jainayush975/zulip,Cheppers/zulip,EasonYi/zulip,aps-sids/zulip,saitodisse/zulip,Galexrt/zulip
,tommyip/zulip,kokoar/zulip,Diptanshu8/zulip,fw1121/zulip,amyliu345/zulip,blaze225/zulip,PhilSk/zulip,codeKonami/zulip,willingc/zulip,xuxiao/zulip,hengqujushi/zulip,schatt/zulip,voidException/zulip,brockwhittaker/zulip,MayB/zulip,jeffcao/zulip,Qgap/zulip,glovebx/zulip,ryansnowboarder/zulip,tdr130/zulip,krtkmj/zulip,KingxBanana/zulip,saitodisse/zulip,PaulPetring/zulip,dhcrzf/zulip,joyhchen/zulip,yocome/zulip,rishig/zulip,PaulPetring/zulip,zofuthan/zulip,vakila/zulip,brainwane/zulip,dnmfarrell/zulip,levixie/zulip,shrikrishnaholla/zulip,aakash-cr7/zulip,Vallher/zulip,dwrpayne/zulip,Suninus/zulip,stamhe/zulip,mahim97/zulip,Suninus/zulip,wangdeshui/zulip,noroot/zulip,bssrdf/zulip,bitemyapp/zulip,saitodisse/zulip,qq1012803704/zulip,schatt/zulip,natanovia/zulip,cosmicAsymmetry/zulip,verma-varsha/zulip,LAndreas/zulip,cosmicAsymmetry/zulip,j831/zulip,gkotian/zulip,atomic-labs/zulip,guiquanz/zulip,DazWorrall/zulip,ahmadassaf/zulip,bastianh/zulip,joshisa/zulip,lfranchi/zulip,LeeRisk/zulip,synicalsyntax/zulip,stamhe/zulip,easyfmxu/zulip,wangdeshui/zulip,hafeez3000/zulip,niftynei/zulip,aps-sids/zulip,ufosky-server/zulip,shaunstanislaus/zulip,KJin99/zulip,zhaoweigg/zulip,huangkebo/zulip,mohsenSy/zulip,mdavid/zulip,ryansnowboarder/zulip,Juanvulcano/zulip,DazWorrall/zulip,arpitpanwar/zulip,mahim97/zulip,lfranchi/zulip,ryanbackman/zulip,shubhamdhama/zulip,isht3/zulip,eastlhu/zulip,MariaFaBella85/zulip,swinghu/zulip,RobotCaleb/zulip,zwily/zulip,mdavid/zulip,joyhchen/zulip,jphilipsen05/zulip,dattatreya303/zulip,hustlzp/zulip,bastianh/zulip,aliceriot/zulip,jonesgithub/zulip,showell/zulip,dnmfarrell/zulip,bssrdf/zulip,moria/zulip,ryanbackman/zulip,joyhchen/zulip,itnihao/zulip,joshisa/zulip,dwrpayne/zulip,bitemyapp/zulip,voidException/zulip,thomasboyt/zulip,guiquanz/zulip,hustlzp/zulip,vabs22/zulip,kaiyuanheshang/zulip,KingxBanana/zulip,shubhamdhama/zulip,littledogboy/zulip,arpitpanwar/zulip,dhcrzf/zulip,jerryge/zulip,reyha/zulip,Frouk/zulip,nicholasbs/zulip,firstblade/zulip,rht/zulip,Cheppers/zulip,Qgap/zulip,rht/zulip,gigawhitlocks/zulip,hayderimran7/zulip,xuanhan863/zulip,karamcnair/zulip,amallia/zulip,sonali0901/zulip,punchagan/zulip,mohsenSy/zulip,jphilipsen05/zulip
|
digest: Add a management command to enqueue digest email recipients.
(imported from commit 70ff2c7a4dae654f4077041c45e2154b3ac7afb7)
|
from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
from zerver.models import UserActivity, get_user_profile_by_email
VALID_DIGEST_DAYS = (1, 2, 3)
def inactive_since(user_profile, cutoff):
# Hasn't used the app in the last 24 business-day hours.
most_recent_visit = [row.last_visit for row in \
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def last_business_day():
one_day = datetime.timedelta(hours=24)
previous_day = datetime.datetime.now(tz=pytz.utc) - one_day
while previous_day.weekday() not in VALID_DIGEST_DAYS:
previous_day -= one_day
return previous_day
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile, cutoff):
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event, lambda event: None)
class Command(BaseCommand):
help = """Enqueue digest emails for users that haven't checked the app
in a while.
"""
def handle(self, *args, **options):
# To be really conservative while we don't have user timezones or
# special-casing for companies with non-standard workweeks, only
# try to send mail on Tuesdays, Wednesdays, and Thursdays.
if datetime.datetime.utcnow().weekday() not in VALID_DIGEST_DAYS:
return
for email in ["jesstess@zulip.com", "jessica.mckellar@gmail.com",
"sipbtest@mit.edu", "jesstess+si@zulip.com"]:
user_profile = get_user_profile_by_email(email)
cutoff = last_business_day()
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
|
<commit_before><commit_msg>digest: Add a management command to enqueue digest email recipients.
(imported from commit 70ff2c7a4dae654f4077041c45e2154b3ac7afb7)<commit_after>
|
from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
from zerver.models import UserActivity, get_user_profile_by_email
VALID_DIGEST_DAYS = (1, 2, 3)
def inactive_since(user_profile, cutoff):
# Hasn't used the app in the last 24 business-day hours.
most_recent_visit = [row.last_visit for row in \
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def last_business_day():
one_day = datetime.timedelta(hours=24)
previous_day = datetime.datetime.now(tz=pytz.utc) - one_day
while previous_day.weekday() not in VALID_DIGEST_DAYS:
previous_day -= one_day
return previous_day
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile, cutoff):
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event, lambda event: None)
class Command(BaseCommand):
help = """Enqueue digest emails for users that haven't checked the app
in a while.
"""
def handle(self, *args, **options):
# To be really conservative while we don't have user timezones or
# special-casing for companies with non-standard workweeks, only
# try to send mail on Tuesdays, Wednesdays, and Thursdays.
if datetime.datetime.utcnow().weekday() not in VALID_DIGEST_DAYS:
return
for email in ["jesstess@zulip.com", "jessica.mckellar@gmail.com",
"sipbtest@mit.edu", "jesstess+si@zulip.com"]:
user_profile = get_user_profile_by_email(email)
cutoff = last_business_day()
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
|
digest: Add a management command to enqueue digest email recipients.
(imported from commit 70ff2c7a4dae654f4077041c45e2154b3ac7afb7)
from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
from zerver.models import UserActivity, get_user_profile_by_email
VALID_DIGEST_DAYS = (1, 2, 3)
def inactive_since(user_profile, cutoff):
# Hasn't used the app in the last 24 business-day hours.
most_recent_visit = [row.last_visit for row in \
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def last_business_day():
one_day = datetime.timedelta(hours=24)
previous_day = datetime.datetime.now(tz=pytz.utc) - one_day
while previous_day.weekday() not in VALID_DIGEST_DAYS:
previous_day -= one_day
return previous_day
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile, cutoff):
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event, lambda event: None)
class Command(BaseCommand):
help = """Enqueue digest emails for users that haven't checked the app
in a while.
"""
def handle(self, *args, **options):
# To be really conservative while we don't have user timezones or
# special-casing for companies with non-standard workweeks, only
# try to send mail on Tuesdays, Wednesdays, and Thursdays.
if datetime.datetime.utcnow().weekday() not in VALID_DIGEST_DAYS:
return
for email in ["jesstess@zulip.com", "jessica.mckellar@gmail.com",
"sipbtest@mit.edu", "jesstess+si@zulip.com"]:
user_profile = get_user_profile_by_email(email)
cutoff = last_business_day()
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
|
<commit_before><commit_msg>digest: Add a management command to enqueue digest email recipients.
(imported from commit 70ff2c7a4dae654f4077041c45e2154b3ac7afb7)<commit_after>from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from zerver.lib.queue import queue_json_publish
from zerver.models import UserActivity, get_user_profile_by_email
VALID_DIGEST_DAYS = (1, 2, 3)
def inactive_since(user_profile, cutoff):
# Hasn't used the app in the last 24 business-day hours.
most_recent_visit = [row.last_visit for row in \
UserActivity.objects.filter(
user_profile=user_profile)]
if not most_recent_visit:
# This person has never used the app.
return True
last_visit = max(most_recent_visit)
return last_visit < cutoff
def last_business_day():
one_day = datetime.timedelta(hours=24)
previous_day = datetime.datetime.now(tz=pytz.utc) - one_day
while previous_day.weekday() not in VALID_DIGEST_DAYS:
previous_day -= one_day
return previous_day
# Changes to this should also be reflected in
# zerver/worker/queue_processors.py:DigestWorker.consume()
def queue_digest_recipient(user_profile, cutoff):
# Convert cutoff to epoch seconds for transit.
event = {"user_profile_id": user_profile.id,
"cutoff": cutoff.strftime('%s')}
queue_json_publish("digest_emails", event, lambda event: None)
class Command(BaseCommand):
help = """Enqueue digest emails for users that haven't checked the app
in a while.
"""
def handle(self, *args, **options):
# To be really conservative while we don't have user timezones or
# special-casing for companies with non-standard workweeks, only
# try to send mail on Tuesdays, Wednesdays, and Thursdays.
if datetime.datetime.utcnow().weekday() not in VALID_DIGEST_DAYS:
return
for email in ["jesstess@zulip.com", "jessica.mckellar@gmail.com",
"sipbtest@mit.edu", "jesstess+si@zulip.com"]:
user_profile = get_user_profile_by_email(email)
cutoff = last_business_day()
if inactive_since(user_profile, cutoff):
queue_digest_recipient(user_profile, cutoff)
|
|
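One detail worth noting in the digest command above: cutoff.strftime('%s') relies on a platform-specific strftime extension (glibc) rather than documented Python behaviour, so it is not portable. A hypothetical portable equivalent for producing the same epoch-seconds string:

import calendar

def cutoff_to_epoch_seconds(cutoff):
    # cutoff is a timezone-aware UTC datetime, so go through its UTC time tuple
    return str(calendar.timegm(cutoff.utctimetuple()))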
1a0be0d7731a6e89c4b84991f1fd486722b7eb39
|
sample-code/examples/simple_real_device.py
|
sample-code/examples/simple_real_device.py
|
"""Be sure to use the latest selenium version
as there might be some problems with JSON serialization
Before running the test make sure you started appium server
with TestApp app: grunt appium:TestApp
"""
import unittest
import os
from random import randint
from selenium import webdriver
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
# set up appium
app = "io.appium.TestApp"
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'browserName': 'iOS',
'platform': 'Mac',
'version': '6.0',
'app': app
})
self._values = []
def _populate(self):
# populate text fields with two random number
elems = self.driver.find_elements_by_tag_name('textField')
for elem in elems:
rndNum = randint(0, 10)
elem.send_keys(rndNum)
self._values.append(rndNum)
def test_ui_computation(self):
# populate text fields with values
self._populate()
# trigger computation by using the button
buttons = self.driver.find_elements_by_tag_name("button")
buttons[0].click()
# is sum equal ?
texts = self.driver.find_elements_by_tag_name("staticText")
self.assertEqual(int(texts[0].text), self._values[0] + self._values[1])
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
Add example for real device (iOS).
|
Add example for real device (iOS).
|
Python
|
apache-2.0
|
appium/appium,appium/appium,appium/appium,appium/appium,Sw0rdstream/appium,appium/appium,appium/appium
|
Add example for real device (iOS).
|
"""Be sure to use the latest selenium version
as there might be some problems with JSON serialization
Before running the test make sure you started appium server
with TestApp app: grunt appium:TestApp
"""
import unittest
import os
from random import randint
from selenium import webdriver
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
# set up appium
app = "io.appium.TestApp"
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'browserName': 'iOS',
'platform': 'Mac',
'version': '6.0',
'app': app
})
self._values = []
def _populate(self):
# populate text fields with two random number
elems = self.driver.find_elements_by_tag_name('textField')
for elem in elems:
rndNum = randint(0, 10)
elem.send_keys(rndNum)
self._values.append(rndNum)
def test_ui_computation(self):
# populate text fields with values
self._populate()
# trigger computation by using the button
buttons = self.driver.find_elements_by_tag_name("button")
buttons[0].click()
# is sum equal ?
texts = self.driver.find_elements_by_tag_name("staticText")
self.assertEqual(int(texts[0].text), self._values[0] + self._values[1])
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add example for real device (iOS).<commit_after>
|
"""Be sure to use the latest selenium version
as there might be some problems with JSON serialization
Before running the test make sure you started appium server
with TestApp app: grunt appium:TestApp
"""
import unittest
import os
from random import randint
from selenium import webdriver
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
# set up appium
app = "io.appium.TestApp"
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'browserName': 'iOS',
'platform': 'Mac',
'version': '6.0',
'app': app
})
self._values = []
def _populate(self):
# populate text fields with two random number
elems = self.driver.find_elements_by_tag_name('textField')
for elem in elems:
rndNum = randint(0, 10)
elem.send_keys(rndNum)
self._values.append(rndNum)
def test_ui_computation(self):
# populate text fields with values
self._populate()
# trigger computation by using the button
buttons = self.driver.find_elements_by_tag_name("button")
buttons[0].click()
# is sum equal ?
texts = self.driver.find_elements_by_tag_name("staticText")
self.assertEqual(int(texts[0].text), self._values[0] + self._values[1])
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
Add example for real device (iOS).
"""Be sure to use the latest selenium version
as there might be some problems with JSON serialization
Before running the test make sure you started appium server
with TestApp app: grunt appium:TestApp
"""
import unittest
import os
from random import randint
from selenium import webdriver
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
# set up appium
app = "io.appium.TestApp"
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'browserName': 'iOS',
'platform': 'Mac',
'version': '6.0',
'app': app
})
self._values = []
def _populate(self):
# populate text fields with two random number
elems = self.driver.find_elements_by_tag_name('textField')
for elem in elems:
rndNum = randint(0, 10)
elem.send_keys(rndNum)
self._values.append(rndNum)
def test_ui_computation(self):
# populate text fields with values
self._populate()
# trigger computation by using the button
buttons = self.driver.find_elements_by_tag_name("button")
buttons[0].click()
# is sum equal ?
texts = self.driver.find_elements_by_tag_name("staticText")
self.assertEqual(int(texts[0].text), self._values[0] + self._values[1])
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add example for real device (iOS).<commit_after>"""Be sure to use the latest selenium version
as there might be some problems with JSON serialization
Before running the test make sure you started appium server
with TestApp app: grunt appium:TestApp
"""
import unittest
import os
from random import randint
from selenium import webdriver
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
# set up appium
app = "io.appium.TestApp"
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'browserName': 'iOS',
'platform': 'Mac',
'version': '6.0',
'app': app
})
self._values = []
def _populate(self):
# populate text fields with two random number
elems = self.driver.find_elements_by_tag_name('textField')
for elem in elems:
rndNum = randint(0, 10)
elem.send_keys(rndNum)
self._values.append(rndNum)
def test_ui_computation(self):
# populate text fields with values
self._populate()
# trigger computation by using the button
buttons = self.driver.find_elements_by_tag_name("button")
buttons[0].click()
# is sum equal ?
texts = self.driver.find_elements_by_tag_name("staticText")
self.assertEqual(int(texts[0].text), self._values[0] + self._values[1])
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
|
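As the module docstring above says, this example expects a running Appium server started with the TestApp (grunt appium:TestApp); after that it is run directly, e.g. `python simple_real_device.py`, against an iOS device exposing the io.appium.TestApp bundle.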
5658b7267d3f7070371fb6a8d54e0c128db9b39b
|
heat/db/sqlalchemy/migrate_repo/versions/028_text_mysql_longtext.py
|
heat/db/sqlalchemy/migrate_repo/versions/028_text_mysql_longtext.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import types as sqltypes
def upgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=mysql.LONGTEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=mysql.LONGTEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=mysql.LONGTEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=mysql.LONGTEXT())
def downgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=sqltypes.TEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=sqltypes.TEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=sqltypes.TEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=sqltypes.TEXT())
|
Migrate remaining TEXT columns to LONGTEXT
|
Migrate remaining TEXT columns to LONGTEXT
To avoid cases where mysql silently truncates json data
when it is > 2^16 bytes, migrate all remaining TEXT columns
to LONGTEXT, in a similar way to the fix for bug #1210799
Combined with the fix for bug #1215501 which limits the request
body size, we should never truncate, even if mysql is configured
such that it can happen
Fixes bug #1223029
Change-Id: Ib536cfa8e2952a5e047cd8573288cdd9fee0622c
|
Python
|
apache-2.0
|
openstack/heat,ntt-sic/heat,varunarya10/heat,steveb/heat,varunarya10/heat,maestro-hybrid-cloud/heat,NeCTAR-RC/heat,noironetworks/heat,rdo-management/heat,noironetworks/heat,openstack/heat,rh-s/heat,miguelgrinberg/heat,redhat-openstack/heat,cryptickp/heat,pshchelo/heat,citrix-openstack-build/heat,ntt-sic/heat,NeCTAR-RC/heat,citrix-openstack-build/heat,dragorosson/heat,redhat-openstack/heat,JioCloud/heat,cryptickp/heat,takeshineshiro/heat,dims/heat,rh-s/heat,rdo-management/heat,dragorosson/heat,pratikmallya/heat,rickerc/heat_audit,cwolferh/heat-scratch,takeshineshiro/heat,gonzolino/heat,JioCloud/heat,jasondunsmore/heat,pratikmallya/heat,dims/heat,maestro-hybrid-cloud/heat,gonzolino/heat,cwolferh/heat-scratch,miguelgrinberg/heat,steveb/heat,srznew/heat,pshchelo/heat,srznew/heat,rickerc/heat_audit,jasondunsmore/heat
|
Migrate remaining TEXT columns to LONGTEXT
To avoid cases where mysql silently truncates json data
when it is > 2^16 bytes, migrate all remaining TEXT columns
to LONGTEXT, in a similar way to the fix for bug #1210799
Combined with the fix for bug #1215501 which limits the request
body size, we should never truncate, even if mysql is configured
such that it can happen
Fixes bug #1223029
Change-Id: Ib536cfa8e2952a5e047cd8573288cdd9fee0622c
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import types as sqltypes
def upgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=mysql.LONGTEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=mysql.LONGTEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=mysql.LONGTEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=mysql.LONGTEXT())
def downgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=sqltypes.TEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=sqltypes.TEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=sqltypes.TEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=sqltypes.TEXT())
|
<commit_before><commit_msg>Migrate remaining TEXT columns to LONGTEXT
To avoid cases where mysql silently truncates json data
when it is > 2^16 bytes, migrate all remaining TEXT columns
to LONGTEXT, in a similar way to the fix for bug #1210799
Combined with the fix for bug #1215501 which limits the request
body size, we should never truncate, even if mysql is configured
such that it can happen
Fixes bug #1223029
Change-Id: Ib536cfa8e2952a5e047cd8573288cdd9fee0622c<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import types as sqltypes
def upgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=mysql.LONGTEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=mysql.LONGTEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=mysql.LONGTEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=mysql.LONGTEXT())
def downgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=sqltypes.TEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=sqltypes.TEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=sqltypes.TEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=sqltypes.TEXT())
|
Migrate remaining TEXT columns to LONGTEXT
To avoid cases where mysql silently truncates json data
when it is > 2^16 bytes, migrate all remaining TEXT columns
to LONGTEXT, in a similar way to the fix for bug #1210799
Combined with the fix for bug #1215501 which limits the request
body size, we should never truncate, even if mysql is configured
such that it can happen
Fixes bug #1223029
Change-Id: Ib536cfa8e2952a5e047cd8573288cdd9fee0622c# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import types as sqltypes
def upgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=mysql.LONGTEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=mysql.LONGTEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=mysql.LONGTEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=mysql.LONGTEXT())
def downgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=sqltypes.TEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=sqltypes.TEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=sqltypes.TEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=sqltypes.TEXT())
|
<commit_before><commit_msg>Migrate remaining TEXT columns to LONGTEXT
To avoid cases where mysql silently truncates json data
when it is > 2^16 bytes, migrate all remaining TEXT columns
to LONGTEXT, in a similar way to the fix for bug #1210799
Combined with the fix for bug #1215501 which limits the request
body size, we should never truncate, even if mysql is configured
such that it can happen
Fixes bug #1223029
Change-Id: Ib536cfa8e2952a5e047cd8573288cdd9fee0622c<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy import types as sqltypes
def upgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=mysql.LONGTEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=mysql.LONGTEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=mysql.LONGTEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=mysql.LONGTEXT())
def downgrade(migrate_engine):
if migrate_engine.name != 'mysql':
return
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack.c.parameters.alter(type=sqltypes.TEXT())
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource.c.rsrc_metadata.alter(type=sqltypes.TEXT())
watch_rule = sqlalchemy.Table('watch_rule', meta, autoload=True)
watch_rule.c.rule.alter(type=sqltypes.TEXT())
watch_data = sqlalchemy.Table('watch_data', meta, autoload=True)
watch_data.c.data.alter(type=sqltypes.TEXT())
|
|
d709a553d73438a88e56df2aa96532f0c4d570e9
|
nbgrader/alembic/versions/50a4d84c131a_add_kernelspecs.py
|
nbgrader/alembic/versions/50a4d84c131a_add_kernelspecs.py
|
"""add kernelspecs
Revision ID: 50a4d84c131a
Revises: b6d005d67074
Create Date: 2017-06-01 16:48:02.243764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50a4d84c131a'
down_revision = 'b6d005d67074'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('notebook', sa.Column(
'kernelspec', sa.String(1024), nullable=False,
server_default='{"display_name": "Python", "name": "python", "language": "python"}'))
def downgrade():
op.drop_column('notebook', 'kernelspec')
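Note that the server_default stores the kernelspec as a JSON string in a plain string column, so anything reading it back has to decode it. A tiny illustrative sketch (mine, not part of the migration):

import json

# The default written by the migration above; real rows hold whatever
# kernelspec the notebook was saved with.
default_kernelspec = '{"display_name": "Python", "name": "python", "language": "python"}'
spec = json.loads(default_kernelspec)
assert spec['name'] == 'python' and spec['language'] == 'python'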
|
Upgrade database to add kernelspec column
|
Upgrade database to add kernelspec column
|
Python
|
bsd-3-clause
|
jhamrick/nbgrader,jupyter/nbgrader,jupyter/nbgrader,jhamrick/nbgrader,jhamrick/nbgrader,jhamrick/nbgrader,jupyter/nbgrader,jupyter/nbgrader,jupyter/nbgrader
|
Upgrade database to add kernelspec column
|
"""add kernelspecs
Revision ID: 50a4d84c131a
Revises: b6d005d67074
Create Date: 2017-06-01 16:48:02.243764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50a4d84c131a'
down_revision = 'b6d005d67074'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('notebook', sa.Column(
'kernelspec', sa.String(1024), nullable=False,
server_default='{"display_name": "Python", "name": "python", "language": "python"}'))
def downgrade():
op.drop_column('notebook', 'kernelspec')
|
<commit_before><commit_msg>Upgrade database to add kernelspec column<commit_after>
|
"""add kernelspecs
Revision ID: 50a4d84c131a
Revises: b6d005d67074
Create Date: 2017-06-01 16:48:02.243764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50a4d84c131a'
down_revision = 'b6d005d67074'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('notebook', sa.Column(
'kernelspec', sa.String(1024), nullable=False,
server_default='{"display_name": "Python", "name": "python", "language": "python"}'))
def downgrade():
op.drop_column('notebook', 'kernelspec')
|
Upgrade database to add kernelspec column"""add kernelspecs
Revision ID: 50a4d84c131a
Revises: b6d005d67074
Create Date: 2017-06-01 16:48:02.243764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50a4d84c131a'
down_revision = 'b6d005d67074'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('notebook', sa.Column(
'kernelspec', sa.String(1024), nullable=False,
server_default='{"display_name": "Python", "name": "python", "language": "python"}'))
def downgrade():
op.drop_column('notebook', 'kernelspec')
|
<commit_before><commit_msg>Upgrade database to add kernelspec column<commit_after>"""add kernelspecs
Revision ID: 50a4d84c131a
Revises: b6d005d67074
Create Date: 2017-06-01 16:48:02.243764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '50a4d84c131a'
down_revision = 'b6d005d67074'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('notebook', sa.Column(
'kernelspec', sa.String(1024), nullable=False,
server_default='{"display_name": "Python", "name": "python", "language": "python"}'))
def downgrade():
op.drop_column('notebook', 'kernelspec')
|
|
79026437668aac69c24790c3d06868067c4d0bd5
|
src/s.save_csv.py
|
src/s.save_csv.py
|
import argparse
import os
import caffe
import leveldb
import numpy as np
import pandas as pd
import dataset as data
DFLT_INPUT = 'data/nn00/test_65001'
HELP_INPUT = 'Fullpath of file/folder with results'
DFLT_NKEYS = 1304000
HELP_NKEYS = 'Number of keys in the database'
HELP_OUTPUT = 'Name of csv file for submission'
#TODO: save csv
def average_crops(arr):
"""Perform the average of arr every 10 rows
"""
avg = np.empty((arr.shape[0]/10, arr.shape[1]), np.float32)
for i in xrange(0, arr.shape[0]/10):
idx1, idx2 = i * 10, (i + 1) * 10
avg[i, :] = np.mean(arr[idx1:idx2, :], axis=0)
return avg
def dump_results(filename, arr):
"""Save arr as CSV
"""
labels = data.label_list(data.train_folder())
img_names = [os.path.basename(i) for i in data.test_list()]
data_frame = pd.DataFrame(arr, columns=labels, index=img_names)
data_frame.index.name = 'image'
data_frame.to_csv(filename)
return None
def get_blob_size(db, key='0'):
"""Return blob size
"""
val = db.Get(key)
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
return (datum.channels, datum.height, datum.width)
def levedb_to_array(filename, n_keys):
"""Return caffe blobs stored on leveldb as ndarray
"""
db = leveldb.LevelDB(filename)
blob_sz = get_blob_size(db)
dim = blob_sz[0] * blob_sz[1] * blob_sz[2]
db_mem = np.empty((n_keys, dim), np.float32)
for key, val in db.RangeIter():
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
arr = caffe.io.datum_to_array(datum)
db_mem[int(key), :] = arr.flatten()
return db_mem
def main(output_file, input_file, n_keys, **kwargs):
aug_pred = levedb_to_array(input_file, n_keys)
pred = average_crops(aug_pred)
dump_results(output_file, pred)
return None
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-i', '--input_file', type=str, default=DFLT_INPUT,
help=HELP_INPUT)
p.add_argument('-n', '--n_keys', type=int, default=DFLT_NKEYS,
help=HELP_NKEYS)
p.add_argument('output_file', type=str, help=HELP_OUTPUT)
main(**vars(p.parse_args()))
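The least obvious step in the script is the crop averaging: the test database holds 10 augmented crops per image, and average_crops() folds every block of 10 consecutive rows back into a single prediction row. A toy illustration of the same computation (mine, not from the repository):

import numpy as np

toy = np.arange(20, dtype=np.float32).reshape(20, 1)        # 2 images x 10 crops
averaged = toy.reshape(-1, 10, toy.shape[1]).mean(axis=1)   # shape (2, 1)
# averaged[0] == 4.5 (mean of rows 0-9), averaged[1] == 14.5 (rows 10-19),
# matching what average_crops() returns for the same input.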
|
Add script to create csv for submissions
|
Add script to create csv for submissions
|
Python
|
mit
|
escorciav/ndsb,escorciav/ndsb
|
Add script to create csv for submissions
|
import argparse
import os
import caffe
import leveldb
import numpy as np
import pandas as pd
import dataset as data
DFLT_INPUT = 'data/nn00/test_65001'
HELP_INPUT = 'Fullpath of file/folder with results'
DFLT_NKEYS = 1304000
HELP_NKEYS = 'Number of keys in the database'
HELP_OUTPUT = 'Name of csv file for submission'
#TODO: save csv
def average_crops(arr):
"""Perform the average of arr every 10 rows
"""
avg = np.empty((arr.shape[0]/10, arr.shape[1]), np.float32)
for i in xrange(0, arr.shape[0]/10):
idx1, idx2 = i * 10, (i + 1) * 10
avg[i, :] = np.mean(arr[idx1:idx2, :], axis=0)
return avg
def dump_results(filename, arr):
"""Save arr as CSV
"""
labels = data.label_list(data.train_folder())
img_names = [os.path.basename(i) for i in data.test_list()]
data_frame = pd.DataFrame(arr, columns=labels, index=img_names)
data_frame.index.name = 'image'
data_frame.to_csv(filename)
return None
def get_blob_size(db, key='0'):
"""Return blob size
"""
val = db.Get(key)
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
return (datum.channels, datum.height, datum.width)
def levedb_to_array(filename, n_keys):
"""Return caffe blobs stored on leveldb as ndarray
"""
db = leveldb.LevelDB(filename)
blob_sz = get_blob_size(db)
dim = blob_sz[0] * blob_sz[1] * blob_sz[2]
db_mem = np.empty((n_keys, dim), np.float32)
for key, val in db.RangeIter():
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
arr = caffe.io.datum_to_array(datum)
db_mem[int(key), :] = arr.flatten()
return db_mem
def main(output_file, input_file, n_keys, **kwargs):
aug_pred = levedb_to_array(input_file, n_keys)
pred = average_crops(aug_pred)
dump_results(output_file, pred)
return None
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-i', '--input_file', type=str, default=DFLT_INPUT,
help=HELP_INPUT)
p.add_argument('-n', '--n_keys', type=int, default=DFLT_NKEYS,
help=HELP_NKEYS)
p.add_argument('output_file', type=str, help=HELP_OUTPUT)
main(**vars(p.parse_args()))
|
<commit_before><commit_msg>Add script to create csv for submissions<commit_after>
|
import argparse
import os
import caffe
import leveldb
import numpy as np
import pandas as pd
import dataset as data
DFLT_INPUT = 'data/nn00/test_65001'
HELP_INPUT = 'Fullpath of file/folder with results'
DFLT_NKEYS = 1304000
HELP_NKEYS = 'Number of keys in the database'
HELP_OUTPUT = 'Name of csv file for submission'
#TODO: save csv
def average_crops(arr):
"""Perform the average of arr every 10 rows
"""
avg = np.empty((arr.shape[0]/10, arr.shape[1]), np.float32)
for i in xrange(0, arr.shape[0]/10):
idx1, idx2 = i * 10, (i + 1) * 10
avg[i, :] = np.mean(arr[idx1:idx2, :], axis=0)
return avg
def dump_results(filename, arr):
"""Save arr as CSV
"""
labels = data.label_list(data.train_folder())
img_names = [os.path.basename(i) for i in data.test_list()]
data_frame = pd.DataFrame(arr, columns=labels, index=img_names)
data_frame.index.name = 'image'
data_frame.to_csv(filename)
return None
def get_blob_size(db, key='0'):
"""Return blob size
"""
val = db.Get(key)
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
return (datum.channels, datum.height, datum.width)
def levedb_to_array(filename, n_keys):
"""Return caffe blobs stored on leveldb as ndarray
"""
db = leveldb.LevelDB(filename)
blob_sz = get_blob_size(db)
dim = blob_sz[0] * blob_sz[1] * blob_sz[2]
db_mem = np.empty((n_keys, dim), np.float32)
for key, val in db.RangeIter():
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
arr = caffe.io.datum_to_array(datum)
db_mem[int(key), :] = arr.flatten()
return db_mem
def main(output_file, input_file, n_keys, **kwargs):
aug_pred = levedb_to_array(input_file, n_keys)
pred = average_crops(aug_pred)
dump_results(output_file, pred)
return None
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-i', '--input_file', type=str, default=DFLT_INPUT,
help=HELP_INPUT)
p.add_argument('-n', '--n_keys', type=int, default=DFLT_NKEYS,
help=HELP_NKEYS)
p.add_argument('output_file', type=str, help=HELP_OUTPUT)
main(**vars(p.parse_args()))
|
Add script to create csv for submissionsimport argparse
import os
import caffe
import leveldb
import numpy as np
import pandas as pd
import dataset as data
DFLT_INPUT = 'data/nn00/test_65001'
HELP_INPUT = 'Fullpath of file/folder with results'
DFLT_NKEYS = 1304000
HELP_NKEYS = 'Number of keys in the database'
HELP_OUTPUT = 'Name of csv file for submission'
#TODO: save csv
def average_crops(arr):
"""Perform the average of arr every 10 rows
"""
avg = np.empty((arr.shape[0]/10, arr.shape[1]), np.float32)
for i in xrange(0, arr.shape[0]/10):
idx1, idx2 = i * 10, (i + 1) * 10
avg[i, :] = np.mean(arr[idx1:idx2, :], axis=0)
return avg
def dump_results(filename, arr):
"""Save arr as CSV
"""
labels = data.label_list(data.train_folder())
img_names = [os.path.basename(i) for i in data.test_list()]
data_frame = pd.DataFrame(arr, columns=labels, index=img_names)
data_frame.index.name = 'image'
data_frame.to_csv(filename)
return None
def get_blob_size(db, key='0'):
"""Return blob size
"""
val = db.Get(key)
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
return (datum.channels, datum.height, datum.width)
def levedb_to_array(filename, n_keys):
"""Return caffe blobs stored on leveldb as ndarray
"""
db = leveldb.LevelDB(filename)
blob_sz = get_blob_size(db)
dim = blob_sz[0] * blob_sz[1] * blob_sz[2]
db_mem = np.empty((n_keys, dim), np.float32)
for key, val in db.RangeIter():
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
arr = caffe.io.datum_to_array(datum)
db_mem[int(key), :] = arr.flatten()
return db_mem
def main(output_file, input_file, n_keys, **kwargs):
aug_pred = levedb_to_array(input_file, n_keys)
pred = average_crops(aug_pred)
dump_results(output_file, pred)
return None
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-i', '--input_file', type=str, default=DFLT_INPUT,
help=HELP_INPUT)
p.add_argument('-n', '--n_keys', type=int, default=DFLT_NKEYS,
help=HELP_NKEYS)
p.add_argument('output_file', type=str, help=HELP_OUTPUT)
main(**vars(p.parse_args()))
|
<commit_before><commit_msg>Add script to create csv for submissions<commit_after>import argparse
import os
import caffe
import leveldb
import numpy as np
import pandas as pd
import dataset as data
DFLT_INPUT = 'data/nn00/test_65001'
HELP_INPUT = 'Fullpath of file/folder with results'
DFLT_NKEYS = 1304000
HELP_NKEYS = 'Number of keys in the database'
HELP_OUTPUT = 'Name of csv file for submission'
#TODO: save csv
def average_crops(arr):
"""Perform the average of arr every 10 rows
"""
avg = np.empty((arr.shape[0]/10, arr.shape[1]), np.float32)
for i in xrange(0, arr.shape[0]/10):
idx1, idx2 = i * 10, (i + 1) * 10
avg[i, :] = np.mean(arr[idx1:idx2, :], axis=0)
return avg
def dump_results(filename, arr):
"""Save arr as CSV
"""
labels = data.label_list(data.train_folder())
img_names = [os.path.basename(i) for i in data.test_list()]
data_frame = pd.DataFrame(arr, columns=labels, index=img_names)
data_frame.index.name = 'image'
data_frame.to_csv(filename)
return None
def get_blob_size(db, key='0'):
"""Return blob size
"""
val = db.Get(key)
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
return (datum.channels, datum.height, datum.width)
def levedb_to_array(filename, n_keys):
"""Return caffe blobs stored on leveldb as ndarray
"""
db = leveldb.LevelDB(filename)
blob_sz = get_blob_size(db)
dim = blob_sz[0] * blob_sz[1] * blob_sz[2]
db_mem = np.empty((n_keys, dim), np.float32)
for key, val in db.RangeIter():
datum = caffe.io.caffe_pb2.Datum()
datum.ParseFromString(val)
arr = caffe.io.datum_to_array(datum)
db_mem[int(key), :] = arr.flatten()
return db_mem
def main(output_file, input_file, n_keys, **kwargs):
aug_pred = levedb_to_array(input_file, n_keys)
pred = average_crops(aug_pred)
dump_results(output_file, pred)
return None
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('-i', '--input_file', type=str, default=DFLT_INPUT,
help=HELP_INPUT)
p.add_argument('-n', '--n_keys', type=int, default=DFLT_NKEYS,
help=HELP_NKEYS)
p.add_argument('output_file', type=str, help=HELP_OUTPUT)
main(**vars(p.parse_args()))
|
|
1a58a6f4ca0e1d00270fbbc35120328cb769d807
|
src/swift-auth.py
|
src/swift-auth.py
|
# coding: utf-8
import requests
url = 'https://lon-identity.open.softlayer.com/v3/auth/tokens'
headers={'Content-Type': 'application/json'}
data = '''{
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"id": "",
"password": ""
}
}
},
"scope": {
"project": {
"id": ""
}
}
}
}'''
response = requests.post(url, data=data, headers=headers)
#print (response.json())
#print (response.headers)
#print (response.headers['X-Subject-Token'])
#print (response.json()['token']['catalog'][0]['endpoints'][0]['url'])
token = response.headers['X-Subject-Token']
endpoint = response.json()['token']['catalog'][7]['endpoints'][3]['url']
headers = {'X-Auth-Token': token}
response = requests.get(endpoint, headers=headers)
print (response.status_code)
print (response.headers)
print (response.text)
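The hard-coded catalog indices ([7] for the service, [3] for the endpoint) are specific to one account's token response and will break if the catalog order changes. A hedged sketch of looking the endpoint up by service type and interface instead — the field names follow the standard Keystone v3 token format; adjust if your catalog differs:

def find_endpoint(catalog, service_type='object-store', interface='public'):
    # Walk the Keystone v3 service catalog instead of relying on positions.
    for service in catalog:
        if service.get('type') != service_type:
            continue
        for ep in service.get('endpoints', []):
            if ep.get('interface') == interface:
                return ep['url']
    return None

# endpoint = find_endpoint(response.json()['token']['catalog'])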
|
Create python script to access bluemix object storage
|
Create python script to access bluemix object storage
|
Python
|
mit
|
tomoyuki-nakabayashi/bluemix-object-storage-test,tomoyuki-nakabayashi/bluemix-object-storage-test
|
Create python script to access bluemix object storage
|
# coding: utf-8
import requests
url = 'https://lon-identity.open.softlayer.com/v3/auth/tokens'
headers={'Content-Type': 'application/json'}
data = '''{
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"id": "",
"password": ""
}
}
},
"scope": {
"project": {
"id": ""
}
}
}
}'''
response = requests.post(url, data=data, headers=headers)
#print (response.json())
#print (response.headers)
#print (response.headers['X-Subject-Token'])
#print (response.json()['token']['catalog'][0]['endpoints'][0]['url'])
token = response.headers['X-Subject-Token']
endpoint = response.json()['token']['catalog'][7]['endpoints'][3]['url']
headers = {'X-Auth-Token': token}
response = requests.get(endpoint, headers=headers)
print (response.status_code)
print (response.headers)
print (response.text)
|
<commit_before><commit_msg>Create python script to access bluemix object storage<commit_after>
|
# coding: utf-8
import requests
url = 'https://lon-identity.open.softlayer.com/v3/auth/tokens'
headers={'Content-Type': 'application/json'}
data = '''{
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"id": "",
"password": ""
}
}
},
"scope": {
"project": {
"id": ""
}
}
}
}'''
response = requests.post(url, data=data, headers=headers)
#print (response.json())
#print (response.headers)
#print (response.headers['X-Subject-Token'])
#print (response.json()['token']['catalog'][0]['endpoints'][0]['url'])
token = response.headers['X-Subject-Token']
endpoint = response.json()['token']['catalog'][7]['endpoints'][3]['url']
headers = {'X-Auth-Token': token}
response = requests.get(endpoint, headers=headers)
print (response.status_code)
print (response.headers)
print (response.text)
|
Create python script to access bluemix object storage# coding: utf-8
import requests
url = 'https://lon-identity.open.softlayer.com/v3/auth/tokens'
headers={'Content-Type': 'application/json'}
data = '''{
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"id": "",
"password": ""
}
}
},
"scope": {
"project": {
"id": ""
}
}
}
}'''
response = requests.post(url, data=data, headers=headers)
#print (response.json())
#print (response.headers)
#print (response.headers['X-Subject-Token'])
#print (response.json()['token']['catalog'][0]['endpoints'][0]['url'])
token = response.headers['X-Subject-Token']
endpoint = response.json()['token']['catalog'][7]['endpoints'][3]['url']
headers = {'X-Auth-Token': token}
response = requests.get(endpoint, headers=headers)
print (response.status_code)
print (response.headers)
print (response.text)
|
<commit_before><commit_msg>Create python script to access bluemix object storage<commit_after># coding: utf-8
import requests
url = 'https://lon-identity.open.softlayer.com/v3/auth/tokens'
headers={'Content-Type': 'application/json'}
data = '''{
"auth": {
"identity": {
"methods": [
"password"
],
"password": {
"user": {
"id": "",
"password": ""
}
}
},
"scope": {
"project": {
"id": ""
}
}
}
}'''
response = requests.post(url, data=data, headers=headers)
#print (response.json())
#print (response.headers)
#print (response.headers['X-Subject-Token'])
#print (response.json()['token']['catalog'][0]['endpoints'][0]['url'])
token = response.headers['X-Subject-Token']
endpoint = response.json()['token']['catalog'][7]['endpoints'][3]['url']
headers = {'X-Auth-Token': token}
response = requests.get(endpoint, headers=headers)
print (response.status_code)
print (response.headers)
print (response.text)
|
|
c90748dba9b751f833d6663f115f1592c1293ae7
|
ucf_sub_embedded_ros/ucf_sub/src/sub_sensors/src/StartButton.py
|
ucf_sub_embedded_ros/ucf_sub/src/sub_sensors/src/StartButton.py
|
#!/usr/bin/env python3
import gpio
import rospy
import time
from std_msgs.msg import Bool
CHANNEL = 395
gpio.setup(CHANNEL, gpio.IN)
if __name__ == '__main__':
rospy.init_node("StartButton")
start_pub = rospy.Publisher("/start", Bool, queue_size=10)
r = rospy.Rate(4)
while not rospy.is_shutdown():
if not gpio.input(CHANNEL):
start_pub.publish(Bool(True))
r.sleep()
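Because the node publishes a Bool on /start only while the (active-low) button reads pressed, consumers simply subscribe to that topic. A minimal hypothetical listener, for illustration only — it is not part of this commit:

import rospy
from std_msgs.msg import Bool

def on_start(msg):
    # Fires roughly 4 times per second while the button is held down.
    if msg.data:
        rospy.loginfo('Start button pressed')

rospy.init_node('StartListener')
rospy.Subscriber('/start', Bool, on_start)
rospy.spin()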
|
Create simple node to check start button state
|
Create simple node to check start button state
|
Python
|
mit
|
RoboticsClubatUCF/RoboSub,RoboticsClubatUCF/RoboSub,RoboticsClubatUCF/RoboSub,RoboticsClubatUCF/RoboSub
|
Create simple node to check start button state
|
#!/usr/bin/env python3
import gpio
import rospy
import time
from std_msgs.msg import Bool
CHANNEL = 395
gpio.setup(CHANNEL, gpio.IN)
if __name__ == '__main__':
rospy.init_node("StartButton")
start_pub = rospy.Publisher("/start", Bool, queue_size=10)
r = rospy.Rate(4)
while not rospy.is_shutdown():
if not gpio.input(CHANNEL):
start_pub.publish(Bool(True))
r.sleep()
|
<commit_before><commit_msg>Create simple node to check start button state<commit_after>
|
#!/usr/bin/env python3
import gpio
import rospy
import time
from std_msgs.msg import Bool
CHANNEL = 395
gpio.setup(CHANNEL, gpio.IN)
if __name__ == '__main__':
rospy.init_node("StartButton")
start_pub = rospy.Publisher("/start", Bool, queue_size=10)
r = rospy.Rate(4)
while not rospy.is_shutdown():
if not gpio.input(CHANNEL):
start_pub.publish(Bool(True))
r.sleep()
|
Create simple node to check start button state#!/usr/bin/env python3
import gpio
import rospy
import time
from std_msgs.msg import Bool
CHANNEL = 395
gpio.setup(CHANNEL, gpio.IN)
if __name__ == '__main__':
rospy.init_node("StartButton")
start_pub = rospy.Publisher("/start", Bool, queue_size=10)
r = rospy.Rate(4)
while not rospy.is_shutdown():
if not gpio.input(CHANNEL):
start_pub.publish(Bool(True))
r.sleep()
|
<commit_before><commit_msg>Create simple node to check start button state<commit_after>#!/usr/bin/env python3
import gpio
import rospy
import time
from std_msgs.msg import Bool
CHANNEL = 395
gpio.setup(CHANNEL, gpio.IN)
if __name__ == '__main__':
rospy.init_node("StartButton")
start_pub = rospy.Publisher("/start", Bool, queue_size=10)
r = rospy.Rate(4)
while not rospy.is_shutdown():
if not gpio.input(CHANNEL):
start_pub.publish(Bool(True))
r.sleep()
|
|
a89ab233ab57f37b6e5bb07906fc6d54c9a0efb0
|
registrations/management/commands/repopulate_subscriptions.py
|
registrations/management/commands/repopulate_subscriptions.py
|
from os import environ
from django.core.management.base import BaseCommand, CommandError
from registrations.models import Registration
from registrations.tasks import validate_registration
from seed_services_client import StageBasedMessagingApiClient
from ._utils import validate_and_return_url
class Command(BaseCommand):
help = ("Validates all Registrations without Subscription Requests and "
"creates one for each. This should also lead to the creation of a "
"Subscription in the SMB service")
def add_arguments(self, parser):
parser.add_argument(
'--blind', action='store_false', default=True,
dest='check_subscription',
help=('Do not check with the stage based messaging API whether'
'or not a subscription for the identity already exists.'
'NOT RECOMMENDED AT ALL'))
parser.add_argument(
'--sbm-url', dest='sbm_url', type=validate_and_return_url,
default=environ.get('STAGE_BASED_MESSAGING_URL'),
help=('The Stage Based Messaging Service to verify '
'subscriptions for.'))
parser.add_argument(
'--sbm-token', dest='sbm_token',
default=environ.get('STAGE_BASED_MESSAGING_TOKEN'),
help=('The Authorization token for the SBM Service')
)
def handle(self, *args, **kwargs):
sbm_url = kwargs['sbm_url']
sbm_token = kwargs['sbm_token']
check_subscription = kwargs['check_subscription']
if check_subscription:
if not sbm_url:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_URL '
'environment variable or --sbm-url is set.')
if not sbm_token:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_TOKEN '
'environment variable or --sbm-token is set.')
client = StageBasedMessagingApiClient(sbm_token, sbm_url)
registrations = Registration.objects.filter(validated=True)
for reg in registrations:
requests = reg.get_subscription_requests()
if requests.count() > 0:
continue
for req in requests:
if check_subscription and self.count_subscriptions(client,
req):
self.log(('Registration %s without Subscription Requests '
'already has subscription (identity: %s). '
'Skipping.')
% (reg.pk, reg.mother_id))
continue
"""
validate_registration() ensures no invalid registrations get
subscriptions and creates the Subscription Request
"""
output = validate_registration(registration_id=str(reg.id))
output = output + " (%s)"
self.log(output % (reg.mother_id))
def log(self, log):
self.stdout.write('%s\n' % (log,))
def count_subscriptions(self, sbm_client, subscription_request):
subscriptions = sbm_client.get_subscriptions({
'identity': subscription_request.identity,
})
return int(subscriptions['count'])
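Typical invocations, with placeholder values for the URL and token (this is a Django management command, so it runs through manage.py):

# python manage.py repopulate_subscriptions \
#     --sbm-url https://sbm.example.org/api/v1 --sbm-token <token>
#
# Skipping the existing-subscription check (discouraged by the help text):
# python manage.py repopulate_subscriptions --blind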
|
Add management command for repopulating subscriptions
|
Add management command for repopulating subscriptions
|
Python
|
bsd-3-clause
|
praekelt/hellomama-registration,praekelt/hellomama-registration
|
Add management command for repopulating subscriptions
|
from os import environ
from django.core.management.base import BaseCommand, CommandError
from registrations.models import Registration
from registrations.tasks import validate_registration
from seed_services_client import StageBasedMessagingApiClient
from ._utils import validate_and_return_url
class Command(BaseCommand):
help = ("Validates all Registrations without Subscription Requests and "
"creates one for each. This should also lead to the creation of a "
"Subscription in the SMB service")
def add_arguments(self, parser):
parser.add_argument(
'--blind', action='store_false', default=True,
dest='check_subscription',
help=('Do not check with the stage based messaging API whether'
'or not a subscription for the identity already exists.'
'NOT RECOMMENDED AT ALL'))
parser.add_argument(
'--sbm-url', dest='sbm_url', type=validate_and_return_url,
default=environ.get('STAGE_BASED_MESSAGING_URL'),
help=('The Stage Based Messaging Service to verify '
'subscriptions for.'))
parser.add_argument(
'--sbm-token', dest='sbm_token',
default=environ.get('STAGE_BASED_MESSAGING_TOKEN'),
help=('The Authorization token for the SBM Service')
)
def handle(self, *args, **kwargs):
sbm_url = kwargs['sbm_url']
sbm_token = kwargs['sbm_token']
check_subscription = kwargs['check_subscription']
if check_subscription:
if not sbm_url:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_URL '
'environment variable or --sbm-url is set.')
if not sbm_token:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_TOKEN '
'environment variable or --sbm-token is set.')
client = StageBasedMessagingApiClient(sbm_token, sbm_url)
registrations = Registration.objects.filter(validated=True)
for reg in registrations:
requests = reg.get_subscription_requests()
if requests.count() > 0:
continue
for req in requests:
if check_subscription and self.count_subscriptions(client,
req):
self.log(('Registration %s without Subscription Requests '
'already has subscription (identity: %s). '
'Skipping.')
% (reg.pk, reg.mother_id))
continue
"""
validate_registration() ensures no invalid registrations get
subscriptions and creates the Subscription Request
"""
output = validate_registration(registration_id=str(reg.id))
output = output + " (%s)"
self.log(output % (reg.mother_id))
def log(self, log):
self.stdout.write('%s\n' % (log,))
def count_subscriptions(self, sbm_client, subscription_request):
subscriptions = sbm_client.get_subscriptions({
'identity': subscription_request.identity,
})
return int(subscriptions['count'])
|
<commit_before><commit_msg>Add management command for repopulating subscriptions<commit_after>
|
from os import environ
from django.core.management.base import BaseCommand, CommandError
from registrations.models import Registration
from registrations.tasks import validate_registration
from seed_services_client import StageBasedMessagingApiClient
from ._utils import validate_and_return_url
class Command(BaseCommand):
help = ("Validates all Registrations without Subscription Requests and "
"creates one for each. This should also lead to the creation of a "
"Subscription in the SMB service")
def add_arguments(self, parser):
parser.add_argument(
'--blind', action='store_false', default=True,
dest='check_subscription',
help=('Do not check with the stage based messaging API whether'
'or not a subscription for the identity already exists.'
'NOT RECOMMENDED AT ALL'))
parser.add_argument(
'--sbm-url', dest='sbm_url', type=validate_and_return_url,
default=environ.get('STAGE_BASED_MESSAGING_URL'),
help=('The Stage Based Messaging Service to verify '
'subscriptions for.'))
parser.add_argument(
'--sbm-token', dest='sbm_token',
default=environ.get('STAGE_BASED_MESSAGING_TOKEN'),
help=('The Authorization token for the SBM Service')
)
def handle(self, *args, **kwargs):
sbm_url = kwargs['sbm_url']
sbm_token = kwargs['sbm_token']
check_subscription = kwargs['check_subscription']
if check_subscription:
if not sbm_url:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_URL '
'environment variable or --sbm-url is set.')
if not sbm_token:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_TOKEN '
'environment variable or --sbm-token is set.')
client = StageBasedMessagingApiClient(sbm_token, sbm_url)
registrations = Registration.objects.filter(validated=True)
for reg in registrations:
requests = reg.get_subscription_requests()
if requests.count() > 0:
continue
for req in requests:
if check_subscription and self.count_subscriptions(client,
req):
self.log(('Registration %s without Subscription Requests '
'already has subscription (identity: %s). '
'Skipping.')
% (reg.pk, reg.mother_id))
continue
"""
validate_registration() ensures no invalid registrations get
subscriptions and creates the Subscription Request
"""
output = validate_registration(registration_id=str(reg.id))
output = output + " (%s)"
self.log(output % (reg.mother_id))
def log(self, log):
self.stdout.write('%s\n' % (log,))
def count_subscriptions(self, sbm_client, subscription_request):
subscriptions = sbm_client.get_subscriptions({
'identity': subscription_request.identity,
})
return int(subscriptions['count'])
|
Add management command for repopulating subscriptionsfrom os import environ
from django.core.management.base import BaseCommand, CommandError
from registrations.models import Registration
from registrations.tasks import validate_registration
from seed_services_client import StageBasedMessagingApiClient
from ._utils import validate_and_return_url
class Command(BaseCommand):
help = ("Validates all Registrations without Subscription Requests and "
"creates one for each. This should also lead to the creation of a "
"Subscription in the SMB service")
def add_arguments(self, parser):
parser.add_argument(
'--blind', action='store_false', default=True,
dest='check_subscription',
help=('Do not check with the stage based messaging API whether'
'or not a subscription for the identity already exists.'
'NOT RECOMMENDED AT ALL'))
parser.add_argument(
'--sbm-url', dest='sbm_url', type=validate_and_return_url,
default=environ.get('STAGE_BASED_MESSAGING_URL'),
help=('The Stage Based Messaging Service to verify '
'subscriptions for.'))
parser.add_argument(
'--sbm-token', dest='sbm_token',
default=environ.get('STAGE_BASED_MESSAGING_TOKEN'),
help=('The Authorization token for the SBM Service')
)
def handle(self, *args, **kwargs):
sbm_url = kwargs['sbm_url']
sbm_token = kwargs['sbm_token']
check_subscription = kwargs['check_subscription']
if check_subscription:
if not sbm_url:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_URL '
'environment variable or --sbm-url is set.')
if not sbm_token:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_TOKEN '
'environment variable or --sbm-token is set.')
client = StageBasedMessagingApiClient(sbm_token, sbm_url)
registrations = Registration.objects.filter(validated=True)
for reg in registrations:
requests = reg.get_subscription_requests()
if requests.count() > 0:
continue
for req in requests:
if check_subscription and self.count_subscriptions(client,
req):
self.log(('Registration %s without Subscription Requests '
'already has subscription (identity: %s). '
'Skipping.')
% (reg.pk, reg.mother_id))
continue
"""
validate_registration() ensures no invalid registrations get
subscriptions and creates the Subscription Request
"""
output = validate_registration(registration_id=str(reg.id))
output = output + " (%s)"
self.log(output % (reg.mother_id))
def log(self, log):
self.stdout.write('%s\n' % (log,))
def count_subscriptions(self, sbm_client, subscription_request):
subscriptions = sbm_client.get_subscriptions({
'identity': subscription_request.identity,
})
return int(subscriptions['count'])
|
<commit_before><commit_msg>Add management command for repopulating subscriptions<commit_after>from os import environ
from django.core.management.base import BaseCommand, CommandError
from registrations.models import Registration
from registrations.tasks import validate_registration
from seed_services_client import StageBasedMessagingApiClient
from ._utils import validate_and_return_url
class Command(BaseCommand):
help = ("Validates all Registrations without Subscription Requests and "
"creates one for each. This should also lead to the creation of a "
"Subscription in the SMB service")
def add_arguments(self, parser):
parser.add_argument(
'--blind', action='store_false', default=True,
dest='check_subscription',
help=('Do not check with the stage based messaging API whether'
'or not a subscription for the identity already exists.'
'NOT RECOMMENDED AT ALL'))
parser.add_argument(
'--sbm-url', dest='sbm_url', type=validate_and_return_url,
default=environ.get('STAGE_BASED_MESSAGING_URL'),
help=('The Stage Based Messaging Service to verify '
'subscriptions for.'))
parser.add_argument(
'--sbm-token', dest='sbm_token',
default=environ.get('STAGE_BASED_MESSAGING_TOKEN'),
help=('The Authorization token for the SBM Service')
)
def handle(self, *args, **kwargs):
sbm_url = kwargs['sbm_url']
sbm_token = kwargs['sbm_token']
check_subscription = kwargs['check_subscription']
if check_subscription:
if not sbm_url:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_URL '
'environment variable or --sbm-url is set.')
if not sbm_token:
raise CommandError(
'Please make sure either the STAGE_BASED_MESSAGING_TOKEN '
'environment variable or --sbm-token is set.')
client = StageBasedMessagingApiClient(sbm_token, sbm_url)
registrations = Registration.objects.filter(validated=True)
for reg in registrations:
requests = reg.get_subscription_requests()
if requests.count() > 0:
continue
for req in requests:
if check_subscription and self.count_subscriptions(client,
req):
self.log(('Registration %s without Subscription Requests '
'already has subscription (identity: %s). '
'Skipping.')
% (reg.pk, reg.mother_id))
continue
"""
validate_registration() ensures no invalid registrations get
subscriptions and creates the Subscription Request
"""
output = validate_registration(registration_id=str(reg.id))
output = output + " (%s)"
self.log(output % (reg.mother_id))
def log(self, log):
self.stdout.write('%s\n' % (log,))
def count_subscriptions(self, sbm_client, subscription_request):
subscriptions = sbm_client.get_subscriptions({
'identity': subscription_request.identity,
})
return int(subscriptions['count'])
|
|
d9818c3212e327b8de25f9844b0456e4a56270a3
|
examples/char_lcd.py
|
examples/char_lcd.py
|
#!/usr/bin/python
# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.
import math
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi pin configuration:
lcd_rs = 27 # Note this might need to be changed to 21 for older revision Pi's.
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
# BeagleBone Black configuration:
# lcd_rs = 'P9_8'
# lcd_en = 'P9_10'
# lcd_d4 = 'P9_18'
# lcd_d5 = 'P9_16'
# lcd_d6 = 'P9_14'
# lcd_d7 = 'P9_12'
# lcd_backlight = 'P9_7'
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Alternatively specify a 20x4 LCD.
# lcd_columns = 20
# lcd_rows = 4
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
# Print a two line message
lcd.message('Hello\nworld!')
# Wait 5 seconds
time.sleep(5.0)
# Demo showing the cursor.
lcd.clear()
lcd.show_cursor(True)
lcd.message('Show cursor')
time.sleep(5.0)
# Demo showing the blinking cursor.
lcd.clear()
lcd.blink(True)
lcd.message('Blink cursor')
time.sleep(5.0)
# Stop blinking and showing cursor.
lcd.show_cursor(False)
lcd.blink(False)
# Demo scrolling message right/left.
lcd.clear()
message = 'Scroll'
lcd.message(message)
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_right()
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_left()
# Demo turning backlight off and on.
lcd.clear()
lcd.message('Flash backlight\nin 5 seconds...')
time.sleep(5.0)
# Turn backlight off.
lcd.set_backlight(0)
time.sleep(2.0)
# Change message.
lcd.clear()
lcd.message('Goodbye!')
# Turn backlight on.
lcd.set_backlight(1)
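If you want something that keeps running after the demo, the same handful of calls is enough for a crude clock. This is a follow-on sketch of mine, not part of the example; it reuses the lcd object initialised above, and clearing the display every second will visibly flicker on real hardware:

while True:
    lcd.clear()
    lcd.message(time.strftime('%H:%M:%S'))   # time is already imported above
    time.sleep(1.0)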
|
Add basic char LCD example.
|
Add basic char LCD example.
|
Python
|
mit
|
jfabi/infomini,mtnahill/DomeTrainingRig,adafruit/Adafruit_Python_CharLCD,sylvandb/Adafruit_Python_CharLCD,retrography/Adafruit_Python_CharLCD,mtnahill/DomeTrainingRig,thurstylark/Adafruit_Python_CharLCD,gik60/LabProject,saraf/Adafruit_Python_CharLCD
|
Add basic char LCD example.
|
#!/usr/bin/python
# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.
import math
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi pin configuration:
lcd_rs = 27 # Note this might need to be changed to 21 for older revision Pi's.
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
# BeagleBone Black configuration:
# lcd_rs = 'P9_8'
# lcd_en = 'P9_10'
# lcd_d4 = 'P9_18'
# lcd_d5 = 'P9_16'
# lcd_d6 = 'P9_14'
# lcd_d7 = 'P9_12'
# lcd_backlight = 'P9_7'
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Alternatively specify a 20x4 LCD.
# lcd_columns = 20
# lcd_rows = 4
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
# Print a two line message
lcd.message('Hello\nworld!')
# Wait 5 seconds
time.sleep(5.0)
# Demo showing the cursor.
lcd.clear()
lcd.show_cursor(True)
lcd.message('Show cursor')
time.sleep(5.0)
# Demo showing the blinking cursor.
lcd.clear()
lcd.blink(True)
lcd.message('Blink cursor')
time.sleep(5.0)
# Stop blinking and showing cursor.
lcd.show_cursor(False)
lcd.blink(False)
# Demo scrolling message right/left.
lcd.clear()
message = 'Scroll'
lcd.message(message)
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_right()
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_left()
# Demo turning backlight off and on.
lcd.clear()
lcd.message('Flash backlight\nin 5 seconds...')
time.sleep(5.0)
# Turn backlight off.
lcd.set_backlight(0)
time.sleep(2.0)
# Change message.
lcd.clear()
lcd.message('Goodbye!')
# Turn backlight on.
lcd.set_backlight(1)
|
<commit_before><commit_msg>Add basic char LCD example.<commit_after>
|
#!/usr/bin/python
# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.
import math
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi pin configuration:
lcd_rs = 27 # Note this might need to be changed to 21 for older revision Pi's.
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
# BeagleBone Black configuration:
# lcd_rs = 'P9_8'
# lcd_en = 'P9_10'
# lcd_d4 = 'P9_18'
# lcd_d5 = 'P9_16'
# lcd_d6 = 'P9_14'
# lcd_d7 = 'P9_12'
# lcd_backlight = 'P9_7'
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Alternatively specify a 20x4 LCD.
# lcd_columns = 20
# lcd_rows = 4
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
# Print a two line message
lcd.message('Hello\nworld!')
# Wait 5 seconds
time.sleep(5.0)
# Demo showing the cursor.
lcd.clear()
lcd.show_cursor(True)
lcd.message('Show cursor')
time.sleep(5.0)
# Demo showing the blinking cursor.
lcd.clear()
lcd.blink(True)
lcd.message('Blink cursor')
time.sleep(5.0)
# Stop blinking and showing cursor.
lcd.show_cursor(False)
lcd.blink(False)
# Demo scrolling message right/left.
lcd.clear()
message = 'Scroll'
lcd.message(message)
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_right()
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_left()
# Demo turning backlight off and on.
lcd.clear()
lcd.message('Flash backlight\nin 5 seconds...')
time.sleep(5.0)
# Turn backlight off.
lcd.set_backlight(0)
time.sleep(2.0)
# Change message.
lcd.clear()
lcd.message('Goodbye!')
# Turn backlight on.
lcd.set_backlight(1)
|
Add basic char LCD example.#!/usr/bin/python
# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.
import math
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi pin configuration:
lcd_rs = 27 # Note this might need to be changed to 21 for older revision Pi's.
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
# BeagleBone Black configuration:
# lcd_rs = 'P9_8'
# lcd_en = 'P9_10'
# lcd_d4 = 'P9_18'
# lcd_d5 = 'P9_16'
# lcd_d6 = 'P9_14'
# lcd_d7 = 'P9_12'
# lcd_backlight = 'P9_7'
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Alternatively specify a 20x4 LCD.
# lcd_columns = 20
# lcd_rows = 4
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
# Print a two line message
lcd.message('Hello\nworld!')
# Wait 5 seconds
time.sleep(5.0)
# Demo showing the cursor.
lcd.clear()
lcd.show_cursor(True)
lcd.message('Show cursor')
time.sleep(5.0)
# Demo showing the blinking cursor.
lcd.clear()
lcd.blink(True)
lcd.message('Blink cursor')
time.sleep(5.0)
# Stop blinking and showing cursor.
lcd.show_cursor(False)
lcd.blink(False)
# Demo scrolling message right/left.
lcd.clear()
message = 'Scroll'
lcd.message(message)
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_right()
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_left()
# Demo turning backlight off and on.
lcd.clear()
lcd.message('Flash backlight\nin 5 seconds...')
time.sleep(5.0)
# Turn backlight off.
lcd.set_backlight(0)
time.sleep(2.0)
# Change message.
lcd.clear()
lcd.message('Goodbye!')
# Turn backlight on.
lcd.set_backlight(1)
|
<commit_before><commit_msg>Add basic char LCD example.<commit_after>#!/usr/bin/python
# Example using a character LCD connected to a Raspberry Pi or BeagleBone Black.
import math
import time
import Adafruit_CharLCD as LCD
# Raspberry Pi pin configuration:
lcd_rs = 27 # Note this might need to be changed to 21 for older revision Pi's.
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
# BeagleBone Black configuration:
# lcd_rs = 'P9_8'
# lcd_en = 'P9_10'
# lcd_d4 = 'P9_18'
# lcd_d5 = 'P9_16'
# lcd_d6 = 'P9_14'
# lcd_d7 = 'P9_12'
# lcd_backlight = 'P9_7'
# Define LCD column and row size for 16x2 LCD.
lcd_columns = 16
lcd_rows = 2
# Alternatively specify a 20x4 LCD.
# lcd_columns = 20
# lcd_rows = 4
# Initialize the LCD using the pins above.
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
# Print a two line message
lcd.message('Hello\nworld!')
# Wait 5 seconds
time.sleep(5.0)
# Demo showing the cursor.
lcd.clear()
lcd.show_cursor(True)
lcd.message('Show cursor')
time.sleep(5.0)
# Demo showing the blinking cursor.
lcd.clear()
lcd.blink(True)
lcd.message('Blink cursor')
time.sleep(5.0)
# Stop blinking and showing cursor.
lcd.show_cursor(False)
lcd.blink(False)
# Demo scrolling message right/left.
lcd.clear()
message = 'Scroll'
lcd.message(message)
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_right()
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_left()
# Demo turning backlight off and on.
lcd.clear()
lcd.message('Flash backlight\nin 5 seconds...')
time.sleep(5.0)
# Turn backlight off.
lcd.set_backlight(0)
time.sleep(2.0)
# Change message.
lcd.clear()
lcd.message('Goodbye!')
# Turn backlight on.
lcd.set_backlight(1)
|
|
5059c8776c6ddc16c2f037e40dd0af849e9ca673
|
alembic/versions/2ea9623b21fa_add_an_active_field_to_filter.py
|
alembic/versions/2ea9623b21fa_add_an_active_field_to_filter.py
|
"""Add an active field to Filter
Revision ID: 2ea9623b21fa
Revises: 18ebf3181f87
Create Date: 2014-09-03 09:37:39.653039
"""
# revision identifiers, used by Alembic.
revision = '2ea9623b21fa'
down_revision = '18ebf3181f87'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'filters',
sa.Column(
'active',
sa.Boolean(),
default=True,
nullable=False)
)
def downgrade():
op.drop_column('filters', 'active')
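One detail worth flagging (my observation, not part of the change): default=True is a client-side SQLAlchemy default, so it is applied on new inserts but is not written into the DDL and does not backfill rows that already exist when the column is added; combined with nullable=False, that can make the ALTER fail on an already-populated table depending on the backend. A variant that lets the database fill existing rows would look roughly like this:

op.add_column(
    'filters',
    sa.Column(
        'active',
        sa.Boolean(),
        server_default=sa.true(),
        nullable=False)
)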
|
Add an alembic migration script adding the `active` field to the filters table
|
Add an alembic migration script adding the `active` field to the filters table
Relates to https://github.com/fedora-infra/fmn/issues/13
|
Python
|
lgpl-2.1
|
jeremycline/fmn,jeremycline/fmn,jeremycline/fmn
|
Add an alembic migration script adding the `active` field to the filters table
Relates to https://github.com/fedora-infra/fmn/issues/13
|
"""Add an active field to Filter
Revision ID: 2ea9623b21fa
Revises: 18ebf3181f87
Create Date: 2014-09-03 09:37:39.653039
"""
# revision identifiers, used by Alembic.
revision = '2ea9623b21fa'
down_revision = '18ebf3181f87'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'filters',
sa.Column(
'active',
sa.Boolean(),
default=True,
nullable=False)
)
def downgrade():
op.drop_column('filters', 'active')
|
<commit_before><commit_msg>Add an alembic migration script adding the `active` field to the filters table
Relates to https://github.com/fedora-infra/fmn/issues/13<commit_after>
|
"""Add an active field to Filter
Revision ID: 2ea9623b21fa
Revises: 18ebf3181f87
Create Date: 2014-09-03 09:37:39.653039
"""
# revision identifiers, used by Alembic.
revision = '2ea9623b21fa'
down_revision = '18ebf3181f87'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'filters',
sa.Column(
'active',
sa.Boolean(),
default=True,
nullable=False)
)
def downgrade():
op.drop_column('filters', 'active')
|
Add an alembic migration script adding the `active` field to the filters table
Relates to https://github.com/fedora-infra/fmn/issues/13"""Add an active field to Filter
Revision ID: 2ea9623b21fa
Revises: 18ebf3181f87
Create Date: 2014-09-03 09:37:39.653039
"""
# revision identifiers, used by Alembic.
revision = '2ea9623b21fa'
down_revision = '18ebf3181f87'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'filters',
sa.Column(
'active',
sa.Boolean(),
default=True,
nullable=False)
)
def downgrade():
op.drop_column('filters', 'active')
|
<commit_before><commit_msg>Add an alembic migration script adding the `active` field to the filters table
Relates to https://github.com/fedora-infra/fmn/issues/13<commit_after>"""Add an active field to Filter
Revision ID: 2ea9623b21fa
Revises: 18ebf3181f87
Create Date: 2014-09-03 09:37:39.653039
"""
# revision identifiers, used by Alembic.
revision = '2ea9623b21fa'
down_revision = '18ebf3181f87'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'filters',
sa.Column(
'active',
sa.Boolean(),
default=True,
nullable=False)
)
def downgrade():
op.drop_column('filters', 'active')
|
|
406ea23adcf67e4bcce5959dac2f1386374a6b34
|
analysis/plot-target-approach.py
|
analysis/plot-target-approach.py
|
import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block00/*circuit??.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
data = {(m, s): [] for m in markers.split() for s in range(12)}
columns = num_frames = None
for trial in experiment.Experiment(root).trials_matching(pattern):
trial.realign(order=1)
if columns is None:
columns = {h: i for i, h in trial.marker_columns}
num_frames = int(approach_sec * trial.approx_frame_rate)
df = trial.movement_to(target_num)
for marker, source in data:
if all(df.source == source):
col = columns[marker]
df_ = df.iloc[-num_frames:, col:col+3].reset_index(drop=True)
df_.columns = list('xyz')
data[marker, source].append(df_)
with plots.space() as ax:
for i, (marker, keys) in enumerate(itertools.groupby(sorted(data), lambda x: x[0])):
for j, (_, source) in enumerate(keys):
for df in data[marker, source]:
ax.plot(np.asarray(df.x),
np.asarray(df.z),
zs=np.asarray(df.y),
color=lmj.plot.COLOR11[i],
alpha=0.7)
if __name__ == '__main__':
climate.call(main)
|
Add a starter script for plotting marker trajectories approaching targets.
|
Add a starter script for plotting marker trajectories approaching targets.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add a starter script for plotting marker trajectories approaching targets.
|
import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block00/*circuit??.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
data = {(m, s): [] for m in markers.split() for s in range(12)}
columns = num_frames = None
for trial in experiment.Experiment(root).trials_matching(pattern):
trial.realign(order=1)
if columns is None:
columns = {h: i for i, h in trial.marker_columns}
num_frames = int(approach_sec * trial.approx_frame_rate)
df = trial.movement_to(target_num)
for marker, source in data:
if all(df.source == source):
col = columns[marker]
df_ = df.iloc[-num_frames:, col:col+3].reset_index(drop=True)
df_.columns = list('xyz')
data[marker, source].append(df_)
with plots.space() as ax:
for i, (marker, keys) in enumerate(itertools.groupby(sorted(data), lambda x: x[0])):
for j, (_, source) in enumerate(keys):
for df in data[marker, source]:
ax.plot(np.asarray(df.x),
np.asarray(df.z),
zs=np.asarray(df.y),
color=lmj.plot.COLOR11[i],
alpha=0.7)
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add a starter script for plotting marker trajectories approaching targets.<commit_after>
|
import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block00/*circuit??.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
data = {(m, s): [] for m in markers.split() for s in range(12)}
columns = num_frames = None
for trial in experiment.Experiment(root).trials_matching(pattern):
trial.realign(order=1)
if columns is None:
columns = {h: i for i, h in trial.marker_columns}
num_frames = int(approach_sec * trial.approx_frame_rate)
df = trial.movement_to(target_num)
for marker, source in data:
if all(df.source == source):
col = columns[marker]
df_ = df.iloc[-num_frames:, col:col+3].reset_index(drop=True)
df_.columns = list('xyz')
data[marker, source].append(df_)
with plots.space() as ax:
for i, (marker, keys) in enumerate(itertools.groupby(sorted(data), lambda x: x[0])):
for j, (_, source) in enumerate(keys):
for df in data[marker, source]:
ax.plot(np.asarray(df.x),
np.asarray(df.z),
zs=np.asarray(df.y),
color=lmj.plot.COLOR11[i],
alpha=0.7)
if __name__ == '__main__':
climate.call(main)
|
Add a starter script for plotting marker trajectories approaching targets.import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block00/*circuit??.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
data = {(m, s): [] for m in markers.split() for s in range(12)}
columns = num_frames = None
for trial in experiment.Experiment(root).trials_matching(pattern):
trial.realign(order=1)
if columns is None:
columns = {h: i for i, h in trial.marker_columns}
num_frames = int(approach_sec * trial.approx_frame_rate)
df = trial.movement_to(target_num)
for marker, source in data:
if all(df.source == source):
col = columns[marker]
df_ = df.iloc[-num_frames:, col:col+3].reset_index(drop=True)
df_.columns = list('xyz')
data[marker, source].append(df_)
with plots.space() as ax:
for i, (marker, keys) in enumerate(itertools.groupby(sorted(data), lambda x: x[0])):
for j, (_, source) in enumerate(keys):
for df in data[marker, source]:
ax.plot(np.asarray(df.x),
np.asarray(df.z),
zs=np.asarray(df.y),
color=lmj.plot.COLOR11[i],
alpha=0.7)
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add a starter script for plotting marker trajectories approaching targets.<commit_after>import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block00/*circuit??.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
data = {(m, s): [] for m in markers.split() for s in range(12)}
columns = num_frames = None
for trial in experiment.Experiment(root).trials_matching(pattern):
trial.realign(order=1)
if columns is None:
columns = {h: i for i, h in trial.marker_columns}
num_frames = int(approach_sec * trial.approx_frame_rate)
df = trial.movement_to(target_num)
for marker, source in data:
if all(df.source == source):
col = columns[marker]
df_ = df.iloc[-num_frames:, col:col+3].reset_index(drop=True)
df_.columns = list('xyz')
data[marker, source].append(df_)
with plots.space() as ax:
for i, (marker, keys) in enumerate(itertools.groupby(sorted(data), lambda x: x[0])):
for j, (_, source) in enumerate(keys):
for df in data[marker, source]:
ax.plot(np.asarray(df.x),
np.asarray(df.z),
zs=np.asarray(df.y),
color=lmj.plot.COLOR11[i],
alpha=0.7)
if __name__ == '__main__':
climate.call(main)
|
|
87be0fa9d4cd105ffa27c743e79b02689f0602d3
|
python/static_catalog.py
|
python/static_catalog.py
|
from pymongo import MongoClient
path = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
def load_data():
client = MongoClient('localhost', 27017)
db = client['rgz']
catalog = db['catalog']
return catalog
def run_static(catalog):
    filename = '%s/csv/static_catalog.csv' % path
with open(filename,'w') as f:
# Header
print >> f,'source_id peak1_ra peak1_dec peak1_flux peak2_ra peak2_dec peak2_flux wise_ra wise_dec wise_w1mag'
# Data requested by Larry for double-peaked sources
for c in catalog.find({'radio.numberComponents':2,'AllWISE':{'$exists':True}}):
print >> f,'RGZ_%i' % c['catalog_id'],c['radio']['peaks'][0]['ra'],c['radio']['peaks'][0]['dec'],c['radio']['peaks'][0]['flux'],c['radio']['peaks'][1]['ra'],c['radio']['peaks'][1]['dec'],c['radio']['peaks'][1]['flux'],c['AllWISE']['ra'],c['AllWISE']['dec'],c['AllWISE']['w1mpro']
if __name__ == "__main__":
catalog = load_data()
run_static(catalog)
|
Create an ASCII version of static catalog for Larry
|
Create an ASCII version of static catalog for Larry
|
Python
|
mit
|
willettk/rgz-analysis,willettk/rgz-analysis,willettk/rgz-analysis,afgaron/rgz-analysis,afgaron/rgz-analysis,afgaron/rgz-analysis
|
Create an ASCII version of static catalog for Larry
|
from pymongo import MongoClient
path = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
def load_data():
client = MongoClient('localhost', 27017)
db = client['rgz']
catalog = db['catalog']
return catalog
def run_static(catalog):
    filename = '%s/csv/static_catalog.csv' % path
with open(filename,'w') as f:
# Header
print >> f,'source_id peak1_ra peak1_dec peak1_flux peak2_ra peak2_dec peak2_flux wise_ra wise_dec wise_w1mag'
# Data requested by Larry for double-peaked sources
for c in catalog.find({'radio.numberComponents':2,'AllWISE':{'$exists':True}}):
print >> f,'RGZ_%i' % c['catalog_id'],c['radio']['peaks'][0]['ra'],c['radio']['peaks'][0]['dec'],c['radio']['peaks'][0]['flux'],c['radio']['peaks'][1]['ra'],c['radio']['peaks'][1]['dec'],c['radio']['peaks'][1]['flux'],c['AllWISE']['ra'],c['AllWISE']['dec'],c['AllWISE']['w1mpro']
if __name__ == "__main__":
catalog = load_data()
run_static(catalog)
|
<commit_before><commit_msg>Create an ASCII version of static catalog for Larry<commit_after>
|
from pymongo import MongoClient
path = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
def load_data():
client = MongoClient('localhost', 27017)
db = client['rgz']
catalog = db['catalog']
return catalog
def run_static(catalog):
    filename = '%s/csv/static_catalog.csv' % path
with open(filename,'w') as f:
# Header
print >> f,'source_id peak1_ra peak1_dec peak1_flux peak2_ra peak2_dec peak2_flux wise_ra wise_dec wise_w1mag'
# Data requested by Larry for double-peaked sources
for c in catalog.find({'radio.numberComponents':2,'AllWISE':{'$exists':True}}):
print >> f,'RGZ_%i' % c['catalog_id'],c['radio']['peaks'][0]['ra'],c['radio']['peaks'][0]['dec'],c['radio']['peaks'][0]['flux'],c['radio']['peaks'][1]['ra'],c['radio']['peaks'][1]['dec'],c['radio']['peaks'][1]['flux'],c['AllWISE']['ra'],c['AllWISE']['dec'],c['AllWISE']['w1mpro']
if __name__ == "__main__":
catalog = load_data()
run_static(catalog)
|
Create an ASCII version of static catalog for Larryfrom pymongo import MongoClient
path = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
def load_data():
client = MongoClient('localhost', 27017)
db = client['rgz']
catalog = db['catalog']
return catalog
def run_static(catalog):
    filename = '%s/csv/static_catalog.csv' % path
with open(filename,'w') as f:
# Header
print >> f,'source_id peak1_ra peak1_dec peak1_flux peak2_ra peak2_dec peak2_flux wise_ra wise_dec wise_w1mag'
# Data requested by Larry for double-peaked sources
for c in catalog.find({'radio.numberComponents':2,'AllWISE':{'$exists':True}}):
print >> f,'RGZ_%i' % c['catalog_id'],c['radio']['peaks'][0]['ra'],c['radio']['peaks'][0]['dec'],c['radio']['peaks'][0]['flux'],c['radio']['peaks'][1]['ra'],c['radio']['peaks'][1]['dec'],c['radio']['peaks'][1]['flux'],c['AllWISE']['ra'],c['AllWISE']['dec'],c['AllWISE']['w1mpro']
if __name__ == "__main__":
catalog = load_data()
run_static(catalog)
|
<commit_before><commit_msg>Create an ASCII version of static catalog for Larry<commit_after>from pymongo import MongoClient
path = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
def load_data():
client = MongoClient('localhost', 27017)
db = client['rgz']
catalog = db['catalog']
return catalog
def run_static(catalog):
    filename = '%s/csv/static_catalog.csv' % path
with open(filename,'w') as f:
# Header
print >> f,'source_id peak1_ra peak1_dec peak1_flux peak2_ra peak2_dec peak2_flux wise_ra wise_dec wise_w1mag'
# Data requested by Larry for double-peaked sources
for c in catalog.find({'radio.numberComponents':2,'AllWISE':{'$exists':True}}):
print >> f,'RGZ_%i' % c['catalog_id'],c['radio']['peaks'][0]['ra'],c['radio']['peaks'][0]['dec'],c['radio']['peaks'][0]['flux'],c['radio']['peaks'][1]['ra'],c['radio']['peaks'][1]['dec'],c['radio']['peaks'][1]['flux'],c['AllWISE']['ra'],c['AllWISE']['dec'],c['AllWISE']['w1mpro']
if __name__ == "__main__":
catalog = load_data()
run_static(catalog)
|
|
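An illustrative way (assumed usage, not part of rgz-analysis) to read the space-separated catalog written above back into a DataFrame:
import pandas as pd
# The file has a whitespace-separated header row followed by data rows.
catalog_df = pd.read_csv('static_catalog.csv', delim_whitespace=True)
print(catalog_df[['source_id', 'wise_w1mag']].head())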
dca8802b77a4682d9f6a09e68cdc807736e830a8
|
fmn/rules/buidsys.py
|
fmn/rules/buidsys.py
|
def buildsys_build_state_change(config, message):
""" Buildsys: build changed state (started, failed, finished)
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.build.state.change')
def buildsys_package_list_change(config, message):
""" Buildsys: Package list changed
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.package.list.change')
def buildsys_repo_done(config, message):
""" Buildsys: Building a repo has finished
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.done')
def buildsys_repo_init(config, message):
""" Buildsys: Building a repo has started
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.init')
def buildsys_tag(config, message):
""" Buildsys: A package has been tagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.tag')
def buildsys_untag(config, message):
""" Buildsys: A package has been untagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.untag')
|
Add filters for the buildsystem messages
|
Add filters for the buildsystem messages
|
Python
|
lgpl-2.1
|
jeremycline/fmn,jeremycline/fmn,jeremycline/fmn
|
Add filters for the buildsystem messages
|
def buildsys_build_state_change(config, message):
""" Buildsys: build changed state (started, failed, finished)
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.build.state.change')
def buildsys_package_list_change(config, message):
""" Buildsys: Package list changed
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.package.list.change')
def buildsys_repo_done(config, message):
""" Buildsys: Building a repo has finished
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.done')
def buildsys_repo_init(config, message):
""" Buildsys: Building a repo has started
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.init')
def buildsys_tag(config, message):
""" Buildsys: A package has been tagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.tag')
def buildsys_untag(config, message):
""" Buildsys: A package has been untagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.untag')
|
<commit_before><commit_msg>Add filters for the buildsystem messages<commit_after>
|
def buildsys_build_state_change(config, message):
""" Buildsys: build changed state (started, failed, finished)
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.build.state.change')
def buildsys_package_list_change(config, message):
""" Buildsys: Package list changed
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.package.list.change')
def buildsys_repo_done(config, message):
""" Buildsys: Building a repo has finished
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.done')
def buildsys_repo_init(config, message):
""" Buildsys: Building a repo has started
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.init')
def buildsys_tag(config, message):
""" Buildsys: A package has been tagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.tag')
def buildsys_untag(config, message):
""" Buildsys: A package has been untagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.untag')
|
Add filters for the buildsystem messages
def buildsys_build_state_change(config, message):
""" Buildsys: build changed state (started, failed, finished)
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.build.state.change')
def buildsys_package_list_change(config, message):
""" Buildsys: Package list changed
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.package.list.change')
def buildsys_repo_done(config, message):
""" Buildsys: Building a repo has finished
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.done')
def buildsys_repo_init(config, message):
""" Buildsys: Building a repo has started
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.init')
def buildsys_tag(config, message):
""" Buildsys: A package has been tagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.tag')
def buildsys_untag(config, message):
""" Buildsys: A package has been untagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.untag')
|
<commit_before><commit_msg>Add filters for the buildsystem messages<commit_after>
def buildsys_build_state_change(config, message):
""" Buildsys: build changed state (started, failed, finished)
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.build.state.change')
def buildsys_package_list_change(config, message):
""" Buildsys: Package list changed
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.package.list.change')
def buildsys_repo_done(config, message):
""" Buildsys: Building a repo has finished
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.done')
def buildsys_repo_init(config, message):
""" Buildsys: Building a repo has started
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.repo.init')
def buildsys_tag(config, message):
""" Buildsys: A package has been tagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.tag')
def buildsys_untag(config, message):
""" Buildsys: A package has been untagged
TODO description for the web interface goes here
"""
return message['topic'].endswith('buildsys.untag')
|
|
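A minimal check of how these topic-suffix predicates are meant to be called; the message dict below is fabricated for illustration rather than taken from a real fedmsg payload.
msg = {'topic': 'org.fedoraproject.prod.buildsys.tag'}
config = {}
assert buildsys_tag(config, msg)
assert not buildsys_untag(config, msg)
assert not buildsys_repo_done(config, msg)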
94f1fc2b227ad2b6c1c441e74352b108688f0172
|
loinc_generate.py
|
loinc_generate.py
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int)
parser.add_argument("--num", type=int)
parser.add_argument("--noX", action="count")
args = parser.parse_args()
start = args.start
num = args.num
noX = args.noX
def nextTen(n):
if (n % 10):
n = n + (10 - n % 10)
return n
def addChecksum(code):
codeStr = str(code)
# 1. convert code to character array and reverse to assign positions
codeArr = list(codeStr)[::-1]
# 2. get the odd numbered values and convert to integer
odd = int("".join(codeArr[0::2]))
# 3. multiply by 2
mult = odd*2
# 4. Take the even digit positions
even = int("".join(codeArr[1::2]))
# 5. Append the even value to the front of the value in #3
app = str(even) + str(mult)
# 6. Add the digits together
appArr = list(str(app))
sum = 0
for x in appArr:
sum += int(x)
# 7. Find next multiple of 10
multTen = nextTen(sum)
cksum = multTen - sum
return str(code) + "-" + str(cksum)
# main program
codes = []
for i in range(start, start+num):
code = addChecksum(i)
if noX == None:
code = "X" + code
codes.append(code)
for c in codes:
print(c)
|
Add script to generate custom loinc codes
|
Add script to generate custom loinc codes
|
Python
|
apache-2.0
|
ibmkendrick/streamsx.health,ibmkendrick/streamsx.health,IBMStreams/streamsx.health,IBMStreams/streamsx.health,ibmkendrick/streamsx.health,IBMStreams/streamsx.health,IBMStreams/streamsx.health,ibmkendrick/streamsx.health,IBMStreams/streamsx.health,ibmkendrick/streamsx.health
|
Add script to generate custom loinc codes
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int)
parser.add_argument("--num", type=int)
parser.add_argument("--noX", action="count")
args = parser.parse_args()
start = args.start
num = args.num
noX = args.noX
def nextTen(n):
if (n % 10):
n = n + (10 - n % 10)
return n
def addChecksum(code):
codeStr = str(code)
# 1. convert code to character array and reverse to assign positions
codeArr = list(codeStr)[::-1]
# 2. get the odd numbered values and convert to integer
odd = int("".join(codeArr[0::2]))
# 3. multiply by 2
mult = odd*2
# 4. Take the even digit positions
even = int("".join(codeArr[1::2]))
# 5. Append the even value to the front of the value in #3
app = str(even) + str(mult)
# 6. Add the digits together
appArr = list(str(app))
sum = 0
for x in appArr:
sum += int(x)
# 7. Find next multiple of 10
multTen = nextTen(sum)
cksum = multTen - sum
return str(code) + "-" + str(cksum)
# main program
codes = []
for i in range(start, start+num):
code = addChecksum(i)
if noX == None:
code = "X" + code
codes.append(code)
for c in codes:
print(c)
|
<commit_before><commit_msg>Add script to generate custom loinc codes<commit_after>
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int)
parser.add_argument("--num", type=int)
parser.add_argument("--noX", action="count")
args = parser.parse_args()
start = args.start
num = args.num
noX = args.noX
def nextTen(n):
if (n % 10):
n = n + (10 - n % 10)
return n
def addChecksum(code):
codeStr = str(code)
# 1. convert code to character array and reverse to assign positions
codeArr = list(codeStr)[::-1]
# 2. get the odd numbered values and convert to integer
odd = int("".join(codeArr[0::2]))
# 3. multiply by 2
mult = odd*2
# 4. Take the even digit positions
even = int("".join(codeArr[1::2]))
# 5. Append the even value to the front of the value in #3
app = str(even) + str(mult)
# 6. Add the digits together
appArr = list(str(app))
sum = 0
for x in appArr:
sum += int(x)
# 7. Find next multiple of 10
multTen = nextTen(sum)
cksum = multTen - sum
return str(code) + "-" + str(cksum)
# main program
codes = []
for i in range(start, start+num):
code = addChecksum(i)
if noX == None:
code = "X" + code
codes.append(code)
for c in codes:
print(c)
|
Add script to generate custom loinc codesimport argparse
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int)
parser.add_argument("--num", type=int)
parser.add_argument("--noX", action="count")
args = parser.parse_args()
start = args.start
num = args.num
noX = args.noX
def nextTen(n):
if (n % 10):
n = n + (10 - n % 10)
return n
def addChecksum(code):
codeStr = str(code)
# 1. convert code to character array and reverse to assign positions
codeArr = list(codeStr)[::-1]
# 2. get the odd numbered values and convert to integer
odd = int("".join(codeArr[0::2]))
# 3. multiply by 2
mult = odd*2
# 4. Take the even digit positions
even = int("".join(codeArr[1::2]))
# 5. Append the even value to the front of the value in #3
app = str(even) + str(mult)
# 6. Add the digits together
appArr = list(str(app))
sum = 0
for x in appArr:
sum += int(x)
# 7. Find next multiple of 10
multTen = nextTen(sum)
cksum = multTen - sum
return str(code) + "-" + str(cksum)
# main program
codes = []
for i in range(start, start+num):
code = addChecksum(i)
if noX == None:
code = "X" + code
codes.append(code)
for c in codes:
print(c)
|
<commit_before><commit_msg>Add script to generate custom loinc codes<commit_after>import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int)
parser.add_argument("--num", type=int)
parser.add_argument("--noX", action="count")
args = parser.parse_args()
start = args.start
num = args.num
noX = args.noX
def nextTen(n):
if (n % 10):
n = n + (10 - n % 10)
return n
def addChecksum(code):
codeStr = str(code)
# 1. convert code to character array and reverse to assign positions
codeArr = list(codeStr)[::-1]
# 2. get the odd numbered values and convert to integer
odd = int("".join(codeArr[0::2]))
# 3. multiply by 2
mult = odd*2
# 4. Take the even digit positions
even = int("".join(codeArr[1::2]))
# 5. Append the even value to the front of the value in #3
app = str(even) + str(mult)
# 6. Add the digits together
appArr = list(str(app))
sum = 0
for x in appArr:
sum += int(x)
# 7. Find next multiple of 10
multTen = nextTen(sum)
cksum = multTen - sum
return str(code) + "-" + str(cksum)
# main program
codes = []
for i in range(start, start+num):
code = addChecksum(i)
if noX == None:
code = "X" + code
codes.append(code)
for c in codes:
print(c)
|
|
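A quick sanity check of the checksum routine against a published LOINC code (2345-7, serum glucose); the expected values are a worked example added here, not output from the original script.
assert nextTen(13) == 20 and nextTen(20) == 20
assert addChecksum(2345) == '2345-7'
print('checksum logic reproduces LOINC 2345-7')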
d7641279fee4adcf393a68143612199773fcd4c2
|
st2common/tests/test_util_sandboxing.py
|
st2common/tests/test_util_sandboxing.py
|
import os
import sys
import unittest
import mock
from st2common.util.sandboxing import get_sandbox_python_path
class SandboxingUtilsTestCase(unittest.TestCase):
@mock.patch('st2common.util.sandboxing.get_python_lib')
def test_get_sandbox_python_path(self, mock_get_python_lib):
# No inheritence
python_path = get_sandbox_python_path(inherit_from_parent=False,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':')
# Inherit python path from current process
# Mock the current process python path
old_python_path = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = ':/data/test1:/data/test2'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (not running inside virtualenv)
old_real_prefix = sys.real_prefix
del sys.real_prefix
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (running inside virtualenv)
sys.real_prefix = '/usr'
mock_get_python_lib.return_value = sys.prefix + '/virtualenvtest'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=True)
self.assertEqual(python_path, ':/data/test1:/data/test2:%s/virtualenvtest' %
(sys.prefix))
os.environ['PYTHONPATH'] = old_python_path
sys.real_prefix = old_real_prefix
|
Add tests for sandboxing utils.
|
Add tests for sandboxing utils.
|
Python
|
apache-2.0
|
punalpatel/st2,grengojbo/st2,grengojbo/st2,pinterb/st2,pixelrebel/st2,dennybaa/st2,nzlosh/st2,Plexxi/st2,pixelrebel/st2,StackStorm/st2,dennybaa/st2,tonybaloney/st2,StackStorm/st2,nzlosh/st2,armab/st2,armab/st2,StackStorm/st2,Plexxi/st2,grengojbo/st2,peak6/st2,Itxaka/st2,pinterb/st2,pixelrebel/st2,pinterb/st2,tonybaloney/st2,jtopjian/st2,alfasin/st2,alfasin/st2,jtopjian/st2,armab/st2,dennybaa/st2,jtopjian/st2,emedvedev/st2,peak6/st2,Plexxi/st2,tonybaloney/st2,alfasin/st2,emedvedev/st2,StackStorm/st2,lakshmi-kannan/st2,peak6/st2,Itxaka/st2,nzlosh/st2,lakshmi-kannan/st2,emedvedev/st2,Itxaka/st2,lakshmi-kannan/st2,nzlosh/st2,punalpatel/st2,punalpatel/st2,Plexxi/st2
|
Add tests for sandboxing utils.
|
import os
import sys
import unittest
import mock
from st2common.util.sandboxing import get_sandbox_python_path
class SandboxingUtilsTestCase(unittest.TestCase):
@mock.patch('st2common.util.sandboxing.get_python_lib')
def test_get_sandbox_python_path(self, mock_get_python_lib):
        # No inheritance
python_path = get_sandbox_python_path(inherit_from_parent=False,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':')
# Inherit python path from current process
# Mock the current process python path
old_python_path = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = ':/data/test1:/data/test2'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (not running inside virtualenv)
old_real_prefix = sys.real_prefix
del sys.real_prefix
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (running inside virtualenv)
sys.real_prefix = '/usr'
mock_get_python_lib.return_value = sys.prefix + '/virtualenvtest'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=True)
self.assertEqual(python_path, ':/data/test1:/data/test2:%s/virtualenvtest' %
(sys.prefix))
os.environ['PYTHONPATH'] = old_python_path
sys.real_prefix = old_real_prefix
|
<commit_before><commit_msg>Add tests for sandboxing utils.<commit_after>
|
import os
import sys
import unittest
import mock
from st2common.util.sandboxing import get_sandbox_python_path
class SandboxingUtilsTestCase(unittest.TestCase):
@mock.patch('st2common.util.sandboxing.get_python_lib')
def test_get_sandbox_python_path(self, mock_get_python_lib):
        # No inheritance
python_path = get_sandbox_python_path(inherit_from_parent=False,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':')
# Inherit python path from current process
# Mock the current process python path
old_python_path = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = ':/data/test1:/data/test2'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (not running inside virtualenv)
old_real_prefix = sys.real_prefix
del sys.real_prefix
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (running inside virtualenv)
sys.real_prefix = '/usr'
mock_get_python_lib.return_value = sys.prefix + '/virtualenvtest'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=True)
self.assertEqual(python_path, ':/data/test1:/data/test2:%s/virtualenvtest' %
(sys.prefix))
os.environ['PYTHONPATH'] = old_python_path
sys.real_prefix = old_real_prefix
|
Add tests for sandboxing utils.import os
import sys
import unittest
import mock
from st2common.util.sandboxing import get_sandbox_python_path
class SandboxingUtilsTestCase(unittest.TestCase):
@mock.patch('st2common.util.sandboxing.get_python_lib')
def test_get_sandbox_python_path(self, mock_get_python_lib):
        # No inheritance
python_path = get_sandbox_python_path(inherit_from_parent=False,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':')
# Inherit python path from current process
# Mock the current process python path
old_python_path = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = ':/data/test1:/data/test2'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (not running inside virtualenv)
old_real_prefix = sys.real_prefix
del sys.real_prefix
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (running inside virtualenv)
sys.real_prefix = '/usr'
mock_get_python_lib.return_value = sys.prefix + '/virtualenvtest'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=True)
self.assertEqual(python_path, ':/data/test1:/data/test2:%s/virtualenvtest' %
(sys.prefix))
os.environ['PYTHONPATH'] = old_python_path
sys.real_prefix = old_real_prefix
|
<commit_before><commit_msg>Add tests for sandboxing utils.<commit_after>import os
import sys
import unittest
import mock
from st2common.util.sandboxing import get_sandbox_python_path
class SandboxingUtilsTestCase(unittest.TestCase):
@mock.patch('st2common.util.sandboxing.get_python_lib')
def test_get_sandbox_python_path(self, mock_get_python_lib):
        # No inheritance
python_path = get_sandbox_python_path(inherit_from_parent=False,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':')
# Inherit python path from current process
# Mock the current process python path
old_python_path = os.environ.get('PYTHONPATH', '')
os.environ['PYTHONPATH'] = ':/data/test1:/data/test2'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (not running inside virtualenv)
old_real_prefix = sys.real_prefix
del sys.real_prefix
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=False)
self.assertEqual(python_path, ':/data/test1:/data/test2')
# Inherit from current process and from virtualenv (running inside virtualenv)
sys.real_prefix = '/usr'
mock_get_python_lib.return_value = sys.prefix + '/virtualenvtest'
python_path = get_sandbox_python_path(inherit_from_parent=True,
inherit_parent_virtualenv=True)
self.assertEqual(python_path, ':/data/test1:/data/test2:%s/virtualenvtest' %
(sys.prefix))
os.environ['PYTHONPATH'] = old_python_path
sys.real_prefix = old_real_prefix
|
|
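The test above reads and restores sys.real_prefix unconditionally, so it assumes the suite itself runs inside a virtualenv; a more defensive save/restore pattern, sketched here only as a hedged alternative, guards the attribute with getattr and a sentinel.
import sys
_missing = object()
old_real_prefix = getattr(sys, 'real_prefix', _missing)  # absent outside a virtualenv
if old_real_prefix is not _missing:
    del sys.real_prefix
try:
    pass  # exercise get_sandbox_python_path(...) here
finally:
    if old_real_prefix is not _missing:
        sys.real_prefix = old_real_prefix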
da4e1b3e2598e448d6325f97142e613af079d106
|
tests/conftest.py
|
tests/conftest.py
|
import pytest
@pytest.fixture
def credentials():
"""Fake set of MWS credentials"""
return {
"access_key": "AAAAAAAAAAAAAAAAAAAA",
"secret_key": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"account_id": "AAAAAAAAAAAAAA",
}
|
Add fake MWS credentials pytest fixture
|
Add fake MWS credentials pytest fixture
|
Python
|
unlicense
|
Bobspadger/python-amazon-mws,GriceTurrble/python-amazon-mws
|
Add fake MWS credentials pytest fixture
|
import pytest
@pytest.fixture
def credentials():
"""Fake set of MWS credentials"""
return {
"access_key": "AAAAAAAAAAAAAAAAAAAA",
"secret_key": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"account_id": "AAAAAAAAAAAAAA",
}
|
<commit_before><commit_msg>Add fake MWS credentials pytest fixture<commit_after>
|
import pytest
@pytest.fixture
def credentials():
"""Fake set of MWS credentials"""
return {
"access_key": "AAAAAAAAAAAAAAAAAAAA",
"secret_key": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"account_id": "AAAAAAAAAAAAAA",
}
|
Add fake MWS credentials pytest fixtureimport pytest
@pytest.fixture
def credentials():
"""Fake set of MWS credentials"""
return {
"access_key": "AAAAAAAAAAAAAAAAAAAA",
"secret_key": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"account_id": "AAAAAAAAAAAAAA",
}
|
<commit_before><commit_msg>Add fake MWS credentials pytest fixture<commit_after>import pytest
@pytest.fixture
def credentials():
"""Fake set of MWS credentials"""
return {
"access_key": "AAAAAAAAAAAAAAAAAAAA",
"secret_key": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"account_id": "AAAAAAAAAAAAAA",
}
|
|
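A small example of a test consuming this fixture; pytest injects it by parameter name, and the assertions below are illustrative rather than taken from the package's own suite.
def test_credentials_shape(credentials):
    # The fixture should expose exactly the three MWS credential fields.
    assert set(credentials) == {'access_key', 'secret_key', 'account_id'}
    assert all(isinstance(value, str) for value in credentials.values())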
63f9bf61a8edcbd88844281c3d343097a8ca49ef
|
tests/test_cli.py
|
tests/test_cli.py
|
import re
def test_simple(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"My awesome daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'My awesome daemon' in result.stdout
assert re.search((
br'\s*start\s+Start the daemon\.\n'
br'\s*stop\s+Stop the daemon\.\n'
br'\s*restart\s+Stop then start the daemon\.\n'
br'\s*status\s+Get the status of the daemon\.\n'),
result.stdout)
result = script.run('start', '--help')
assert result.returncode == 0
assert re.search(
br'\s*--debug\s+Do NOT detach and run in the background\.\n',
result.stdout)
assert script.run('stop', '--help').returncode == 0
assert script.run('restart', '--help').returncode == 0
assert script.run('status', '--help').returncode == 0
def test_debug(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI, daemon_params={'prog': 'foo'})
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
main()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_custom_actions(pyscript):
script = pyscript("""
import time
import click
from daemonocle import Daemon, expose_action
from daemonocle.cli import DaemonCLI
class BananaDaemon(Daemon):
@expose_action
def banana(self):
\"\"\"Go bananas.\"\"\"
pass
def plantain(self):
pass
@click.command(cls=DaemonCLI, daemon_class=BananaDaemon,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"The banana daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'The banana daemon' in result.stdout
assert re.search(br'\s*banana\s+Go bananas\.\n', result.stdout)
assert script.run('banana', '--help').returncode == 0
result = script.run('plantain', '--help')
assert result.returncode != 0
assert 'No such command' in result.stderr
|
Add tests for Click integration
|
Add tests for Click integration
|
Python
|
mit
|
jnrbsn/daemonocle
|
Add tests for Click integration
|
import re
def test_simple(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"My awesome daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'My awesome daemon' in result.stdout
assert re.search((
br'\s*start\s+Start the daemon\.\n'
br'\s*stop\s+Stop the daemon\.\n'
br'\s*restart\s+Stop then start the daemon\.\n'
br'\s*status\s+Get the status of the daemon\.\n'),
result.stdout)
result = script.run('start', '--help')
assert result.returncode == 0
assert re.search(
br'\s*--debug\s+Do NOT detach and run in the background\.\n',
result.stdout)
assert script.run('stop', '--help').returncode == 0
assert script.run('restart', '--help').returncode == 0
assert script.run('status', '--help').returncode == 0
def test_debug(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI, daemon_params={'prog': 'foo'})
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
main()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_custom_actions(pyscript):
script = pyscript("""
import time
import click
from daemonocle import Daemon, expose_action
from daemonocle.cli import DaemonCLI
class BananaDaemon(Daemon):
@expose_action
def banana(self):
\"\"\"Go bananas.\"\"\"
pass
def plantain(self):
pass
@click.command(cls=DaemonCLI, daemon_class=BananaDaemon,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"The banana daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'The banana daemon' in result.stdout
assert re.search(br'\s*banana\s+Go bananas\.\n', result.stdout)
assert script.run('banana', '--help').returncode == 0
result = script.run('plantain', '--help')
assert result.returncode != 0
assert 'No such command' in result.stderr
|
<commit_before><commit_msg>Add tests for Click integration<commit_after>
|
import re
def test_simple(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"My awesome daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'My awesome daemon' in result.stdout
assert re.search((
br'\s*start\s+Start the daemon\.\n'
br'\s*stop\s+Stop the daemon\.\n'
br'\s*restart\s+Stop then start the daemon\.\n'
br'\s*status\s+Get the status of the daemon\.\n'),
result.stdout)
result = script.run('start', '--help')
assert result.returncode == 0
assert re.search(
br'\s*--debug\s+Do NOT detach and run in the background\.\n',
result.stdout)
assert script.run('stop', '--help').returncode == 0
assert script.run('restart', '--help').returncode == 0
assert script.run('status', '--help').returncode == 0
def test_debug(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI, daemon_params={'prog': 'foo'})
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
main()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_custom_actions(pyscript):
script = pyscript("""
import time
import click
from daemonocle import Daemon, expose_action
from daemonocle.cli import DaemonCLI
class BananaDaemon(Daemon):
@expose_action
def banana(self):
\"\"\"Go bananas.\"\"\"
pass
def plantain(self):
pass
@click.command(cls=DaemonCLI, daemon_class=BananaDaemon,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"The banana daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'The banana daemon' in result.stdout
assert re.search(br'\s*banana\s+Go bananas\.\n', result.stdout)
assert script.run('banana', '--help').returncode == 0
result = script.run('plantain', '--help')
assert result.returncode != 0
assert 'No such command' in result.stderr
|
Add tests for Click integrationimport re
def test_simple(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"My awesome daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'My awesome daemon' in result.stdout
assert re.search((
br'\s*start\s+Start the daemon\.\n'
br'\s*stop\s+Stop the daemon\.\n'
br'\s*restart\s+Stop then start the daemon\.\n'
br'\s*status\s+Get the status of the daemon\.\n'),
result.stdout)
result = script.run('start', '--help')
assert result.returncode == 0
assert re.search(
br'\s*--debug\s+Do NOT detach and run in the background\.\n',
result.stdout)
assert script.run('stop', '--help').returncode == 0
assert script.run('restart', '--help').returncode == 0
assert script.run('status', '--help').returncode == 0
def test_debug(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI, daemon_params={'prog': 'foo'})
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
main()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_custom_actions(pyscript):
script = pyscript("""
import time
import click
from daemonocle import Daemon, expose_action
from daemonocle.cli import DaemonCLI
class BananaDaemon(Daemon):
@expose_action
def banana(self):
\"\"\"Go bananas.\"\"\"
pass
def plantain(self):
pass
@click.command(cls=DaemonCLI, daemon_class=BananaDaemon,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"The banana daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'The banana daemon' in result.stdout
assert re.search(br'\s*banana\s+Go bananas\.\n', result.stdout)
assert script.run('banana', '--help').returncode == 0
result = script.run('plantain', '--help')
assert result.returncode != 0
assert 'No such command' in result.stderr
|
<commit_before><commit_msg>Add tests for Click integration<commit_after>import re
def test_simple(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"My awesome daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'My awesome daemon' in result.stdout
assert re.search((
br'\s*start\s+Start the daemon\.\n'
br'\s*stop\s+Stop the daemon\.\n'
br'\s*restart\s+Stop then start the daemon\.\n'
br'\s*status\s+Get the status of the daemon\.\n'),
result.stdout)
result = script.run('start', '--help')
assert result.returncode == 0
assert re.search(
br'\s*--debug\s+Do NOT detach and run in the background\.\n',
result.stdout)
assert script.run('stop', '--help').returncode == 0
assert script.run('restart', '--help').returncode == 0
assert script.run('status', '--help').returncode == 0
def test_debug(pyscript):
script = pyscript("""
import click
from daemonocle.cli import DaemonCLI
@click.command(cls=DaemonCLI, daemon_params={'prog': 'foo'})
def main():
\"\"\"My awesome daemon\"\"\"
print('hello world')
if __name__ == '__main__':
main()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_custom_actions(pyscript):
script = pyscript("""
import time
import click
from daemonocle import Daemon, expose_action
from daemonocle.cli import DaemonCLI
class BananaDaemon(Daemon):
@expose_action
def banana(self):
\"\"\"Go bananas.\"\"\"
pass
def plantain(self):
pass
@click.command(cls=DaemonCLI, daemon_class=BananaDaemon,
daemon_params={'prog': 'foo', 'pidfile': 'foo.pid'})
def main():
\"\"\"The banana daemon\"\"\"
pass
if __name__ == '__main__':
main()
""")
result = script.run('--help')
assert result.returncode == 0
assert b'The banana daemon' in result.stdout
assert re.search(br'\s*banana\s+Go bananas\.\n', result.stdout)
assert script.run('banana', '--help').returncode == 0
result = script.run('plantain', '--help')
assert result.returncode != 0
assert 'No such command' in result.stderr
|
|
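These tests rely on a pyscript fixture defined elsewhere in the repository; its real implementation is not shown in this record, so the following is only a rough sketch of what such a fixture could look like, with every name and detail assumed.
import subprocess
import sys
import textwrap
import pytest
@pytest.fixture
def pyscript(tmp_path):
    def factory(source):
        path = tmp_path / 'script.py'
        path.write_text(textwrap.dedent(source))
        class Runner(object):
            def run(self, *args):
                # Run the generated script and hand back returncode/stdout/stderr.
                return subprocess.run([sys.executable, str(path)] + list(args),
                                      stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return Runner()
    return factory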
dd4ca2f934978b36a381a756c6a6c97429dea6a5
|
zerver/migrations/0008_preregistrationuser_upper_email_idx.py
|
zerver/migrations/0008_preregistrationuser_upper_email_idx.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0007_userprofile_is_bot_active_indexes'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_preregistration_email_idx ON zerver_preregistrationuser ((upper(email)));",
reverse_sql="DROP INDEX upper_preregistration_email_idx;"),
]
|
Add case-insensitive index on PreregistrationUser.email.
|
Add case-insensitive index on PreregistrationUser.email.
This fixes a performance issue joining a server with a large number of
users.
Thanks to @dbiollo for the suggestion!
|
Python
|
apache-2.0
|
vikas-parashar/zulip,amanharitsh123/zulip,paxapy/zulip,jainayush975/zulip,Galexrt/zulip,Jianchun1/zulip,KingxBanana/zulip,mohsenSy/zulip,Vallher/zulip,Galexrt/zulip,synicalsyntax/zulip,dhcrzf/zulip,punchagan/zulip,reyha/zulip,blaze225/zulip,cosmicAsymmetry/zulip,verma-varsha/zulip,joyhchen/zulip,isht3/zulip,punchagan/zulip,peiwei/zulip,jainayush975/zulip,ryanbackman/zulip,sup95/zulip,Vallher/zulip,eeshangarg/zulip,Juanvulcano/zulip,synicalsyntax/zulip,reyha/zulip,amyliu345/zulip,sonali0901/zulip,krtkmj/zulip,rht/zulip,paxapy/zulip,SmartPeople/zulip,AZtheAsian/zulip,mohsenSy/zulip,joyhchen/zulip,calvinleenyc/zulip,vaidap/zulip,brockwhittaker/zulip,tommyip/zulip,PhilSk/zulip,jackrzhang/zulip,SmartPeople/zulip,j831/zulip,Frouk/zulip,susansls/zulip,timabbott/zulip,rishig/zulip,Diptanshu8/zulip,TigorC/zulip,jainayush975/zulip,arpith/zulip,showell/zulip,Diptanshu8/zulip,ahmadassaf/zulip,zacps/zulip,rht/zulip,Frouk/zulip,zulip/zulip,peguin40/zulip,ryansnowboarder/zulip,aakash-cr7/zulip,aakash-cr7/zulip,peiwei/zulip,Diptanshu8/zulip,umkay/zulip,Frouk/zulip,mahim97/zulip,TigorC/zulip,krtkmj/zulip,shubhamdhama/zulip,Frouk/zulip,vikas-parashar/zulip,grave-w-grave/zulip,zulip/zulip,ahmadassaf/zulip,jrowan/zulip,reyha/zulip,timabbott/zulip,mahim97/zulip,eeshangarg/zulip,hackerkid/zulip,j831/zulip,samatdav/zulip,zulip/zulip,isht3/zulip,amanharitsh123/zulip,jrowan/zulip,mohsenSy/zulip,JPJPJPOPOP/zulip,kou/zulip,vikas-parashar/zulip,dwrpayne/zulip,ahmadassaf/zulip,calvinleenyc/zulip,paxapy/zulip,amyliu345/zulip,joyhchen/zulip,mahim97/zulip,timabbott/zulip,amanharitsh123/zulip,jphilipsen05/zulip,Vallher/zulip,dattatreya303/zulip,isht3/zulip,showell/zulip,Frouk/zulip,Diptanshu8/zulip,dawran6/zulip,krtkmj/zulip,Juanvulcano/zulip,Frouk/zulip,calvinleenyc/zulip,eeshangarg/zulip,mahim97/zulip,dawran6/zulip,shubhamdhama/zulip,aakash-cr7/zulip,hackerkid/zulip,grave-w-grave/zulip,vikas-parashar/zulip,jphilipsen05/zulip,sup95/zulip,ahmadassaf/zulip,ahmadassaf/zulip,Juanvulcano/zulip,rht/zulip,Jianchun1/zulip,PhilSk/zulip,AZtheAsian/zulip,eeshangarg/zulip,sharmaeklavya2/zulip,showell/zulip,dwrpayne/zulip,jainayush975/zulip,j831/zulip,sup95/zulip,souravbadami/zulip,hackerkid/zulip,shubhamdhama/zulip,JPJPJPOPOP/zulip,Jianchun1/zulip,samatdav/zulip,showell/zulip,synicalsyntax/zulip,vabs22/zulip,zacps/zulip,susansls/zulip,vikas-parashar/zulip,tommyip/zulip,Frouk/zulip,arpith/zulip,calvinleenyc/zulip,krtkmj/zulip,peiwei/zulip,umkay/zulip,samatdav/zulip,cosmicAsymmetry/zulip,amanharitsh123/zulip,Galexrt/zulip,krtkmj/zulip,eeshangarg/zulip,JPJPJPOPOP/zulip,jackrzhang/zulip,sharmaeklavya2/zulip,peiwei/zulip,jackrzhang/zulip,Jianchun1/zulip,Vallher/zulip,reyha/zulip,vabs22/zulip,tommyip/zulip,arpith/zulip,grave-w-grave/zulip,shubhamdhama/zulip,mohsenSy/zulip,christi3k/zulip,Diptanshu8/zulip,brockwhittaker/zulip,peguin40/zulip,synicalsyntax/zulip,Diptanshu8/zulip,TigorC/zulip,vabs22/zulip,dhcrzf/zulip,amyliu345/zulip,ryansnowboarder/zulip,cosmicAsymmetry/zulip,arpith/zulip,rishig/zulip,aakash-cr7/zulip,kou/zulip,dattatreya303/zulip,punchagan/zulip,aakash-cr7/zulip,AZtheAsian/zulip,kou/zulip,kou/zulip,joyhchen/zulip,Jianchun1/zulip,susansls/zulip,vaidap/zulip,dwrpayne/zulip,andersk/zulip,JPJPJPOPOP/zulip,vaidap/zulip,verma-varsha/zulip,ahmadassaf/zulip,susansls/zulip,sharmaeklavya2/zulip,andersk/zulip,sonali0901/zulip,hackerkid/zulip,umkay/zulip,dwrpayne/zulip,jphilipsen05/zulip,tommyip/zulip,niftynei/zulip,brainwane/zulip,vaidap/zulip,brainwane/zulip,jainayush975/zulip,punchagan/zulip,jackrzhang/zulip,SmartPeople/zulip,PhilSk/
zulip,sharmaeklavya2/zulip,isht3/zulip,dawran6/zulip,synicalsyntax/zulip,umkay/zulip,dattatreya303/zulip,andersk/zulip,Galexrt/zulip,isht3/zulip,zulip/zulip,Vallher/zulip,sharmaeklavya2/zulip,cosmicAsymmetry/zulip,Vallher/zulip,brockwhittaker/zulip,dwrpayne/zulip,ryansnowboarder/zulip,sonali0901/zulip,jainayush975/zulip,grave-w-grave/zulip,joyhchen/zulip,mohsenSy/zulip,kou/zulip,dhcrzf/zulip,timabbott/zulip,brockwhittaker/zulip,dattatreya303/zulip,PhilSk/zulip,calvinleenyc/zulip,kou/zulip,ryanbackman/zulip,ahmadassaf/zulip,susansls/zulip,shubhamdhama/zulip,krtkmj/zulip,Vallher/zulip,verma-varsha/zulip,rishig/zulip,zacps/zulip,KingxBanana/zulip,hackerkid/zulip,jrowan/zulip,brainwane/zulip,vabs22/zulip,rishig/zulip,Juanvulcano/zulip,TigorC/zulip,jrowan/zulip,tommyip/zulip,blaze225/zulip,souravbadami/zulip,jphilipsen05/zulip,tommyip/zulip,ryanbackman/zulip,dawran6/zulip,souravbadami/zulip,isht3/zulip,blaze225/zulip,blaze225/zulip,showell/zulip,samatdav/zulip,dattatreya303/zulip,zulip/zulip,arpith/zulip,krtkmj/zulip,TigorC/zulip,AZtheAsian/zulip,rishig/zulip,brainwane/zulip,brockwhittaker/zulip,vaidap/zulip,eeshangarg/zulip,andersk/zulip,TigorC/zulip,sonali0901/zulip,andersk/zulip,PhilSk/zulip,cosmicAsymmetry/zulip,jrowan/zulip,dwrpayne/zulip,amyliu345/zulip,sup95/zulip,brockwhittaker/zulip,rht/zulip,ryansnowboarder/zulip,SmartPeople/zulip,amanharitsh123/zulip,dhcrzf/zulip,dawran6/zulip,sup95/zulip,zulip/zulip,ryanbackman/zulip,mahim97/zulip,KingxBanana/zulip,Galexrt/zulip,peiwei/zulip,andersk/zulip,amyliu345/zulip,paxapy/zulip,dhcrzf/zulip,blaze225/zulip,ryansnowboarder/zulip,dhcrzf/zulip,Jianchun1/zulip,timabbott/zulip,vabs22/zulip,vikas-parashar/zulip,tommyip/zulip,shubhamdhama/zulip,dawran6/zulip,KingxBanana/zulip,ryansnowboarder/zulip,punchagan/zulip,arpith/zulip,jphilipsen05/zulip,rht/zulip,grave-w-grave/zulip,peguin40/zulip,christi3k/zulip,rht/zulip,verma-varsha/zulip,peiwei/zulip,timabbott/zulip,peiwei/zulip,grave-w-grave/zulip,Juanvulcano/zulip,synicalsyntax/zulip,rht/zulip,niftynei/zulip,amyliu345/zulip,umkay/zulip,ryansnowboarder/zulip,brainwane/zulip,samatdav/zulip,susansls/zulip,showell/zulip,hackerkid/zulip,eeshangarg/zulip,zacps/zulip,ryanbackman/zulip,JPJPJPOPOP/zulip,souravbadami/zulip,niftynei/zulip,umkay/zulip,umkay/zulip,niftynei/zulip,kou/zulip,AZtheAsian/zulip,sharmaeklavya2/zulip,ryanbackman/zulip,jackrzhang/zulip,blaze225/zulip,Galexrt/zulip,paxapy/zulip,sonali0901/zulip,zulip/zulip,Galexrt/zulip,zacps/zulip,rishig/zulip,AZtheAsian/zulip,jackrzhang/zulip,dwrpayne/zulip,vabs22/zulip,andersk/zulip,souravbadami/zulip,SmartPeople/zulip,punchagan/zulip,niftynei/zulip,JPJPJPOPOP/zulip,showell/zulip,hackerkid/zulip,synicalsyntax/zulip,KingxBanana/zulip,KingxBanana/zulip,brainwane/zulip,joyhchen/zulip,souravbadami/zulip,verma-varsha/zulip,SmartPeople/zulip,j831/zulip,jackrzhang/zulip,reyha/zulip,christi3k/zulip,calvinleenyc/zulip,mohsenSy/zulip,PhilSk/zulip,timabbott/zulip,peguin40/zulip,Juanvulcano/zulip,dattatreya303/zulip,brainwane/zulip,jphilipsen05/zulip,dhcrzf/zulip,peguin40/zulip,zacps/zulip,vaidap/zulip,sonali0901/zulip,christi3k/zulip,j831/zulip,niftynei/zulip,samatdav/zulip,cosmicAsymmetry/zulip,christi3k/zulip,christi3k/zulip,verma-varsha/zulip,jrowan/zulip,rishig/zulip,mahim97/zulip,paxapy/zulip,peguin40/zulip,j831/zulip,punchagan/zulip,aakash-cr7/zulip,shubhamdhama/zulip,sup95/zulip,reyha/zulip,amanharitsh123/zulip
|
Add case-insensitive index on PreregistrationUser.email.
This fixes a performance issue joining a server with a large number of
users.
Thanks to @dbiollo for the suggestion!
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0007_userprofile_is_bot_active_indexes'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_preregistration_email_idx ON zerver_preregistrationuser ((upper(email)));",
reverse_sql="DROP INDEX upper_preregistration_email_idx;"),
]
|
<commit_before><commit_msg>Add case-insensitive index on PreregistrationUser.email.
This fixes a performance issue joining a server with a large number of
users.
Thanks to @dbiollo for the suggestion!<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0007_userprofile_is_bot_active_indexes'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_preregistration_email_idx ON zerver_preregistrationuser ((upper(email)));",
reverse_sql="DROP INDEX upper_preregistration_email_idx;"),
]
|
Add case-insensitive index on PreregistrationUser.email.
This fixes a performance issue joining a server with a large number of
users.
Thanks to @dbiollo for the suggestion!# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0007_userprofile_is_bot_active_indexes'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_preregistration_email_idx ON zerver_preregistrationuser ((upper(email)));",
reverse_sql="DROP INDEX upper_preregistration_email_idx;"),
]
|
<commit_before><commit_msg>Add case-insensitive index on PreregistrationUser.email.
This fixes a performance issue joining a server with a large number of
users.
Thanks to @dbiollo for the suggestion!<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zerver', '0007_userprofile_is_bot_active_indexes'),
]
operations = [
migrations.RunSQL("CREATE INDEX upper_preregistration_email_idx ON zerver_preregistrationuser ((upper(email)));",
reverse_sql="DROP INDEX upper_preregistration_email_idx;"),
]
|
|
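For context, a sketch of the case-insensitive lookup this functional index is meant to accelerate; the helper is illustrative, though on PostgreSQL Django's __iexact lookup does compile to UPPER(email) = UPPER(%s).
from zerver.models import PreregistrationUser
def find_prereg_users(email):
    # WHERE UPPER(email) = UPPER(%s) on PostgreSQL, which the new index can serve.
    return PreregistrationUser.objects.filter(email__iexact=email)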
cf6c98abea7e14f897f650f9c4e644c3380caab2
|
django/website/main/tests/test_merge_coverage_prepare_package_filters.py
|
django/website/main/tests/test_merge_coverage_prepare_package_filters.py
|
from main.tests.helper_methods import get_command_with_parsed_options
def test_prepare_package_filters_returns_none_when_no_package_filters():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': [],
})
assert merge_coverage_files_command.prepare_packagefilters() is None
def test_prepare_package_filters_converts_filters_to_regex():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': ['missingletters', 'test1.class', 'test2.*'],
})
assert ['^missingletters$', '^test1\.class$', '^test2\..*$'] == merge_coverage_files_command.prepare_packagefilters()
|
Add tests for preparing filters
|
Add tests for preparing filters
|
Python
|
agpl-3.0
|
aptivate/alfie,aptivate/kashana,aptivate/kashana,aptivate/alfie,aptivate/kashana,aptivate/alfie,daniell/kashana,aptivate/kashana,aptivate/alfie,daniell/kashana,daniell/kashana,daniell/kashana
|
Add tests for preparing filters
|
from main.tests.helper_methods import get_command_with_parsed_options
def test_prepare_package_filters_returns_none_when_no_package_filters():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': [],
})
assert merge_coverage_files_command.prepare_packagefilters() is None
def test_prepare_package_filters_converts_filters_to_regex():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': ['missingletters', 'test1.class', 'test2.*'],
})
assert ['^missingletters$', '^test1\.class$', '^test2\..*$'] == merge_coverage_files_command.prepare_packagefilters()
|
<commit_before><commit_msg>Add tests for preparing filters<commit_after>
|
from main.tests.helper_methods import get_command_with_parsed_options
def test_prepare_package_filters_returns_none_when_no_package_filters():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': [],
})
assert merge_coverage_files_command.prepare_packagefilters() is None
def test_prepare_package_filters_converts_filters_to_regex():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': ['missingletters', 'test1.class', 'test2.*'],
})
assert ['^missingletters$', '^test1\.class$', '^test2\..*$'] == merge_coverage_files_command.prepare_packagefilters()
|
Add tests for preparing filtersfrom main.tests.helper_methods import get_command_with_parsed_options
def test_prepare_package_filters_returns_none_when_no_package_filters():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': [],
})
assert merge_coverage_files_command.prepare_packagefilters() is None
def test_prepare_package_filters_converts_filters_to_regex():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': ['missingletters', 'test1.class', 'test2.*'],
})
assert ['^missingletters$', '^test1\.class$', '^test2\..*$'] == merge_coverage_files_command.prepare_packagefilters()
|
<commit_before><commit_msg>Add tests for preparing filters<commit_after>from main.tests.helper_methods import get_command_with_parsed_options
def test_prepare_package_filters_returns_none_when_no_package_filters():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': [],
})
assert merge_coverage_files_command.prepare_packagefilters() is None
def test_prepare_package_filters_converts_filters_to_regex():
merge_coverage_files_command = get_command_with_parsed_options({
'packagefilters': ['missingletters', 'test1.class', 'test2.*'],
})
assert ['^missingletters$', '^test1\.class$', '^test2\..*$'] == merge_coverage_files_command.prepare_packagefilters()
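The method under test is not included in this record; a sketch consistent with the assertions above (purely illustrative, the real implementation may differ) could be:
# Hypothetical prepare_packagefilters: anchor each filter, escape literal dots,
# and turn '*' into the regex wildcard '.*'.
def prepare_packagefilters(packagefilters):
    if not packagefilters:
        return None
    return ['^' + f.replace('.', r'\.').replace('*', '.*') + '$'
            for f in packagefilters]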
|
|
d12a4d5fbaeb068e5e7544a9b9e09a9842e48bc1
|
satori.core/satori/core/wsgi.py
|
satori.core/satori/core/wsgi.py
|
import sys
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
application = get_wsgi_application()
# initialize thrift server structures - takes a long time and it's better
# to do it on startup than during the first request
import satori.core.thrift_server
|
Add satori.core WSGI aplication file (belongs to 97b22ae4a25e, but forgot to add the file).
|
Add satori.core WSGI aplication file (belongs to 97b22ae4a25e, but forgot to add the file).
|
Python
|
mit
|
zielmicha/satori,zielmicha/satori,zielmicha/satori,zielmicha/satori,zielmicha/satori,zielmicha/satori,zielmicha/satori
|
Add satori.core WSGI aplication file (belongs to 97b22ae4a25e, but forgot to add the file).
|
import sys
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
application = get_wsgi_application()
# initialize thrift server structures - takes a long time and it's better
# to do it on startup than during the first request
import satori.core.thrift_server
|
<commit_before><commit_msg>Add satori.core WSGI aplication file (belongs to 97b22ae4a25e, but forgot to add the file).<commit_after>
|
import sys
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
application = get_wsgi_application()
# initialize thrift server structures - takes a long time and it's better
# to do it on startup than during the first request
import satori.core.thrift_server
|
Add satori.core WSGI aplication file (belongs to 97b22ae4a25e, but forgot to add the file).import sys
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
application = get_wsgi_application()
# initialize thrift server structures - takes a long time and it's better
# to do it on startup than during the first request
import satori.core.thrift_server
|
<commit_before><commit_msg>Add satori.core WSGI aplication file (belongs to 97b22ae4a25e, but forgot to add the file).<commit_after>import sys
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satori.core.settings')
application = get_wsgi_application()
# initialize thrift server structures - takes a long time and it's better
# to do it on startup than during the first request
import satori.core.thrift_server
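For local testing the module can be served with the reference WSGI server (host and port below are assumptions; production deployments would normally use a dedicated WSGI server such as gunicorn or uwsgi):
# Minimal sketch: serve the application with wsgiref for local testing only.
from wsgiref.simple_server import make_server
from satori.core.wsgi import application
if __name__ == '__main__':
    make_server('127.0.0.1', 8000, application).serve_forever()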
|
|
d97b34d01c60ded20f79f968a7fa8f2f866a1a89
|
File_Transfer_Protocol/ftp_send_receive.py
|
File_Transfer_Protocol/ftp_send_receive.py
|
"""
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root username & password for security reasons
Create a separate user and provide access to a home directory of the user
Use login id and password of the user created
cwd here stands for current working directory
"""
from ftplib import FTP
ftp = FTP('xxx.xxx.x.x')  # Enter the ip address or the domain name here
ftp.login(user='username', passwd='password')
ftp.cwd('/Enter the directory here/')
"""
The file which will be received via the FTP server
Enter the location of the file where the file is received
"""
def ReceiveFile():
    FileName = 'example.txt'  # Enter the location of the file
    LocalFile = open(FileName, 'wb')
    ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
ftp.quit()
LocalFile.close()
"""
The file which will be sent via the FTP server
The file that is sent will be stored in the current working directory
"""
def SendFile():
    FileName = 'example.txt'  # Enter the name of the file
ftp.storbinary('STOR ' + FileName, open(FileName, 'rb'))
ftp.quit()
|
Send & receive files via ftp
|
Send & receive files via ftp
|
Python
|
mit
|
TheAlgorithms/Python
|
Send & receive files via ftp
|
"""
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root username & password for security reasons
Create a separate user and provide access to a home directory of the user
Use login id and password of the user created
cwd here stands for current working directory
"""
from ftplib import FTP
ftp = FTP('xxx.xxx.x.x')  # Enter the ip address or the domain name here
ftp.login(user='username', passwd='password')
ftp.cwd('/Enter the directory here/')
"""
The file which will be received via the FTP server
Enter the location of the file where the file is received
"""
def ReceiveFile():
    FileName = 'example.txt'  # Enter the location of the file
    LocalFile = open(FileName, 'wb')
    ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
ftp.quit()
LocalFile.close()
"""
The file which will be sent via the FTP server
The file that is sent will be stored in the current working directory
"""
def SendFile():
    FileName = 'example.txt'  # Enter the name of the file
ftp.storbinary('STOR ' + FileName, open(FileName, 'rb'))
ftp.quit()
|
<commit_before><commit_msg>Send & receive files via ftp<commit_after>
|
"""
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root username & password for security reasons
Create a separate user and provide access to a home directory of the user
Use login id and password of the user created
cwd here stands for current working directory
"""
from ftplib import FTP
ftp = FTP('xxx.xxx.x.x')  # Enter the ip address or the domain name here
ftp.login(user='username', passwd='password')
ftp.cwd('/Enter the directory here/')
"""
The file which will be received via the FTP server
Enter the location of the file where the file is received
"""
def ReceiveFile():
    FileName = 'example.txt'  # Enter the location of the file
    LocalFile = open(FileName, 'wb')
    ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
ftp.quit()
LocalFile.close()
"""
The file which will be sent via the FTP server
The file that is sent will be stored in the current working directory
"""
def SendFile():
    FileName = 'example.txt'  # Enter the name of the file
ftp.storbinary('STOR ' + FileName, open(FileName, 'rb'))
ftp.quit()
|
Send & receive files via ftp"""
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root username & password for security reasons
Create a separate user and provide access to a home directory of the user
Use login id and password of the user created
cwd here stands for current working directory
"""
from ftplib import FTP
ftp = FTP('xxx.xxx.x.x')  # Enter the ip address or the domain name here
ftp.login(user='username', passwd='password')
ftp.cwd('/Enter the directory here/')
"""
The file which will be received via the FTP server
Enter the location of the file where the file is received
"""
def ReceiveFile():
    FileName = 'example.txt'  # Enter the location of the file
    LocalFile = open(FileName, 'wb')
    ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
ftp.quit()
LocalFile.close()
"""
The file which will be sent via the FTP server
The file that is sent will be stored in the current working directory
"""
def SendFile():
    FileName = 'example.txt'  # Enter the name of the file
ftp.storbinary('STOR ' + FileName, open(FileName, 'rb'))
ftp.quit()
|
<commit_before><commit_msg>Send & receive files via ftp<commit_after>"""
File transfer protocol used to send and receive files using FTP server.
Use credentials to provide access to the FTP client
Note: Do not use root username & password for security reasons
Create a separate user and provide access to a home directory of the user
Use login id and password of the user created
cwd here stands for current working directory
"""
from ftplib import FTP
ftp = FTP('xxx.xxx.x.x')  # Enter the ip address or the domain name here
ftp.login(user='username', passwd='password')
ftp.cwd('/Enter the directory here/')
"""
The file which will be received via the FTP server
Enter the location of the file where the file is received
"""
def ReceiveFile():
    FileName = 'example.txt'  # Enter the location of the file
    LocalFile = open(FileName, 'wb')
    ftp.retrbinary('RETR ' + FileName, LocalFile.write, 1024)
ftp.quit()
LocalFile.close()
"""
The file which will be sent via the FTP server
The file that is sent will be stored in the current working directory
"""
def SendFile():
    FileName = 'example.txt'  # Enter the name of the file
ftp.storbinary('STOR ' + FileName, open(FileName, 'rb'))
ftp.quit()
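A slightly more defensive variant of the same transfers (Python 3; host, credentials and file names are placeholders) uses context managers so the FTP session and the local file are closed even if an error occurs:
# Illustrative sketch only; all connection details below are placeholders.
from ftplib import FTP
def receive_file(host, user, password, remote_name, local_name):
    with FTP(host) as ftp:
        ftp.login(user=user, passwd=password)
        with open(local_name, 'wb') as local_file:
            ftp.retrbinary('RETR ' + remote_name, local_file.write, 1024)
def send_file(host, user, password, file_name):
    with FTP(host) as ftp:
        ftp.login(user=user, passwd=password)
        with open(file_name, 'rb') as local_file:
            ftp.storbinary('STOR ' + file_name, local_file)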
|
|
325862842887a671dd485277b73b9277fbbcc6e0
|
core/tests/test_models.py
|
core/tests/test_models.py
|
import core.tests.utils as test_utils
from core import models
class WagtailCompanyPageTestCase(test_utils.WagtailTest):
def test_twitter_handler(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_trailing_slash(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}/'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.twitter_handler)
def test_github_user(self):
github_user = 'springload'
github_url = 'https://github.com/{}'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_trailing_slash(self):
github_user = 'springload'
github_url = 'https://github.com/{}/'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.github_user)
|
Add failing tests for social handles
|
Add failing tests for social handles
|
Python
|
mit
|
springload/madewithwagtail,springload/madewithwagtail,springload/madewithwagtail,springload/madewithwagtail
|
Add failing tests for social handles
|
import core.tests.utils as test_utils
from core import models
class WagtailCompanyPageTestCase(test_utils.WagtailTest):
def test_twitter_handler(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_trailing_slash(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}/'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.twitter_handler)
def test_github_user(self):
github_user = 'springload'
github_url = 'https://github.com/{}'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_trailing_slash(self):
github_user = 'springload'
github_url = 'https://github.com/{}/'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.github_user)
|
<commit_before><commit_msg>Add failing tests for social handles<commit_after>
|
import core.tests.utils as test_utils
from core import models
class WagtailCompanyPageTestCase(test_utils.WagtailTest):
def test_twitter_handler(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_trailing_slash(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}/'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.twitter_handler)
def test_github_user(self):
github_user = 'springload'
github_url = 'https://github.com/{}'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_trailing_slash(self):
github_user = 'springload'
github_url = 'https://github.com/{}/'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.github_user)
|
Add failing tests for social handlesimport core.tests.utils as test_utils
from core import models
class WagtailCompanyPageTestCase(test_utils.WagtailTest):
def test_twitter_handler(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_trailing_slash(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}/'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.twitter_handler)
def test_github_user(self):
github_user = 'springload'
github_url = 'https://github.com/{}'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_trailing_slash(self):
github_user = 'springload'
github_url = 'https://github.com/{}/'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.github_user)
|
<commit_before><commit_msg>Add failing tests for social handles<commit_after>import core.tests.utils as test_utils
from core import models
class WagtailCompanyPageTestCase(test_utils.WagtailTest):
def test_twitter_handler(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_trailing_slash(self):
twitter_user = 'springloadnz'
twitter_url = 'https://twitter.com/{}/'.format(twitter_user)
twitter_handle = '@{}'.format(twitter_user)
page = models.WagtailCompanyPage(title='Springload', twitter_url=twitter_url)
self.assertEqual(page.twitter_handler, twitter_handle)
def test_twitter_handler_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.twitter_handler)
def test_github_user(self):
github_user = 'springload'
github_url = 'https://github.com/{}'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_trailing_slash(self):
github_user = 'springload'
github_url = 'https://github.com/{}/'.format(github_user)
page = models.WagtailCompanyPage(title='Springload', github_url=github_url)
self.assertEqual(page.github_user, github_user)
def test_github_user_with_no_url(self):
page = models.WagtailCompanyPage(title='Springload')
self.assertIsNone(page.github_user)
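The properties these failing tests describe are not part of the record; helpers with the behaviour the assertions expect could look like this (names and their placement on the model are assumptions):
# Illustrative sketch: derive the handle/user from the stored profile URL.
def twitter_handler(twitter_url):
    if not twitter_url:
        return None
    return '@' + twitter_url.rstrip('/').rsplit('/', 1)[-1]
def github_user(github_url):
    if not github_url:
        return None
    return github_url.rstrip('/').rsplit('/', 1)[-1]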
|
|
dd4d624ab51b610cfe1a7047368d8c305156916b
|
scripts/check_repeated_token.py
|
scripts/check_repeated_token.py
|
#!/usr/bin/env python
"""
Checker for repeated tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helper script to find suspicious lexers which produce the same token
repeatedly, i.e. for example:
.. code::
'd' Text
'a' Text
't' Text
'a' Text
'b' Text
'a' Text
's' Text
'e' Text
This script has two test modes: Check for tokens repeating more often than
a given threshold, and exclude anything but single-character tokens.
Repeated single-character tokens are quite problematic as they result in
bloated output and are usually an indication that someone is missing a + or *
in the regex.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import os
import sys
def unpack_file(path):
"""Unpack a file into text, token pairs."""
from collections import namedtuple
pair = namedtuple('TextTokenPair', ['text', 'token'])
for line in open(path).readlines():
line = line.strip()
if line:
quotation_start = line.find('\'')
quotation_end = line.rfind('\'')
text = line[quotation_start+1:quotation_end]
token = line.split()[-1]
text = text.replace('\\n', '\n')
text = text.replace('\\t', '\t')
yield pair(text, token)
def check_file(path, threshold, single_only):
current_token = ''
current_token_repeat_count = 1
is_suspicious = False
for value, token in unpack_file(path):
if single_only and len(value) > 1:
token = ''
current_token_repeat_count = 1
continue
if token != current_token:
current_token = token
current_token_repeat_count = 1
else:
current_token_repeat_count += 1
if current_token_repeat_count > threshold:
is_suspicious = True
break
if is_suspicious:
print(path)
return not is_suspicious
def main(args):
errors = 0
for dir, _, files in os.walk(args.TEST_ROOT):
for file in files:
if not file.endswith('.output'):
continue
path = os.path.join(dir, file)
if not check_file(path, args.threshold, args.single):
errors += 1
if errors > 0:
return 1
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('TEST_ROOT',
help='Root directory containing the tests')
parser.add_argument('-t', '--threshold', type=int, default=5,
                        help='Warn if a token repeats itself more often than this number.')
parser.add_argument('-s', '--single', action='store_true', default=False,
help='Only look at tokens matching a single character')
args = parser.parse_args()
sys.exit(main(args))
|
Add a script to check for repeated tokens.
|
Add a script to check for repeated tokens.
|
Python
|
bsd-2-clause
|
dscorbett/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments
|
Add a script to check for repeated tokens.
|
#!/usr/bin/env python
"""
Checker for repeated tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helper script to find suspicious lexers which produce the same token
repeatedly, i.e. for example:
.. code::
'd' Text
'a' Text
't' Text
'a' Text
'b' Text
'a' Text
's' Text
'e' Text
This script has two test modes: Check for tokens repeating more often than
a given threshold, and exclude anything but single-character tokens.
Repeated single-character tokens are quite problematic as they result in
bloated output and are usually an indication that someone is missing a + or *
in the regex.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import os
import sys
def unpack_file(path):
"""Unpack a file into text, token pairs."""
from collections import namedtuple
pair = namedtuple('TextTokenPair', ['text', 'token'])
for line in open(path).readlines():
line = line.strip()
if line:
quotation_start = line.find('\'')
quotation_end = line.rfind('\'')
text = line[quotation_start+1:quotation_end]
token = line.split()[-1]
text = text.replace('\\n', '\n')
text = text.replace('\\t', '\t')
yield pair(text, token)
def check_file(path, threshold, single_only):
current_token = ''
current_token_repeat_count = 1
is_suspicious = False
for value, token in unpack_file(path):
if single_only and len(value) > 1:
token = ''
current_token_repeat_count = 1
continue
if token != current_token:
current_token = token
current_token_repeat_count = 1
else:
current_token_repeat_count += 1
if current_token_repeat_count > threshold:
is_suspicious = True
break
if is_suspicious:
print(path)
return not is_suspicious
def main(args):
errors = 0
for dir, _, files in os.walk(args.TEST_ROOT):
for file in files:
if not file.endswith('.output'):
continue
path = os.path.join(dir, file)
if not check_file(path, args.threshold, args.single):
errors += 1
if errors > 0:
return 1
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('TEST_ROOT',
help='Root directory containing the tests')
parser.add_argument('-t', '--threshold', type=int, default=5,
                        help='Warn if a token repeats itself more often than this number.')
parser.add_argument('-s', '--single', action='store_true', default=False,
help='Only look at tokens matching a single character')
args = parser.parse_args()
sys.exit(main(args))
|
<commit_before><commit_msg>Add a script to check for repeated tokens.<commit_after>
|
#!/usr/bin/env python
"""
Checker for repeated tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helper script to find suspicious lexers which produce the same token
repeatedly, i.e. for example:
.. code::
'd' Text
'a' Text
't' Text
'a' Text
'b' Text
'a' Text
's' Text
'e' Text
This script has two test modes: Check for tokens repeating more often than
a given threshold, and exclude anything but single-character tokens.
Repeated single-character tokens are quite problematic as they result in
bloated output and are usually an indication that someone is missing a + or *
in the regex.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import os
import sys
def unpack_file(path):
"""Unpack a file into text, token pairs."""
from collections import namedtuple
pair = namedtuple('TextTokenPair', ['text', 'token'])
for line in open(path).readlines():
line = line.strip()
if line:
quotation_start = line.find('\'')
quotation_end = line.rfind('\'')
text = line[quotation_start+1:quotation_end]
token = line.split()[-1]
text = text.replace('\\n', '\n')
text = text.replace('\\t', '\t')
yield pair(text, token)
def check_file(path, threshold, single_only):
current_token = ''
current_token_repeat_count = 1
is_suspicious = False
for value, token in unpack_file(path):
if single_only and len(value) > 1:
token = ''
current_token_repeat_count = 1
continue
if token != current_token:
current_token = token
current_token_repeat_count = 1
else:
current_token_repeat_count += 1
if current_token_repeat_count > threshold:
is_suspicious = True
break
if is_suspicious:
print(path)
return not is_suspicious
def main(args):
errors = 0
for dir, _, files in os.walk(args.TEST_ROOT):
for file in files:
if not file.endswith('.output'):
continue
path = os.path.join(dir, file)
if not check_file(path, args.threshold, args.single):
errors += 1
if errors > 0:
return 1
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('TEST_ROOT',
help='Root directory containing the tests')
parser.add_argument('-t', '--threshold', type=int, default=5,
                        help='Warn if a token repeats itself more often than this number.')
parser.add_argument('-s', '--single', action='store_true', default=False,
help='Only look at tokens matching a single character')
args = parser.parse_args()
sys.exit(main(args))
|
Add a script to check for repeated tokens.#!/usr/bin/env python
"""
Checker for repeated tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helper script to find suspicious lexers which produce the same token
repeatedly, i.e. for example:
.. code::
'd' Text
'a' Text
't' Text
'a' Text
'b' Text
'a' Text
's' Text
'e' Text
This script has two test modes: Check for tokens repeating more often than
a given threshold, and exclude anything but single-character tokens.
Repeated single-character tokens are quite problematic as they result in
bloated output and are usually an indication that someone is missing a + or *
in the regex.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import os
import sys
def unpack_file(path):
"""Unpack a file into text, token pairs."""
from collections import namedtuple
pair = namedtuple('TextTokenPair', ['text', 'token'])
for line in open(path).readlines():
line = line.strip()
if line:
quotation_start = line.find('\'')
quotation_end = line.rfind('\'')
text = line[quotation_start+1:quotation_end]
token = line.split()[-1]
text = text.replace('\\n', '\n')
text = text.replace('\\t', '\t')
yield pair(text, token)
def check_file(path, threshold, single_only):
current_token = ''
current_token_repeat_count = 1
is_suspicious = False
for value, token in unpack_file(path):
if single_only and len(value) > 1:
token = ''
current_token_repeat_count = 1
continue
if token != current_token:
current_token = token
current_token_repeat_count = 1
else:
current_token_repeat_count += 1
if current_token_repeat_count > threshold:
is_suspicious = True
break
if is_suspicious:
print(path)
return not is_suspicious
def main(args):
errors = 0
for dir, _, files in os.walk(args.TEST_ROOT):
for file in files:
if not file.endswith('.output'):
continue
path = os.path.join(dir, file)
if not check_file(path, args.threshold, args.single):
errors += 1
if errors > 0:
return 1
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('TEST_ROOT',
help='Root directory containing the tests')
parser.add_argument('-t', '--threshold', type=int, default=5,
                        help='Warn if a token repeats itself more often than this number.')
parser.add_argument('-s', '--single', action='store_true', default=False,
help='Only look at tokens matching a single character')
args = parser.parse_args()
sys.exit(main(args))
|
<commit_before><commit_msg>Add a script to check for repeated tokens.<commit_after>#!/usr/bin/env python
"""
Checker for repeated tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helper script to find suspicious lexers which produce the same token
repeatedly, i.e. for example:
.. code::
'd' Text
'a' Text
't' Text
'a' Text
'b' Text
'a' Text
's' Text
'e' Text
This script has two test modes: Check for tokens repeating more often than
a given threshold, and exclude anything but single-character tokens.
Repeated single-character tokens are quite problematic as they result in
bloated output and are usually an indication that someone is missing a + or *
in the regex.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import argparse
import os
import sys
def unpack_file(path):
"""Unpack a file into text, token pairs."""
from collections import namedtuple
pair = namedtuple('TextTokenPair', ['text', 'token'])
for line in open(path).readlines():
line = line.strip()
if line:
quotation_start = line.find('\'')
quotation_end = line.rfind('\'')
text = line[quotation_start+1:quotation_end]
token = line.split()[-1]
text = text.replace('\\n', '\n')
text = text.replace('\\t', '\t')
yield pair(text, token)
def check_file(path, threshold, single_only):
current_token = ''
current_token_repeat_count = 1
is_suspicious = False
for value, token in unpack_file(path):
if single_only and len(value) > 1:
token = ''
current_token_repeat_count = 1
continue
if token != current_token:
current_token = token
current_token_repeat_count = 1
else:
current_token_repeat_count += 1
if current_token_repeat_count > threshold:
is_suspicious = True
break
if is_suspicious:
print(path)
return not is_suspicious
def main(args):
errors = 0
for dir, _, files in os.walk(args.TEST_ROOT):
for file in files:
if not file.endswith('.output'):
continue
path = os.path.join(dir, file)
if not check_file(path, args.threshold, args.single):
errors += 1
if errors > 0:
return 1
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('TEST_ROOT',
help='Root directory containing the tests')
parser.add_argument('-t', '--threshold', type=int, default=5,
                        help='Warn if a token repeats itself more often than this number.')
parser.add_argument('-s', '--single', action='store_true', default=False,
help='Only look at tokens matching a single character')
args = parser.parse_args()
sys.exit(main(args))
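The core check can also be driven directly on a single golden-output file; the path below is only an assumption about where such files live:
# Illustrative use of check_file outside the CLI entry point.
ok = check_file('tests/examplefiles/example.py.output', threshold=5, single_only=True)
if not ok:
    print('suspicious token repetition found')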
|
|
12e00be812ec8167ec9aa97426d4eebae5d5280c
|
databroker/tests/test_document.py
|
databroker/tests/test_document.py
|
import copy
import pytest
from ..core import Document, NotMutable
def test_immutable():
d = Document({'a': 1})
with pytest.raises(NotMutable):
# Update existing key
d['a'] = 2
with pytest.raises(NotMutable):
# Add new key
d['b'] = 2
with pytest.raises(NotMutable):
d.setdefault('a', 2)
with pytest.raises(NotMutable):
d.setdefault('b', 2)
with pytest.raises(NotMutable):
del d['a']
with pytest.raises(NotMutable):
d.pop('a')
with pytest.raises(NotMutable):
d.popitem()
with pytest.raises(NotMutable):
d.clear()
with pytest.raises(NotMutable):
# Update existing key
d.update({'a': 2})
with pytest.raises(NotMutable):
# Add new key
d.update({'b': 2})
def test_deep_copy():
a = Document({'x': {'y': {'z': 1}}})
b = copy.deepcopy(a)
b['x']['y']['z'] = 2
# Verify original is not modified.
assert a['x']['y']['z'] == 1
|
Test immutability and deepcopy behavior.
|
Test immutability and deepcopy behavior.
|
Python
|
bsd-3-clause
|
ericdill/databroker,ericdill/databroker
|
Test immutability and deepcopy behavior.
|
import copy
import pytest
from ..core import Document, NotMutable
def test_immutable():
d = Document({'a': 1})
with pytest.raises(NotMutable):
# Update existing key
d['a'] = 2
with pytest.raises(NotMutable):
# Add new key
d['b'] = 2
with pytest.raises(NotMutable):
d.setdefault('a', 2)
with pytest.raises(NotMutable):
d.setdefault('b', 2)
with pytest.raises(NotMutable):
del d['a']
with pytest.raises(NotMutable):
d.pop('a')
with pytest.raises(NotMutable):
d.popitem()
with pytest.raises(NotMutable):
d.clear()
with pytest.raises(NotMutable):
# Update existing key
d.update({'a': 2})
with pytest.raises(NotMutable):
# Add new key
d.update({'b': 2})
def test_deep_copy():
a = Document({'x': {'y': {'z': 1}}})
b = copy.deepcopy(a)
b['x']['y']['z'] = 2
# Verify original is not modified.
assert a['x']['y']['z'] == 1
|
<commit_before><commit_msg>Test immutability and deepcopy behavior.<commit_after>
|
import copy
import pytest
from ..core import Document, NotMutable
def test_immutable():
d = Document({'a': 1})
with pytest.raises(NotMutable):
# Update existing key
d['a'] = 2
with pytest.raises(NotMutable):
# Add new key
d['b'] = 2
with pytest.raises(NotMutable):
d.setdefault('a', 2)
with pytest.raises(NotMutable):
d.setdefault('b', 2)
with pytest.raises(NotMutable):
del d['a']
with pytest.raises(NotMutable):
d.pop('a')
with pytest.raises(NotMutable):
d.popitem()
with pytest.raises(NotMutable):
d.clear()
with pytest.raises(NotMutable):
# Update existing key
d.update({'a': 2})
with pytest.raises(NotMutable):
# Add new key
d.update({'b': 2})
def test_deep_copy():
a = Document({'x': {'y': {'z': 1}}})
b = copy.deepcopy(a)
b['x']['y']['z'] = 2
# Verify original is not modified.
assert a['x']['y']['z'] == 1
|
Test immutability and deepcopy behavior.import copy
import pytest
from ..core import Document, NotMutable
def test_immutable():
d = Document({'a': 1})
with pytest.raises(NotMutable):
# Update existing key
d['a'] = 2
with pytest.raises(NotMutable):
# Add new key
d['b'] = 2
with pytest.raises(NotMutable):
d.setdefault('a', 2)
with pytest.raises(NotMutable):
d.setdefault('b', 2)
with pytest.raises(NotMutable):
del d['a']
with pytest.raises(NotMutable):
d.pop('a')
with pytest.raises(NotMutable):
d.popitem()
with pytest.raises(NotMutable):
d.clear()
with pytest.raises(NotMutable):
# Update existing key
d.update({'a': 2})
with pytest.raises(NotMutable):
# Add new key
d.update({'b': 2})
def test_deep_copy():
a = Document({'x': {'y': {'z': 1}}})
b = copy.deepcopy(a)
b['x']['y']['z'] = 2
# Verify original is not modified.
assert a['x']['y']['z'] == 1
|
<commit_before><commit_msg>Test immutability and deepcopy behavior.<commit_after>import copy
import pytest
from ..core import Document, NotMutable
def test_immutable():
d = Document({'a': 1})
with pytest.raises(NotMutable):
# Update existing key
d['a'] = 2
with pytest.raises(NotMutable):
# Add new key
d['b'] = 2
with pytest.raises(NotMutable):
d.setdefault('a', 2)
with pytest.raises(NotMutable):
d.setdefault('b', 2)
with pytest.raises(NotMutable):
del d['a']
with pytest.raises(NotMutable):
d.pop('a')
with pytest.raises(NotMutable):
d.popitem()
with pytest.raises(NotMutable):
d.clear()
with pytest.raises(NotMutable):
# Update existing key
d.update({'a': 2})
with pytest.raises(NotMutable):
# Add new key
d.update({'b': 2})
def test_deep_copy():
a = Document({'x': {'y': {'z': 1}}})
b = copy.deepcopy(a)
b['x']['y']['z'] = 2
# Verify original is not modified.
assert a['x']['y']['z'] == 1
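For context, a minimal mapping with the behaviour these tests exercise might be built as below (the real databroker class differs in details; this is only a sketch):
# Sketch of an immutable dict: mutating methods raise NotMutable, while deep
# copies come back as ordinary mutable dicts so they can be edited safely.
import copy
class NotMutable(TypeError):
    pass
class Document(dict):
    def _readonly(self, *args, **kwargs):
        raise NotMutable('Documents are immutable')
    __setitem__ = __delitem__ = _readonly
    setdefault = pop = popitem = clear = update = _readonly
    def __deepcopy__(self, memo):
        return {key: copy.deepcopy(value, memo) for key, value in self.items()}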
|
|
5bfbd7cdadae691176bab93b754b51b3b185ddd6
|
doc/examples/plot_phase_unwrap.py
|
doc/examples/plot_phase_unwrap.py
|
"""
================
Phase Unwrapping
================
Some signals can only be observed modulo 2*pi, and this can also apply to
two- and three dimensional images. In these cases phase unwrapping is
needed to recover the underlying, unwrapped signal. In this example we will
demonstrate an algorithm [1]_ implemented in ``skimage`` at work for such a
problem.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float, color, exposure
# Load an image as a floating-point grayscale
image = color.rgb2gray(img_as_float(data.chelsea()))
# Scale the image to [0, 4*pi]
image = exposure.rescale_intensity(image, out_range=(0, 4 * np.pi))
# Create a phase-wrapped image in the interval [-pi, pi)
image_wrapped = np.angle(np.exp(1j * image))
# Perform phase unwrapping
image_unwrapped = exposure.unwrap(image_wrapped)
# Plotting
plt.figure()
plt.gray() # grayscale colormap as default
imkwargs = dict(vmin=0, vmax=4 * np.pi)
plt.subplot(221)
plt.title('Original')
plt.imshow(image, **imkwargs)
plt.colorbar()
plt.subplot(222)
plt.title('Wrapped phase')
plt.imshow(image_wrapped, vmin=-np.pi, vmax=np.pi)
plt.colorbar()
plt.subplot(223)
plt.title('After phase unwrapping')
plt.imshow(image_unwrapped)
plt.colorbar()
plt.subplot(224)
plt.title('Unwrapped minus original')
plt.imshow(image_unwrapped - image)
plt.colorbar()
plt.show()
"""
.. image:: PLOT2RST.current_figure
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35, pp. 7437, 2002
"""
|
Add example for phase unwrapping.
|
Add example for phase unwrapping.
|
Python
|
bsd-3-clause
|
jwiggins/scikit-image,bennlich/scikit-image,keflavich/scikit-image,bennlich/scikit-image,SamHames/scikit-image,pratapvardhan/scikit-image,chintak/scikit-image,paalge/scikit-image,almarklein/scikit-image,bsipocz/scikit-image,ofgulban/scikit-image,ofgulban/scikit-image,rjeli/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,oew1v07/scikit-image,jwiggins/scikit-image,newville/scikit-image,Midafi/scikit-image,vighneshbirodkar/scikit-image,emon10005/scikit-image,chintak/scikit-image,rjeli/scikit-image,SamHames/scikit-image,keflavich/scikit-image,ClinicalGraphics/scikit-image,paalge/scikit-image,Hiyorimi/scikit-image,GaZ3ll3/scikit-image,WarrenWeckesser/scikits-image,blink1073/scikit-image,vighneshbirodkar/scikit-image,Midafi/scikit-image,robintw/scikit-image,blink1073/scikit-image,chintak/scikit-image,chintak/scikit-image,juliusbierk/scikit-image,bsipocz/scikit-image,Hiyorimi/scikit-image,warmspringwinds/scikit-image,juliusbierk/scikit-image,SamHames/scikit-image,almarklein/scikit-image,ajaybhat/scikit-image,Britefury/scikit-image,michaelaye/scikit-image,youprofit/scikit-image,robintw/scikit-image,GaZ3ll3/scikit-image,WarrenWeckesser/scikits-image,emon10005/scikit-image,rjeli/scikit-image,youprofit/scikit-image,chriscrosscutler/scikit-image,Britefury/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,almarklein/scikit-image,paalge/scikit-image,warmspringwinds/scikit-image,dpshelio/scikit-image,newville/scikit-image,ClinicalGraphics/scikit-image,ajaybhat/scikit-image,michaelpacer/scikit-image,oew1v07/scikit-image,SamHames/scikit-image,chriscrosscutler/scikit-image,vighneshbirodkar/scikit-image,michaelpacer/scikit-image,pratapvardhan/scikit-image
|
Add example for phase unwrapping.
|
"""
================
Phase Unwrapping
================
Some signals can only be observed modulo 2*pi, and this can also apply to
two- and three dimensional images. In these cases phase unwrapping is
needed to recover the underlying, unwrapped signal. In this example we will
demonstrate an algorithm [1]_ implemented in ``skimage`` at work for such a
problem.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float, color, exposure
# Load an image as a floating-point grayscale
image = color.rgb2gray(img_as_float(data.chelsea()))
# Scale the image to [0, 4*pi]
image = exposure.rescale_intensity(image, out_range=(0, 4 * np.pi))
# Create a phase-wrapped image in the interval [-pi, pi)
image_wrapped = np.angle(np.exp(1j * image))
# Perform phase unwrapping
image_unwrapped = exposure.unwrap(image_wrapped)
# Plotting
plt.figure()
plt.gray() # grayscale colormap as default
imkwargs = dict(vmin=0, vmax=4 * np.pi)
plt.subplot(221)
plt.title('Original')
plt.imshow(image, **imkwargs)
plt.colorbar()
plt.subplot(222)
plt.title('Wrapped phase')
plt.imshow(image_wrapped, vmin=-np.pi, vmax=np.pi)
plt.colorbar()
plt.subplot(223)
plt.title('After phase unwrapping')
plt.imshow(image_unwrapped)
plt.colorbar()
plt.subplot(224)
plt.title('Unwrapped minus original')
plt.imshow(image_unwrapped - image)
plt.colorbar()
plt.show()
"""
.. image:: PLOT2RST.current_figure
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35, pp. 7437, 2002
"""
|
<commit_before><commit_msg>Add example for phase unwrapping.<commit_after>
|
"""
================
Phase Unwrapping
================
Some signals can only be observed modulo 2*pi, and this can also apply to
two- and three dimensional images. In these cases phase unwrapping is
needed to recover the underlying, unwrapped signal. In this example we will
demonstrate an algorithm [1]_ implemented in ``skimage`` at work for such a
problem.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float, color, exposure
# Load an image as a floating-point grayscale
image = color.rgb2gray(img_as_float(data.chelsea()))
# Scale the image to [0, 4*pi]
image = exposure.rescale_intensity(image, out_range=(0, 4 * np.pi))
# Create a phase-wrapped image in the interval [-pi, pi)
image_wrapped = np.angle(np.exp(1j * image))
# Perform phase unwrapping
image_unwrapped = exposure.unwrap(image_wrapped)
# Plotting
plt.figure()
plt.gray() # grayscale colormap as default
imkwargs = dict(vmin=0, vmax=4 * np.pi)
plt.subplot(221)
plt.title('Original')
plt.imshow(image, **imkwargs)
plt.colorbar()
plt.subplot(222)
plt.title('Wrapped phase')
plt.imshow(image_wrapped, vmin=-np.pi, vmax=np.pi)
plt.colorbar()
plt.subplot(223)
plt.title('After phase unwrapping')
plt.imshow(image_unwrapped)
plt.colorbar()
plt.subplot(224)
plt.title('Unwrapped minus original')
plt.imshow(image_unwrapped - image)
plt.colorbar()
plt.show()
"""
.. image:: PLOT2RST.current_figure
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35, pp. 7437, 2002
"""
|
Add example for phase unwrapping."""
================
Phase Unwrapping
================
Some signals can only be observed modulo 2*pi, and this can also apply to
two- and three dimensional images. In these cases phase unwrapping is
needed to recover the underlying, unwrapped signal. In this example we will
demonstrate an algorithm [1]_ implemented in ``skimage`` at work for such a
problem.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float, color, exposure
# Load an image as a floating-point grayscale
image = color.rgb2gray(img_as_float(data.chelsea()))
# Scale the image to [0, 4*pi]
image = exposure.rescale_intensity(image, out_range=(0, 4 * np.pi))
# Create a phase-wrapped image in the interval [-pi, pi)
image_wrapped = np.angle(np.exp(1j * image))
# Perform phase unwrapping
image_unwrapped = exposure.unwrap(image_wrapped)
# Plotting
plt.figure()
plt.gray() # grayscale colormap as default
imkwargs = dict(vmin=0, vmax=4 * np.pi)
plt.subplot(221)
plt.title('Original')
plt.imshow(image, **imkwargs)
plt.colorbar()
plt.subplot(222)
plt.title('Wrapped phase')
plt.imshow(image_wrapped, vmin=-np.pi, vmax=np.pi)
plt.colorbar()
plt.subplot(223)
plt.title('After phase unwrapping')
plt.imshow(image_unwrapped)
plt.colorbar()
plt.subplot(224)
plt.title('Unwrapped minus original')
plt.imshow(image_unwrapped - image)
plt.colorbar()
plt.show()
"""
.. image:: PLOT2RST.current_figure
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35, pp. 7437, 2002
"""
|
<commit_before><commit_msg>Add example for phase unwrapping.<commit_after>"""
================
Phase Unwrapping
================
Some signals can only be observed modulo 2*pi, and this can also apply to
two- and three dimensional images. In these cases phase unwrapping is
needed to recover the underlying, unwrapped signal. In this example we will
demonstrate an algorithm [1]_ implemented in ``skimage`` at work for such a
problem.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage import data, img_as_float, color, exposure
# Load an image as a floating-point grayscale
image = color.rgb2gray(img_as_float(data.chelsea()))
# Scale the image to [0, 4*pi]
image = exposure.rescale_intensity(image, out_range=(0, 4 * np.pi))
# Create a phase-wrapped image in the interval [-pi, pi)
image_wrapped = np.angle(np.exp(1j * image))
# Perform phase unwrapping
image_unwrapped = exposure.unwrap(image_wrapped)
# Plotting
plt.figure()
plt.gray() # grayscale colormap as default
imkwargs = dict(vmin=0, vmax=4 * np.pi)
plt.subplot(221)
plt.title('Original')
plt.imshow(image, **imkwargs)
plt.colorbar()
plt.subplot(222)
plt.title('Wrapped phase')
plt.imshow(image_wrapped, vmin=-np.pi, vmax=np.pi)
plt.colorbar()
plt.subplot(223)
plt.title('After phase unwrapping')
plt.imshow(image_unwrapped)
plt.colorbar()
plt.subplot(224)
plt.title('Unwrapped minus original')
plt.imshow(image_unwrapped - image)
plt.colorbar()
plt.show()
"""
.. image:: PLOT2RST.current_figure
References
----------
.. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path", Journal Applied Optics, Vol. 41, No. 35, pp. 7437, 2002
"""
|
|
933ac64d169aecdaf072d9e8147f8dcf1d2b67d0
|
stationspinner/accounting/management/commands/findapiupdate.py
|
stationspinner/accounting/management/commands/findapiupdate.py
|
from django.core.management.base import BaseCommand, CommandError
from stationspinner.accounting.models import APIUpdate, APICall
from stationspinner.character.models import CharacterSheet
from stationspinner.corporation.models import CorporationSheet
import sys
class Command(BaseCommand):
args = '<character/corporation name> <call type> <call name>'
help = 'Find the APIUpdate PK for a specific call associated with a character/corporation.'
def handle(self, *args, **options):
if not len(args) == 3:
print self.help, self.args
sys.exit(1)
try:
entity = CharacterSheet.objects.get(name=args[0])
except CharacterSheet.DoesNotExist:
try:
entity = CorporationSheet.objects.get(corporationName=args[0])
except CorporationSheet.DoesNotExist:
self.stdout.write('No character/corporation named "{0}" exists.'.format(args[0]))
sys.exit(1)
key = entity.owner_key
try:
call = APICall.objects.get(type__iexact=args[1],
name__iexact=args[2])
except APICall.DoesNotExist:
self.stdout.write('No API Call by that name.')
sys.exit(1)
if not call.accessMask & key.accessMask > 0:
self.stdout.write('Key {0} has not the correct mask for "{1}.{2}".'.format(
key.keyID,
call.type,
call.name
))
sys.exit(1)
updates = APIUpdate.objects.filter(apicall=call,
apikey=key,
owner=entity.pk)
if updates.count() == 0:
            self.stdout.write('APICall for "{0}.{1}" does not exist.'.format(
call.type,
call.name
))
sys.exit(1)
for update in updates:
self.stdout.write('PK "{0}", characterID {4} keyID {3}, last updated {1}, cached until {2}.'.format(
update.pk,
update.last_update,
update.cached_until,
update.apikey.keyID,
entity.pk
))
|
Add convenience command for finding apiupdates to execute manually
|
Add convenience command for finding apiupdates to execute manually
|
Python
|
agpl-3.0
|
kriberg/stationspinner,kriberg/stationspinner
|
Add convenience command for finding apiupdates to execute manually
|
from django.core.management.base import BaseCommand, CommandError
from stationspinner.accounting.models import APIUpdate, APICall
from stationspinner.character.models import CharacterSheet
from stationspinner.corporation.models import CorporationSheet
import sys
class Command(BaseCommand):
args = '<character/corporation name> <call type> <call name>'
help = 'Find the APIUpdate PK for a specific call associated with a character/corporation.'
def handle(self, *args, **options):
if not len(args) == 3:
print self.help, self.args
sys.exit(1)
try:
entity = CharacterSheet.objects.get(name=args[0])
except CharacterSheet.DoesNotExist:
try:
entity = CorporationSheet.objects.get(corporationName=args[0])
except CorporationSheet.DoesNotExist:
self.stdout.write('No character/corporation named "{0}" exists.'.format(args[0]))
sys.exit(1)
key = entity.owner_key
try:
call = APICall.objects.get(type__iexact=args[1],
name__iexact=args[2])
except APICall.DoesNotExist:
self.stdout.write('No API Call by that name.')
sys.exit(1)
if not call.accessMask & key.accessMask > 0:
self.stdout.write('Key {0} has not the correct mask for "{1}.{2}".'.format(
key.keyID,
call.type,
call.name
))
sys.exit(1)
updates = APIUpdate.objects.filter(apicall=call,
apikey=key,
owner=entity.pk)
if updates.count() == 0:
            self.stdout.write('APICall for "{0}.{1}" does not exist.'.format(
call.type,
call.name
))
sys.exit(1)
for update in updates:
self.stdout.write('PK "{0}", characterID {4} keyID {3}, last updated {1}, cached until {2}.'.format(
update.pk,
update.last_update,
update.cached_until,
update.apikey.keyID,
entity.pk
))
|
<commit_before><commit_msg>Add convenience command for finding apiupdates to execute manually<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from stationspinner.accounting.models import APIUpdate, APICall
from stationspinner.character.models import CharacterSheet
from stationspinner.corporation.models import CorporationSheet
import sys
class Command(BaseCommand):
args = '<character/corporation name> <call type> <call name>'
help = 'Find the APIUpdate PK for a specific call associated with a character/corporation.'
def handle(self, *args, **options):
if not len(args) == 3:
print self.help, self.args
sys.exit(1)
try:
entity = CharacterSheet.objects.get(name=args[0])
except CharacterSheet.DoesNotExist:
try:
entity = CorporationSheet.objects.get(corporationName=args[0])
except CorporationSheet.DoesNotExist:
self.stdout.write('No character/corporation named "{0}" exists.'.format(args[0]))
sys.exit(1)
key = entity.owner_key
try:
call = APICall.objects.get(type__iexact=args[1],
name__iexact=args[2])
except APICall.DoesNotExist:
self.stdout.write('No API Call by that name.')
sys.exit(1)
if not call.accessMask & key.accessMask > 0:
self.stdout.write('Key {0} has not the correct mask for "{1}.{2}".'.format(
key.keyID,
call.type,
call.name
))
sys.exit(1)
updates = APIUpdate.objects.filter(apicall=call,
apikey=key,
owner=entity.pk)
if updates.count() == 0:
            self.stdout.write('APICall for "{0}.{1}" does not exist.'.format(
call.type,
call.name
))
sys.exit(1)
for update in updates:
self.stdout.write('PK "{0}", characterID {4} keyID {3}, last updated {1}, cached until {2}.'.format(
update.pk,
update.last_update,
update.cached_until,
update.apikey.keyID,
entity.pk
))
|
Add convenience command for finding apiupdates to execute manuallyfrom django.core.management.base import BaseCommand, CommandError
from stationspinner.accounting.models import APIUpdate, APICall
from stationspinner.character.models import CharacterSheet
from stationspinner.corporation.models import CorporationSheet
import sys
class Command(BaseCommand):
args = '<character/corporation name> <call type> <call name>'
help = 'Find the APIUpdate PK for a specific call associated with a character/corporation.'
def handle(self, *args, **options):
if not len(args) == 3:
print self.help, self.args
sys.exit(1)
try:
entity = CharacterSheet.objects.get(name=args[0])
except CharacterSheet.DoesNotExist:
try:
entity = CorporationSheet.objects.get(corporationName=args[0])
except CorporationSheet.DoesNotExist:
self.stdout.write('No character/corporation named "{0}" exists.'.format(args[0]))
sys.exit(1)
key = entity.owner_key
try:
call = APICall.objects.get(type__iexact=args[1],
name__iexact=args[2])
except APICall.DoesNotExist:
self.stdout.write('No API Call by that name.')
sys.exit(1)
if not call.accessMask & key.accessMask > 0:
self.stdout.write('Key {0} has not the correct mask for "{1}.{2}".'.format(
key.keyID,
call.type,
call.name
))
sys.exit(1)
updates = APIUpdate.objects.filter(apicall=call,
apikey=key,
owner=entity.pk)
if updates.count() == 0:
            self.stdout.write('APICall for "{0}.{1}" does not exist.'.format(
call.type,
call.name
))
sys.exit(1)
for update in updates:
self.stdout.write('PK "{0}", characterID {4} keyID {3}, last updated {1}, cached until {2}.'.format(
update.pk,
update.last_update,
update.cached_until,
update.apikey.keyID,
entity.pk
))
|
<commit_before><commit_msg>Add convenience command for finding apiupdates to execute manually<commit_after>from django.core.management.base import BaseCommand, CommandError
from stationspinner.accounting.models import APIUpdate, APICall
from stationspinner.character.models import CharacterSheet
from stationspinner.corporation.models import CorporationSheet
import sys
class Command(BaseCommand):
args = '<character/corporation name> <call type> <call name>'
help = 'Find the APIUpdate PK for a specific call associated with a character/corporation.'
def handle(self, *args, **options):
if not len(args) == 3:
print self.help, self.args
sys.exit(1)
try:
entity = CharacterSheet.objects.get(name=args[0])
except CharacterSheet.DoesNotExist:
try:
entity = CorporationSheet.objects.get(corporationName=args[0])
except CorporationSheet.DoesNotExist:
self.stdout.write('No character/corporation named "{0}" exists.'.format(args[0]))
sys.exit(1)
key = entity.owner_key
try:
call = APICall.objects.get(type__iexact=args[1],
name__iexact=args[2])
except APICall.DoesNotExist:
self.stdout.write('No API Call by that name.')
sys.exit(1)
if not call.accessMask & key.accessMask > 0:
self.stdout.write('Key {0} has not the correct mask for "{1}.{2}".'.format(
key.keyID,
call.type,
call.name
))
sys.exit(1)
updates = APIUpdate.objects.filter(apicall=call,
apikey=key,
owner=entity.pk)
if updates.count() == 0:
            self.stdout.write('APICall for "{0}.{1}" does not exist.'.format(
call.type,
call.name
))
sys.exit(1)
for update in updates:
self.stdout.write('PK "{0}", characterID {4} keyID {3}, last updated {1}, cached until {2}.'.format(
update.pk,
update.last_update,
update.cached_until,
update.apikey.keyID,
entity.pk
))
|
|
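Illustrative usage sketch for the management command record above, not part of the dataset: it assumes the module is saved as find_apiupdate under the app's management/commands directory and runs inside a configured stationspinner Django environment; the character name and API call identifiers are placeholders.

# Hypothetical invocation from a Django shell or script; every argument
# value below is a placeholder, not data taken from the record.
from django.core.management import call_command

# Signature per the command's help: <character/corporation name> <call type> <call name>
call_command('find_apiupdate', 'Some Pilot', 'char', 'AssetList')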
9dbfb4faae1afbed21d0e315b5148db5310643fe
|
tests/util/api.py
|
tests/util/api.py
|
import pytest, zerorpc
from .base import TestDB
@pytest.fixture(scope='session')
def api_client(config, request):
test = TestDB()
api_server_loc = config.get('API_SERVER_LOC')
from inbox.server.api import API
from inbox.server.util.concurrency import make_zerorpc
test.server = make_zerorpc(API, api_server_loc)
test.client = zerorpc.Client(timeout=5)
test.client.connect(api_server_loc)
request.addfinalizer(test.destroy)
return test.client
|
Test fixture for API clients
|
Test fixture for API clients
|
Python
|
agpl-3.0
|
gale320/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,PriviPK/privipk-sync-engine,EthanBlackburn/sync-engine,ErinCall/sync-engine,jobscore/sync-engine,Eagles2F/sync-engine,rmasters/inbox,gale320/sync-engine,wakermahmud/sync-engine,EthanBlackburn/sync-engine,wakermahmud/sync-engine,jobscore/sync-engine,PriviPK/privipk-sync-engine,gale320/sync-engine,wakermahmud/sync-engine,gale320/sync-engine,Eagles2F/sync-engine,rmasters/inbox,closeio/nylas,gale320/sync-engine,ErinCall/sync-engine,wakermahmud/sync-engine,closeio/nylas,nylas/sync-engine,wakermahmud/sync-engine,EthanBlackburn/sync-engine,EthanBlackburn/sync-engine,PriviPK/privipk-sync-engine,rmasters/inbox,nylas/sync-engine,nylas/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,rmasters/inbox,jobscore/sync-engine,PriviPK/privipk-sync-engine,jobscore/sync-engine,EthanBlackburn/sync-engine,closeio/nylas,ErinCall/sync-engine,closeio/nylas,ErinCall/sync-engine,Eagles2F/sync-engine,Eagles2F/sync-engine
|
Test fixture for API clients
|
import pytest, zerorpc
from .base import TestDB
@pytest.fixture(scope='session')
def api_client(config, request):
test = TestDB()
api_server_loc = config.get('API_SERVER_LOC')
from inbox.server.api import API
from inbox.server.util.concurrency import make_zerorpc
test.server = make_zerorpc(API, api_server_loc)
test.client = zerorpc.Client(timeout=5)
test.client.connect(api_server_loc)
request.addfinalizer(test.destroy)
return test.client
|
<commit_before><commit_msg>Test fixture for API clients<commit_after>
|
import pytest, zerorpc
from .base import TestDB
@pytest.fixture(scope='session')
def api_client(config, request):
test = TestDB()
api_server_loc = config.get('API_SERVER_LOC')
from inbox.server.api import API
from inbox.server.util.concurrency import make_zerorpc
test.server = make_zerorpc(API, api_server_loc)
test.client = zerorpc.Client(timeout=5)
test.client.connect(api_server_loc)
request.addfinalizer(test.destroy)
return test.client
|
Test fixture for API clientsimport pytest, zerorpc
from .base import TestDB
@pytest.fixture(scope='session')
def api_client(config, request):
test = TestDB()
api_server_loc = config.get('API_SERVER_LOC')
from inbox.server.api import API
from inbox.server.util.concurrency import make_zerorpc
test.server = make_zerorpc(API, api_server_loc)
test.client = zerorpc.Client(timeout=5)
test.client.connect(api_server_loc)
request.addfinalizer(test.destroy)
return test.client
|
<commit_before><commit_msg>Test fixture for API clients<commit_after>import pytest, zerorpc
from .base import TestDB
@pytest.fixture(scope='session')
def api_client(config, request):
test = TestDB()
api_server_loc = config.get('API_SERVER_LOC')
from inbox.server.api import API
from inbox.server.util.concurrency import make_zerorpc
test.server = make_zerorpc(API, api_server_loc)
test.client = zerorpc.Client(timeout=5)
test.client.connect(api_server_loc)
request.addfinalizer(test.destroy)
return test.client
|
|
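Minimal sketch of a test consuming the session-scoped fixture above. It only checks what the fixture is documented to return, a connected zerorpc.Client, since the remote API's method names are not shown in the record.

import zerorpc

def test_api_client_is_zerorpc(api_client):
    # The fixture yields the client it connected to API_SERVER_LOC.
    assert isinstance(api_client, zerorpc.Client)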
aaaaaa747fad0e0ac66190fa0853572681f9443c
|
falcom/test/read_example_file.py
|
falcom/test/read_example_file.py
|
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
from os.path import join, dirname
from unittest import TestCase
class ExampleFileTest (TestCase):
def setUp (self):
format_str = getattr(self, "format_str", "{}")
full_filename = format_str.format(self.filename)
files_dir = join(dirname(__file__), "files")
file_path = join(files_dir, full_filename)
with open(file_path, "r") as f:
self.file_data = f.read()
|
Write TestCase subclass that reads an example file
|
Write TestCase subclass that reads an example file
|
Python
|
bsd-3-clause
|
mlibrary/image-conversion-and-validation,mlibrary/image-conversion-and-validation
|
Write TestCase subclass that reads an example file
|
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
from os.path import join, dirname
from unittest import TestCase
class ExampleFileTest (TestCase):
def setUp (self):
format_str = getattr(self, "format_str", "{}")
full_filename = format_str.format(self.filename)
files_dir = join(dirname(__file__), "files")
file_path = join(files_dir, full_filename)
with open(file_path, "r") as f:
self.file_data = f.read()
|
<commit_before><commit_msg>Write TestCase subclass that reads an example file<commit_after>
|
# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
from os.path import join, dirname
from unittest import TestCase
class ExampleFileTest (TestCase):
def setUp (self):
format_str = getattr(self, "format_str", "{}")
full_filename = format_str.format(self.filename)
files_dir = join(dirname(__file__), "files")
file_path = join(files_dir, full_filename)
with open(file_path, "r") as f:
self.file_data = f.read()
|
Write TestCase subclass that reads an example file# Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
from os.path import join, dirname
from unittest import TestCase
class ExampleFileTest (TestCase):
def setUp (self):
format_str = getattr(self, "format_str", "{}")
full_filename = format_str.format(self.filename)
files_dir = join(dirname(__file__), "files")
file_path = join(files_dir, full_filename)
with open(file_path, "r") as f:
self.file_data = f.read()
|
<commit_before><commit_msg>Write TestCase subclass that reads an example file<commit_after># Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
from os.path import join, dirname
from unittest import TestCase
class ExampleFileTest (TestCase):
def setUp (self):
format_str = getattr(self, "format_str", "{}")
full_filename = format_str.format(self.filename)
files_dir = join(dirname(__file__), "files")
file_path = join(files_dir, full_filename)
with open(file_path, "r") as f:
self.file_data = f.read()
|
|
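Sketch of a concrete test built on ExampleFileTest, assuming the package layout matches the file path and that a fixture file exists under falcom/test/files/; the class name and file names are invented for illustration.

from unittest import main
from falcom.test.read_example_file import ExampleFileTest

class ExampleJsonFileTest(ExampleFileTest):
    filename = "sample"        # hypothetical fixture under falcom/test/files/
    format_str = "{}.json"     # setUp() expands this to "sample.json"

    def test_data_was_loaded(self):
        self.assertGreater(len(self.file_data), 0)

if __name__ == "__main__":
    main()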
46ad1e9af94df2b2ff38030223f456d07ec10de1
|
tests/test_array.py
|
tests/test_array.py
|
import numpy as np
import sfs
import pytest
linear_cases = [
((3, 1), [[-1, 0, 0], [0, 0, 0], [1, 0, 0]]),
((2, 1), [[-0.5, 0, 0], [0.5, 0, 0]]),
((1, 1), [[0, 0, 0]]),
((3, 0.5), [[-0.5, 0, 0], [0, 0, 0], [0.5, 0, 0]]),
((2, 1, [0.5, 2, 3]), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([0.5, 2, 3])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
]
@pytest.mark.parametrize("args, result", linear_cases)
def test_linear(args, result):
a = sfs.array.linear(*args)
assert a.dtype == np.float64
assert np.all(a == result)
def test_linear_named_args():
a = sfs.array.linear(N=2, dx=0.5, center=[0.25, 1, 2])
assert np.all(a == [[0, 1, 2], [0.5, 1, 2]])
|
Add a first test file
|
Add a first test file
To be used with py.test
|
Python
|
mit
|
AchimTuran/sfs-python,chris-hld/sfs-python,sfstoolbox/sfs-python,chris-hld/sfs-python,sfstoolbox/sfs-python
|
Add a first test file
To be used with py.test
|
import numpy as np
import sfs
import pytest
linear_cases = [
((3, 1), [[-1, 0, 0], [0, 0, 0], [1, 0, 0]]),
((2, 1), [[-0.5, 0, 0], [0.5, 0, 0]]),
((1, 1), [[0, 0, 0]]),
((3, 0.5), [[-0.5, 0, 0], [0, 0, 0], [0.5, 0, 0]]),
((2, 1, [0.5, 2, 3]), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([0.5, 2, 3])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
]
@pytest.mark.parametrize("args, result", linear_cases)
def test_linear(args, result):
a = sfs.array.linear(*args)
assert a.dtype == np.float64
assert np.all(a == result)
def test_linear_named_args():
a = sfs.array.linear(N=2, dx=0.5, center=[0.25, 1, 2])
assert np.all(a == [[0, 1, 2], [0.5, 1, 2]])
|
<commit_before><commit_msg>Add a first test file
To be used with py.test<commit_after>
|
import numpy as np
import sfs
import pytest
linear_cases = [
((3, 1), [[-1, 0, 0], [0, 0, 0], [1, 0, 0]]),
((2, 1), [[-0.5, 0, 0], [0.5, 0, 0]]),
((1, 1), [[0, 0, 0]]),
((3, 0.5), [[-0.5, 0, 0], [0, 0, 0], [0.5, 0, 0]]),
((2, 1, [0.5, 2, 3]), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([0.5, 2, 3])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
]
@pytest.mark.parametrize("args, result", linear_cases)
def test_linear(args, result):
a = sfs.array.linear(*args)
assert a.dtype == np.float64
assert np.all(a == result)
def test_linear_named_args():
a = sfs.array.linear(N=2, dx=0.5, center=[0.25, 1, 2])
assert np.all(a == [[0, 1, 2], [0.5, 1, 2]])
|
Add a first test file
To be used with py.testimport numpy as np
import sfs
import pytest
linear_cases = [
((3, 1), [[-1, 0, 0], [0, 0, 0], [1, 0, 0]]),
((2, 1), [[-0.5, 0, 0], [0.5, 0, 0]]),
((1, 1), [[0, 0, 0]]),
((3, 0.5), [[-0.5, 0, 0], [0, 0, 0], [0.5, 0, 0]]),
((2, 1, [0.5, 2, 3]), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([0.5, 2, 3])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
]
@pytest.mark.parametrize("args, result", linear_cases)
def test_linear(args, result):
a = sfs.array.linear(*args)
assert a.dtype == np.float64
assert np.all(a == result)
def test_linear_named_args():
a = sfs.array.linear(N=2, dx=0.5, center=[0.25, 1, 2])
assert np.all(a == [[0, 1, 2], [0.5, 1, 2]])
|
<commit_before><commit_msg>Add a first test file
To be used with py.test<commit_after>import numpy as np
import sfs
import pytest
linear_cases = [
((3, 1), [[-1, 0, 0], [0, 0, 0], [1, 0, 0]]),
((2, 1), [[-0.5, 0, 0], [0.5, 0, 0]]),
((1, 1), [[0, 0, 0]]),
((3, 0.5), [[-0.5, 0, 0], [0, 0, 0], [0.5, 0, 0]]),
((2, 1, [0.5, 2, 3]), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([0.5, 2, 3])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.array([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]])), [[0, 2, 3], [1, 2, 3]]),
((2, 1, np.matrix([[0.5, 2, 3]]).transpose()), [[0, 2, 3], [1, 2, 3]]),
]
@pytest.mark.parametrize("args, result", linear_cases)
def test_linear(args, result):
a = sfs.array.linear(*args)
assert a.dtype == np.float64
assert np.all(a == result)
def test_linear_named_args():
a = sfs.array.linear(N=2, dx=0.5, center=[0.25, 1, 2])
assert np.all(a == [[0, 1, 2], [0.5, 1, 2]])
|
|
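Quick interactive check of the behaviour the first parametrised case asserts, assuming the sfs version these tests target (later sfs releases changed the return type and signature of sfs.array.linear).

import numpy as np
import sfs

a = sfs.array.linear(3, 1)          # three sources, spacing 1, default center
assert a.dtype == np.float64
assert np.all(a == [[-1, 0, 0], [0, 0, 0], [1, 0, 0]])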
b936df053637f170f583f9fcff8cc4edc4e2c696
|
tests/test_green.py
|
tests/test_green.py
|
import datetime
import unittest
import gevent
import gevent.socket
import pymssql
from .helpers import pymssqlconn
class GreenletTests(unittest.TestCase):
def greenlet_run(self, num):
with pymssqlconn() as conn:
cur = conn.cursor()
cur.execute("""
WAITFOR DELAY '00:00:05' -- sleep for 5 seconds
SELECT CURRENT_TIMESTAMP
""")
row = cur.fetchone()
def _run_all_greenlets(self):
greenlets = []
dt1 = datetime.datetime.now()
for i in range(5):
gevent.sleep(1)
greenlets.append(gevent.spawn(self.greenlet_run, i))
gevent.joinall(greenlets)
dt2 = datetime.datetime.now()
return dt2 - dt1
def test_fast(self):
def wait_callback(read_fileno):
gevent.socket.wait_read(read_fileno)
pymssql.set_wait_callback(wait_callback)
elapsed_time = self._run_all_greenlets()
self.assertTrue(
elapsed_time < datetime.timedelta(seconds=20),
'elapsed_time < 20 seconds')
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GreenletTests))
if __name__ == '__main__':
unittest.main()
|
Add test for green query with gevent
|
Add test for green query with gevent
|
Python
|
lgpl-2.1
|
bladams/pymssql,Aloomaio/pymssql,JimDennis/pymssql,klothe/pymssql,klothe/pymssql,Aloomaio/pymssql,zerolugithub/pymssql,bladams/pymssql,pymssql/pymssql,ramiro/pymssql,bladams/pymssql,JimDennis/pymssql,ramiro/pymssql,JimDennis/pymssql,Aloomaio/pymssql,pymssql/pymssql,klothe/pymssql,ramiro/pymssql,zerolugithub/pymssql,zerolugithub/pymssql
|
Add test for green query with gevent
|
import datetime
import unittest
import gevent
import gevent.socket
import pymssql
from .helpers import pymssqlconn
class GreenletTests(unittest.TestCase):
def greenlet_run(self, num):
with pymssqlconn() as conn:
cur = conn.cursor()
cur.execute("""
WAITFOR DELAY '00:00:05' -- sleep for 5 seconds
SELECT CURRENT_TIMESTAMP
""")
row = cur.fetchone()
def _run_all_greenlets(self):
greenlets = []
dt1 = datetime.datetime.now()
for i in range(5):
gevent.sleep(1)
greenlets.append(gevent.spawn(self.greenlet_run, i))
gevent.joinall(greenlets)
dt2 = datetime.datetime.now()
return dt2 - dt1
def test_fast(self):
def wait_callback(read_fileno):
gevent.socket.wait_read(read_fileno)
pymssql.set_wait_callback(wait_callback)
elapsed_time = self._run_all_greenlets()
self.assertTrue(
elapsed_time < datetime.timedelta(seconds=20),
'elapsed_time < 20 seconds')
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GreenletTests))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for green query with gevent<commit_after>
|
import datetime
import unittest
import gevent
import gevent.socket
import pymssql
from .helpers import pymssqlconn
class GreenletTests(unittest.TestCase):
def greenlet_run(self, num):
with pymssqlconn() as conn:
cur = conn.cursor()
cur.execute("""
WAITFOR DELAY '00:00:05' -- sleep for 5 seconds
SELECT CURRENT_TIMESTAMP
""")
row = cur.fetchone()
def _run_all_greenlets(self):
greenlets = []
dt1 = datetime.datetime.now()
for i in range(5):
gevent.sleep(1)
greenlets.append(gevent.spawn(self.greenlet_run, i))
gevent.joinall(greenlets)
dt2 = datetime.datetime.now()
return dt2 - dt1
def test_fast(self):
def wait_callback(read_fileno):
gevent.socket.wait_read(read_fileno)
pymssql.set_wait_callback(wait_callback)
elapsed_time = self._run_all_greenlets()
self.assertTrue(
elapsed_time < datetime.timedelta(seconds=20),
'elapsed_time < 20 seconds')
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GreenletTests))
if __name__ == '__main__':
unittest.main()
|
Add test for green query with geventimport datetime
import unittest
import gevent
import gevent.socket
import pymssql
from .helpers import pymssqlconn
class GreenletTests(unittest.TestCase):
def greenlet_run(self, num):
with pymssqlconn() as conn:
cur = conn.cursor()
cur.execute("""
WAITFOR DELAY '00:00:05' -- sleep for 5 seconds
SELECT CURRENT_TIMESTAMP
""")
row = cur.fetchone()
def _run_all_greenlets(self):
greenlets = []
dt1 = datetime.datetime.now()
for i in range(5):
gevent.sleep(1)
greenlets.append(gevent.spawn(self.greenlet_run, i))
gevent.joinall(greenlets)
dt2 = datetime.datetime.now()
return dt2 - dt1
def test_fast(self):
def wait_callback(read_fileno):
gevent.socket.wait_read(read_fileno)
pymssql.set_wait_callback(wait_callback)
elapsed_time = self._run_all_greenlets()
self.assertTrue(
elapsed_time < datetime.timedelta(seconds=20),
'elapsed_time < 20 seconds')
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GreenletTests))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for green query with gevent<commit_after>import datetime
import unittest
import gevent
import gevent.socket
import pymssql
from .helpers import pymssqlconn
class GreenletTests(unittest.TestCase):
def greenlet_run(self, num):
with pymssqlconn() as conn:
cur = conn.cursor()
cur.execute("""
WAITFOR DELAY '00:00:05' -- sleep for 5 seconds
SELECT CURRENT_TIMESTAMP
""")
row = cur.fetchone()
def _run_all_greenlets(self):
greenlets = []
dt1 = datetime.datetime.now()
for i in range(5):
gevent.sleep(1)
greenlets.append(gevent.spawn(self.greenlet_run, i))
gevent.joinall(greenlets)
dt2 = datetime.datetime.now()
return dt2 - dt1
def test_fast(self):
def wait_callback(read_fileno):
gevent.socket.wait_read(read_fileno)
pymssql.set_wait_callback(wait_callback)
elapsed_time = self._run_all_greenlets()
self.assertTrue(
elapsed_time < datetime.timedelta(seconds=20),
'elapsed_time < 20 seconds')
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(GreenletTests))
if __name__ == '__main__':
unittest.main()
|
|
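Standalone sketch of the wait-callback pattern the test exercises, so pymssql queries yield to other greenlets while waiting on the database socket. The connection parameters are placeholders; the test itself obtains its connection from tests.helpers.pymssqlconn, which is not shown in the record.

import gevent.socket
import pymssql

def wait_callback(read_fileno):
    # Cooperatively wait until the database socket is readable.
    gevent.socket.wait_read(read_fileno)

pymssql.set_wait_callback(wait_callback)

# Placeholder credentials, replace with a reachable SQL Server instance.
conn = pymssql.connect(server='localhost', user='sa',
                       password='secret', database='tempdb')
cur = conn.cursor()
cur.execute("SELECT CURRENT_TIMESTAMP")
print(cur.fetchone())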
9d0aeb0931b59311a890e02e90ce57b8cbdf42ef
|
tools/context/naive_context_suppression.py
|
tools/context/naive_context_suppression.py
|
#!/usr/bin/env python
import argparse
import numpy as np
from tpn.data_io import tpn_test_iterator
import os
import cPickle
def parse_args():
parser = argparse.ArgumentParser('Naive context suppression: add bonus scores to the top classes.')
parser.add_argument('input_track')
parser.add_argument('output_track')
parser.add_argument('--top_ratio', type=float, default=0.0003,
help='Ratio of top detection. [0.0003]')
parser.add_argument('--top_bonus', type=float, default=0.4,
help='Bonus score for top classes. [0.4]')
parser.add_argument('--score_key', type=str,
help='Key name for detection scores.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
tracks = tpn_test_iterator(args.input_track)
all_scores = np.concatenate([track[args.score_key] for track in tracks])
num_box, num_cls = all_scores.shape
all_cls_idx = np.tile(np.arange(num_cls), (num_box,1))
# remove __background__
all_scores = all_scores[:,1:].flatten()
all_cls_idx = all_cls_idx[:,1:].flatten()
# sort in descending order
sorted_idx = np.argsort(all_scores)[::-1]
n_top = int(max(round(num_box * args.top_ratio), 1))
top_cls= np.unique(all_cls_idx[sorted_idx[:n_top]])
# add bonus scores
if not os.path.isdir(args.output_track):
os.makedirs(args.output_track)
for track_id, track in enumerate(tracks):
scores = track[args.score_key]
scores[:,top_cls] += args.top_bonus
track[args.score_key+'_mcs'] = scores
with open(os.path.join(args.output_track,
'{:06d}.pkl'.format(track_id)), 'wb') as f:
cPickle.dump(track, f, cPickle.HIGHEST_PROTOCOL)
|
Add naive context suppression script.
|
Add naive context suppression script.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add naive context suppression script.
|
#!/usr/bin/env python
import argparse
import numpy as np
from tpn.data_io import tpn_test_iterator
import os
import cPickle
def parse_args():
parser = argparse.ArgumentParser('Naive context suppression: add bonus scores to the top classes.')
parser.add_argument('input_track')
parser.add_argument('output_track')
parser.add_argument('--top_ratio', type=float, default=0.0003,
help='Ratio of top detection. [0.0003]')
parser.add_argument('--top_bonus', type=float, default=0.4,
help='Bonus score for top classes. [0.4]')
parser.add_argument('--score_key', type=str,
help='Key name for detection scores.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
tracks = tpn_test_iterator(args.input_track)
all_scores = np.concatenate([track[args.score_key] for track in tracks])
num_box, num_cls = all_scores.shape
all_cls_idx = np.tile(np.arange(num_cls), (num_box,1))
# remove __background__
all_scores = all_scores[:,1:].flatten()
all_cls_idx = all_cls_idx[:,1:].flatten()
# sort in descending order
sorted_idx = np.argsort(all_scores)[::-1]
n_top = int(max(round(num_box * args.top_ratio), 1))
top_cls= np.unique(all_cls_idx[sorted_idx[:n_top]])
# add bonus scores
if not os.path.isdir(args.output_track):
os.makedirs(args.output_track)
for track_id, track in enumerate(tracks):
scores = track[args.score_key]
scores[:,top_cls] += args.top_bonus
track[args.score_key+'_mcs'] = scores
with open(os.path.join(args.output_track,
'{:06d}.pkl'.format(track_id)), 'wb') as f:
cPickle.dump(track, f, cPickle.HIGHEST_PROTOCOL)
|
<commit_before><commit_msg>Add naive context suppression script.<commit_after>
|
#!/usr/bin/env python
import argparse
import numpy as np
from tpn.data_io import tpn_test_iterator
import os
import cPickle
def parse_args():
parser = argparse.ArgumentParser('Naive context suppression: add bonus scores to the top classes.')
parser.add_argument('input_track')
parser.add_argument('output_track')
parser.add_argument('--top_ratio', type=float, default=0.0003,
help='Ratio of top detection. [0.0003]')
parser.add_argument('--top_bonus', type=float, default=0.4,
help='Bonus score for top classes. [0.4]')
parser.add_argument('--score_key', type=str,
help='Key name for detection scores.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
tracks = tpn_test_iterator(args.input_track)
all_scores = np.concatenate([track[args.score_key] for track in tracks])
num_box, num_cls = all_scores.shape
all_cls_idx = np.tile(np.arange(num_cls), (num_box,1))
# remove __background__
all_scores = all_scores[:,1:].flatten()
all_cls_idx = all_cls_idx[:,1:].flatten()
# sort in descending order
sorted_idx = np.argsort(all_scores)[::-1]
n_top = int(max(round(num_box * args.top_ratio), 1))
top_cls= np.unique(all_cls_idx[sorted_idx[:n_top]])
# add bonus scores
if not os.path.isdir(args.output_track):
os.makedirs(args.output_track)
for track_id, track in enumerate(tracks):
scores = track[args.score_key]
scores[:,top_cls] += args.top_bonus
track[args.score_key+'_mcs'] = scores
with open(os.path.join(args.output_track,
'{:06d}.pkl'.format(track_id)), 'wb') as f:
cPickle.dump(track, f, cPickle.HIGHEST_PROTOCOL)
|
Add naive context suppression script.#!/usr/bin/env python
import argparse
import numpy as np
from tpn.data_io import tpn_test_iterator
import os
import cPickle
def parse_args():
parser = argparse.ArgumentParser('Naive context suppression: add bonus scores to the top classes.')
parser.add_argument('input_track')
parser.add_argument('output_track')
parser.add_argument('--top_ratio', type=float, default=0.0003,
help='Ratio of top detection. [0.0003]')
parser.add_argument('--top_bonus', type=float, default=0.4,
help='Bonus score for top classes. [0.4]')
parser.add_argument('--score_key', type=str,
help='Key name for detection scores.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
tracks = tpn_test_iterator(args.input_track)
all_scores = np.concatenate([track[args.score_key] for track in tracks])
num_box, num_cls = all_scores.shape
all_cls_idx = np.tile(np.arange(num_cls), (num_box,1))
# remove __background__
all_scores = all_scores[:,1:].flatten()
all_cls_idx = all_cls_idx[:,1:].flatten()
# sort in descending order
sorted_idx = np.argsort(all_scores)[::-1]
n_top = int(max(round(num_box * args.top_ratio), 1))
top_cls= np.unique(all_cls_idx[sorted_idx[:n_top]])
# add bonus scores
if not os.path.isdir(args.output_track):
os.makedirs(args.output_track)
for track_id, track in enumerate(tracks):
scores = track[args.score_key]
scores[:,top_cls] += args.top_bonus
track[args.score_key+'_mcs'] = scores
with open(os.path.join(args.output_track,
'{:06d}.pkl'.format(track_id)), 'wb') as f:
cPickle.dump(track, f, cPickle.HIGHEST_PROTOCOL)
|
<commit_before><commit_msg>Add naive context suppression script.<commit_after>#!/usr/bin/env python
import argparse
import numpy as np
from tpn.data_io import tpn_test_iterator
import os
import cPickle
def parse_args():
parser = argparse.ArgumentParser('Naive context suppression: add bonus scores to the top classes.')
parser.add_argument('input_track')
parser.add_argument('output_track')
parser.add_argument('--top_ratio', type=float, default=0.0003,
help='Ratio of top detection. [0.0003]')
parser.add_argument('--top_bonus', type=float, default=0.4,
help='Bonus score for top classes. [0.4]')
parser.add_argument('--score_key', type=str,
help='Key name for detection scores.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
tracks = tpn_test_iterator(args.input_track)
all_scores = np.concatenate([track[args.score_key] for track in tracks])
num_box, num_cls = all_scores.shape
all_cls_idx = np.tile(np.arange(num_cls), (num_box,1))
# remove __background__
all_scores = all_scores[:,1:].flatten()
all_cls_idx = all_cls_idx[:,1:].flatten()
# sort in descending order
sorted_idx = np.argsort(all_scores)[::-1]
n_top = int(max(round(num_box * args.top_ratio), 1))
top_cls= np.unique(all_cls_idx[sorted_idx[:n_top]])
# add bonus scores
if not os.path.isdir(args.output_track):
os.makedirs(args.output_track)
for track_id, track in enumerate(tracks):
scores = track[args.score_key]
scores[:,top_cls] += args.top_bonus
track[args.score_key+'_mcs'] = scores
with open(os.path.join(args.output_track,
'{:06d}.pkl'.format(track_id)), 'wb') as f:
cPickle.dump(track, f, cPickle.HIGHEST_PROTOCOL)
|
|
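Toy demonstration of the suppression logic on a fabricated score matrix, independent of the tpn track files the script expects; it mirrors the top-class selection and bonus step from the record above.

import numpy as np

scores = np.array([[0.1, 0.7, 0.2],
                   [0.1, 0.6, 0.3]])   # 2 boxes x 3 classes, column 0 = __background__
top_ratio, top_bonus = 0.5, 0.4

flat = scores[:, 1:].flatten()
cls_idx = np.tile(np.arange(scores.shape[1]), (scores.shape[0], 1))[:, 1:].flatten()
order = np.argsort(flat)[::-1]                      # descending by score
n_top = int(max(round(scores.shape[0] * top_ratio), 1))
top_cls = np.unique(cls_idx[order[:n_top]])

scores[:, top_cls] += top_bonus
print(top_cls)      # -> [1]
print(scores)       # class 1 boosted for every box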
7017ef29891e979f0cd5c1c397ddf2e393128e85
|
python/ClinicalReportLaunchers/add_variants_to_clinical_report.py
|
python/ClinicalReportLaunchers/add_variants_to_clinical_report.py
|
"""Add variant report variants to a Clinical Report by ID.
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
# Load environment variables for request authentication parameters
if "OMICIA_API_PASSWORD" not in os.environ:
sys.exit("OMICIA_API_PASSWORD environment variable missing")
if "OMICIA_API_LOGIN" not in os.environ:
sys.exit("OMICIA_API_LOGIN environment variable missing")
OMICIA_API_LOGIN = os.environ['OMICIA_API_LOGIN']
OMICIA_API_PASSWORD = os.environ['OMICIA_API_PASSWORD']
OMICIA_API_URL = os.environ.get('OMICIA_API_URL', 'https://api.omicia.com')
auth = HTTPBasicAuth(OMICIA_API_LOGIN, OMICIA_API_PASSWORD)
def add_variants_to_cr(cr_id, vmv_ids):
"""Add variants from vmv_base to a clinical report by ID.
"""
# Construct request
url = "{}/reports/{}/add_variants/"
url = url.format(OMICIA_API_URL, cr_id)
# Build the patch payload
url_payload = json.dumps([{"op": "add",
"path": "/variants",
"value": vmv_ids}])
headers = {"content-type": "application/json-patch+json"}
sys.stdout.flush()
result = requests.patch(url, auth=auth, json=url_payload, headers=headers)
return result
def main():
"""Main function. Patch a report variant.
"""
parser = argparse.ArgumentParser(description='Add report variants to an existing clinical report.')
parser.add_argument('cr_id', metavar='clinical_report_id', type=int)
parser.add_argument('variant_ids', metavar='variant_ids', type=str)
args = parser.parse_args()
cr_id = args.cr_id
vmv_base_variant_ids = args.variant_ids
# Take a string of comma-separated vmv ids and make a list out of them
vmv_base_variant_ids = vmv_base_variant_ids.split(",")
response = add_variants_to_cr(cr_id, vmv_base_variant_ids)
try:
sys.stdout.write(response.text)
except KeyError:
sys.stderr.write(response.text)
sys.stdout.write('\n')
if __name__ == "__main__":
main()
|
Add variants to clinical report script
|
Add variants to clinical report script
|
Python
|
mit
|
Omicia/omicia_api_examples,Omicia/omicia_api_examples,Omicia/omicia_api_examples
|
Add variants to clinical report script
|
"""Add variant report variants to a Clinical Report by ID.
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
# Load environment variables for request authentication parameters
if "OMICIA_API_PASSWORD" not in os.environ:
sys.exit("OMICIA_API_PASSWORD environment variable missing")
if "OMICIA_API_LOGIN" not in os.environ:
sys.exit("OMICIA_API_LOGIN environment variable missing")
OMICIA_API_LOGIN = os.environ['OMICIA_API_LOGIN']
OMICIA_API_PASSWORD = os.environ['OMICIA_API_PASSWORD']
OMICIA_API_URL = os.environ.get('OMICIA_API_URL', 'https://api.omicia.com')
auth = HTTPBasicAuth(OMICIA_API_LOGIN, OMICIA_API_PASSWORD)
def add_variants_to_cr(cr_id, vmv_ids):
"""Add variants from vmv_base to a clinical report by ID.
"""
# Construct request
url = "{}/reports/{}/add_variants/"
url = url.format(OMICIA_API_URL, cr_id)
# Build the patch payload
url_payload = json.dumps([{"op": "add",
"path": "/variants",
"value": vmv_ids}])
headers = {"content-type": "application/json-patch+json"}
sys.stdout.flush()
result = requests.patch(url, auth=auth, json=url_payload, headers=headers)
return result
def main():
"""Main function. Patch a report variant.
"""
parser = argparse.ArgumentParser(description='Add report variants to an existing clinical report.')
parser.add_argument('cr_id', metavar='clinical_report_id', type=int)
parser.add_argument('variant_ids', metavar='variant_ids', type=str)
args = parser.parse_args()
cr_id = args.cr_id
vmv_base_variant_ids = args.variant_ids
# Take a string of comma-separated vmv ids and make a list out of them
vmv_base_variant_ids = vmv_base_variant_ids.split(",")
response = add_variants_to_cr(cr_id, vmv_base_variant_ids)
try:
sys.stdout.write(response.text)
except KeyError:
sys.stderr.write(response.text)
sys.stdout.write('\n')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add variants to clinical report script<commit_after>
|
"""Add variant report variants to a Clinical Report by ID.
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
# Load environment variables for request authentication parameters
if "OMICIA_API_PASSWORD" not in os.environ:
sys.exit("OMICIA_API_PASSWORD environment variable missing")
if "OMICIA_API_LOGIN" not in os.environ:
sys.exit("OMICIA_API_LOGIN environment variable missing")
OMICIA_API_LOGIN = os.environ['OMICIA_API_LOGIN']
OMICIA_API_PASSWORD = os.environ['OMICIA_API_PASSWORD']
OMICIA_API_URL = os.environ.get('OMICIA_API_URL', 'https://api.omicia.com')
auth = HTTPBasicAuth(OMICIA_API_LOGIN, OMICIA_API_PASSWORD)
def add_variants_to_cr(cr_id, vmv_ids):
"""Add variants from vmv_base to a clinical report by ID.
"""
# Construct request
url = "{}/reports/{}/add_variants/"
url = url.format(OMICIA_API_URL, cr_id)
# Build the patch payload
url_payload = json.dumps([{"op": "add",
"path": "/variants",
"value": vmv_ids}])
headers = {"content-type": "application/json-patch+json"}
sys.stdout.flush()
result = requests.patch(url, auth=auth, json=url_payload, headers=headers)
return result
def main():
"""Main function. Patch a report variant.
"""
parser = argparse.ArgumentParser(description='Add report variants to an existing clinical report.')
parser.add_argument('cr_id', metavar='clinical_report_id', type=int)
parser.add_argument('variant_ids', metavar='variant_ids', type=str)
args = parser.parse_args()
cr_id = args.cr_id
vmv_base_variant_ids = args.variant_ids
# Take a string of comma-separated vmv ids and make a list out of them
vmv_base_variant_ids = vmv_base_variant_ids.split(",")
response = add_variants_to_cr(cr_id, vmv_base_variant_ids)
try:
sys.stdout.write(response.text)
except KeyError:
sys.stderr.write(response.text)
sys.stdout.write('\n')
if __name__ == "__main__":
main()
|
Add variants to clinical report script"""Add variant report variants to a Clinical Report by ID.
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
# Load environment variables for request authentication parameters
if "OMICIA_API_PASSWORD" not in os.environ:
sys.exit("OMICIA_API_PASSWORD environment variable missing")
if "OMICIA_API_LOGIN" not in os.environ:
sys.exit("OMICIA_API_LOGIN environment variable missing")
OMICIA_API_LOGIN = os.environ['OMICIA_API_LOGIN']
OMICIA_API_PASSWORD = os.environ['OMICIA_API_PASSWORD']
OMICIA_API_URL = os.environ.get('OMICIA_API_URL', 'https://api.omicia.com')
auth = HTTPBasicAuth(OMICIA_API_LOGIN, OMICIA_API_PASSWORD)
def add_variants_to_cr(cr_id, vmv_ids):
"""Add variants from vmv_base to a clinical report by ID.
"""
# Construct request
url = "{}/reports/{}/add_variants/"
url = url.format(OMICIA_API_URL, cr_id)
# Build the patch payload
url_payload = json.dumps([{"op": "add",
"path": "/variants",
"value": vmv_ids}])
headers = {"content-type": "application/json-patch+json"}
sys.stdout.flush()
result = requests.patch(url, auth=auth, json=url_payload, headers=headers)
return result
def main():
"""Main function. Patch a report variant.
"""
parser = argparse.ArgumentParser(description='Add report variants to an existing clinical report.')
parser.add_argument('cr_id', metavar='clinical_report_id', type=int)
parser.add_argument('variant_ids', metavar='variant_ids', type=str)
args = parser.parse_args()
cr_id = args.cr_id
vmv_base_variant_ids = args.variant_ids
# Take a string of comma-separated vmv ids and make a list out of them
vmv_base_variant_ids = vmv_base_variant_ids.split(",")
response = add_variants_to_cr(cr_id, vmv_base_variant_ids)
try:
sys.stdout.write(response.text)
except KeyError:
sys.stderr.write(response.text)
sys.stdout.write('\n')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add variants to clinical report script<commit_after>"""Add variant report variants to a Clinical Report by ID.
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import json
import argparse
# Load environment variables for request authentication parameters
if "OMICIA_API_PASSWORD" not in os.environ:
sys.exit("OMICIA_API_PASSWORD environment variable missing")
if "OMICIA_API_LOGIN" not in os.environ:
sys.exit("OMICIA_API_LOGIN environment variable missing")
OMICIA_API_LOGIN = os.environ['OMICIA_API_LOGIN']
OMICIA_API_PASSWORD = os.environ['OMICIA_API_PASSWORD']
OMICIA_API_URL = os.environ.get('OMICIA_API_URL', 'https://api.omicia.com')
auth = HTTPBasicAuth(OMICIA_API_LOGIN, OMICIA_API_PASSWORD)
def add_variants_to_cr(cr_id, vmv_ids):
"""Add variants from vmv_base to a clinical report by ID.
"""
# Construct request
url = "{}/reports/{}/add_variants/"
url = url.format(OMICIA_API_URL, cr_id)
# Build the patch payload
url_payload = json.dumps([{"op": "add",
"path": "/variants",
"value": vmv_ids}])
headers = {"content-type": "application/json-patch+json"}
sys.stdout.flush()
result = requests.patch(url, auth=auth, json=url_payload, headers=headers)
return result
def main():
"""Main function. Patch a report variant.
"""
parser = argparse.ArgumentParser(description='Add report variants to an existing clinical report.')
parser.add_argument('cr_id', metavar='clinical_report_id', type=int)
parser.add_argument('variant_ids', metavar='variant_ids', type=str)
args = parser.parse_args()
cr_id = args.cr_id
vmv_base_variant_ids = args.variant_ids
# Take a string of comma-separated vmv ids and make a list out of them
vmv_base_variant_ids = vmv_base_variant_ids.split(",")
response = add_variants_to_cr(cr_id, vmv_base_variant_ids)
try:
sys.stdout.write(response.text)
except KeyError:
sys.stderr.write(response.text)
sys.stdout.write('\n')
if __name__ == "__main__":
main()
|
|
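Hedged usage sketch for the script above: credentials are exported before importing the module, because it reads them at import time, and the report id and variant ids are made up; the module name is assumed to follow the script's file name.

import os

# Placeholders only, the script exits at import time if these are unset.
os.environ.setdefault('OMICIA_API_LOGIN', 'user@example.com')
os.environ.setdefault('OMICIA_API_PASSWORD', 'not-a-real-password')

from add_variants_to_clinical_report import add_variants_to_cr

response = add_variants_to_cr(123, ['1001', '1002'])   # hypothetical report and variant ids
print(response.status_code, response.text)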
1477c3ed2b163001f0427cb6288c4fdd49456930
|
examples/blur_faces_on_webcam.py
|
examples/blur_faces_on_webcam.py
|
import face_recognition
import cv2
# This is a demo of blurring faces in video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Initialize some variables
face_locations = []
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face detection processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(small_frame, model="cnn")
# Display the results
for top, right, bottom, left in face_locations:
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Extract the region of the image that contains the face
face_image = frame[top:bottom, left:right]
# Blur the face image
face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
# Put the blurred face region back into the frame image
frame[top:bottom, left:right] = face_image
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
Add an example of automatically blurring faces in images or videos
|
Add an example of automatically blurring faces in images or videos
|
Python
|
mit
|
ageitgey/face_recognition
|
Add an example of automatically blurring faces in images or videos
|
import face_recognition
import cv2
# This is a demo of blurring faces in video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Initialize some variables
face_locations = []
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face detection processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(small_frame, model="cnn")
# Display the results
for top, right, bottom, left in face_locations:
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Extract the region of the image that contains the face
face_image = frame[top:bottom, left:right]
# Blur the face image
face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
# Put the blurred face region back into the frame image
frame[top:bottom, left:right] = face_image
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add an example of automatically blurring faces in images or videos<commit_after>
|
import face_recognition
import cv2
# This is a demo of blurring faces in video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Initialize some variables
face_locations = []
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face detection processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(small_frame, model="cnn")
# Display the results
for top, right, bottom, left in face_locations:
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Extract the region of the image that contains the face
face_image = frame[top:bottom, left:right]
# Blur the face image
face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
# Put the blurred face region back into the frame image
frame[top:bottom, left:right] = face_image
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
Add an example of automatically blurring faces in images or videosimport face_recognition
import cv2
# This is a demo of blurring faces in video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Initialize some variables
face_locations = []
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face detection processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(small_frame, model="cnn")
# Display the results
for top, right, bottom, left in face_locations:
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Extract the region of the image that contains the face
face_image = frame[top:bottom, left:right]
# Blur the face image
face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
# Put the blurred face region back into the frame image
frame[top:bottom, left:right] = face_image
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add an example of automatically blurring faces in images or videos<commit_after>import face_recognition
import cv2
# This is a demo of blurring faces in video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
# Initialize some variables
face_locations = []
while True:
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face detection processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(small_frame, model="cnn")
# Display the results
for top, right, bottom, left in face_locations:
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Extract the region of the image that contains the face
face_image = frame[top:bottom, left:right]
# Blur the face image
face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
# Put the blurred face region back into the frame image
frame[top:bottom, left:right] = face_image
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
|
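Variation on the demo above for a single still image instead of a webcam stream; the file paths are placeholders, and the frame is converted to RGB before detection, which is the colour order face_recognition expects.

import cv2
import face_recognition

frame = cv2.imread("group_photo.jpg")                 # placeholder input path
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)          # face_recognition expects RGB
for top, right, bottom, left in face_recognition.face_locations(rgb):
    face = frame[top:bottom, left:right]
    frame[top:bottom, left:right] = cv2.GaussianBlur(face, (99, 99), 30)
cv2.imwrite("group_photo_blurred.jpg", frame)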
a3b046089a4ccc62286c62367847f39d8581fece
|
corehq/apps/sms/management/commands/set_default_dup_number_option.py
|
corehq/apps/sms/management/commands/set_default_dup_number_option.py
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import SMSLog
class Command(BaseCommand):
args = ""
help = ("Sets a default value for all domains' "
"send_to_duplicated_case_numbers property")
def handle(self, *args, **options):
for domain in Domain.get_all():
count = SMSLog.count_by_domain(domain.name)
if count > 0:
if not domain.send_to_duplicated_case_numbers:
# if not True, explicitly set to False
print "Setting %s to False" % domain.name
domain.send_to_duplicated_case_numbers = False
domain.save()
else:
print "Setting %s to True" % domain.name
domain.send_to_duplicated_case_numbers = True
domain.save()
|
Add management command for setting default value of send to dup number option
|
Add management command for setting default value of send to dup number option
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
Add management command for setting default value of send to dup number option
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import SMSLog
class Command(BaseCommand):
args = ""
help = ("Sets a default value for all domains' "
"send_to_duplicated_case_numbers property")
def handle(self, *args, **options):
for domain in Domain.get_all():
count = SMSLog.count_by_domain(domain.name)
if count > 0:
if not domain.send_to_duplicated_case_numbers:
# if not True, explicitly set to False
print "Setting %s to False" % domain.name
domain.send_to_duplicated_case_numbers = False
domain.save()
else:
print "Setting %s to True" % domain.name
domain.send_to_duplicated_case_numbers = True
domain.save()
|
<commit_before><commit_msg>Add management command for setting default value of send to dup number option<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import SMSLog
class Command(BaseCommand):
args = ""
help = ("Sets a default value for all domains' "
"send_to_duplicated_case_numbers property")
def handle(self, *args, **options):
for domain in Domain.get_all():
count = SMSLog.count_by_domain(domain.name)
if count > 0:
if not domain.send_to_duplicated_case_numbers:
# if not True, explicitly set to False
print "Setting %s to False" % domain.name
domain.send_to_duplicated_case_numbers = False
domain.save()
else:
print "Setting %s to True" % domain.name
domain.send_to_duplicated_case_numbers = True
domain.save()
|
Add management command for setting default value of send to dup number optionfrom django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import SMSLog
class Command(BaseCommand):
args = ""
help = ("Sets a default value for all domains' "
"send_to_duplicated_case_numbers property")
def handle(self, *args, **options):
for domain in Domain.get_all():
count = SMSLog.count_by_domain(domain.name)
if count > 0:
if not domain.send_to_duplicated_case_numbers:
# if not True, explicitly set to False
print "Setting %s to False" % domain.name
domain.send_to_duplicated_case_numbers = False
domain.save()
else:
print "Setting %s to True" % domain.name
domain.send_to_duplicated_case_numbers = True
domain.save()
|
<commit_before><commit_msg>Add management command for setting default value of send to dup number option<commit_after>from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from corehq.apps.domain.models import Domain
from corehq.apps.sms.models import SMSLog
class Command(BaseCommand):
args = ""
help = ("Sets a default value for all domains' "
"send_to_duplicated_case_numbers property")
def handle(self, *args, **options):
for domain in Domain.get_all():
count = SMSLog.count_by_domain(domain.name)
if count > 0:
if not domain.send_to_duplicated_case_numbers:
# if not True, explicitly set to False
print "Setting %s to False" % domain.name
domain.send_to_duplicated_case_numbers = False
domain.save()
else:
print "Setting %s to True" % domain.name
domain.send_to_duplicated_case_numbers = True
domain.save()
|
|
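Usage note for the record above: the file's location under management/commands means Django exposes it under the name below; actually running it still requires a fully configured CommCare HQ settings module and database connections.

from django.core.management import call_command

# Walks every domain and fills in send_to_duplicated_case_numbers, as above.
call_command('set_default_dup_number_option')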
3aeca09940f5e42a45df55efc2ac5df0cdd291e4
|
flexx/ui/examples/stayin_alive.py
|
flexx/ui/examples/stayin_alive.py
|
"""
Example that demonstrates/tests how objects survive synchronisation
jitter even if Python and JS are busy. We tried hard to make this
as painless as possible in Flexx, which is why this example may look
a bit dull. But the fact that this works is not trivial :)
What happens is that the ``the_thing`` property is set in the
``init()``. This will sync to JS, and then back again to Py (syncing
always ends at the Python side, for eventual synchronicity). However,
in the mean time, Python has set the property again, so by the time
that the prop gets synced back to Python, the first Thing is not there
anymore, and would be deleted (if we had not taken measures to prevent
that), which would cause problems.
"""
import time
from flexx import app, event, ui
class Thing(app.Model):
@event.prop
def value(self, v):
return v
class Example(ui.Widget):
def init(self):
self.the_thing = Thing(value=2)
@event.prop
def foo(self, v=0):
print('in foo setter')
return v
@event.connect('the_thing')
def on_the_thing(self, *events):
for ev in events:
print('the thing became %s with value %s' %
(ev.new_value.id, ev.new_value.value))
@event.connect('foo')
def on_foo(self, *events):
print('sleep in Py')
time.sleep(10)
print('Done sleeping in Py')
class Both:
@event.prop
def the_thing(self, v):
assert isinstance(v, Thing)
return v
class JS:
def init(self):
print('sleep in JS')
self.sleep(10)
print('Done sleeping in JS')
def sleep(self, t):
import time
etime = time.time() + t
while time.time() < etime:
pass
m = app.launch(Example)
with m:
m.the_thing = Thing(value=3)
print('starting event loop')
app.run()
|
Add example to test keep alive
|
Add example to test keep alive
|
Python
|
bsd-2-clause
|
JohnLunzer/flexx,JohnLunzer/flexx,zoofIO/flexx,JohnLunzer/flexx,zoofIO/flexx,jrversteegh/flexx,jrversteegh/flexx
|
Add example to test keep alive
|
"""
Example that demonstrates/tests how objects survive synchronisation
jitter even if Python and JS are busy. We tried hard to make this
as painless as possible in Flexx, which is why this example may look
a bit dull. But the fact that this works is not trivial :)
What happens is that the ``the_thing`` property is set in the
``init()``. This will sync to JS, and then back again to Py (syncing
always ends at the Python side, for eventual synchronicity). However,
in the mean time, Python has set the property again, so by the time
that the prop gets synced back to Python, the first Thing is not there
anymore, and would be deleted (if we had not taken measures to prevent
that), which would cause problems.
"""
import time
from flexx import app, event, ui
class Thing(app.Model):
@event.prop
def value(self, v):
return v
class Example(ui.Widget):
def init(self):
self.the_thing = Thing(value=2)
@event.prop
def foo(self, v=0):
print('in foo setter')
return v
@event.connect('the_thing')
def on_the_thing(self, *events):
for ev in events:
print('the thing became %s with value %s' %
(ev.new_value.id, ev.new_value.value))
@event.connect('foo')
def on_foo(self, *events):
print('sleep in Py')
time.sleep(10)
print('Done sleeping in Py')
class Both:
@event.prop
def the_thing(self, v):
assert isinstance(v, Thing)
return v
class JS:
def init(self):
print('sleep in JS')
self.sleep(10)
print('Done sleeping in JS')
def sleep(self, t):
import time
etime = time.time() + t
while time.time() < etime:
pass
m = app.launch(Example)
with m:
m.the_thing = Thing(value=3)
print('starting event loop')
app.run()
|
<commit_before><commit_msg>Add example to test keep alive<commit_after>
|
"""
Example that demonstrates/tests how objects survive synchronisation
jitter even if Python and JS are busy. We tried hard to make this
as painless as possible in Flexx, which is why this example may look
a bit dull. But the fact that this works is not trivial :)
What happens is that the ``the_thing`` property is set in the
``init()``. This will sync to JS, and then back again to Py (syncing
always ends at the Python side, for eventual synchronicity). However,
in the mean time, Python has set the property again, so by the time
that the prop gets synced back to Python, the first Thing is not there
anymore, and would be deleted (if we had not taken measures to prevent
that), which would cause problems.
"""
import time
from flexx import app, event, ui
class Thing(app.Model):
@event.prop
def value(self, v):
return v
class Example(ui.Widget):
def init(self):
self.the_thing = Thing(value=2)
@event.prop
def foo(self, v=0):
print('in foo setter')
return v
@event.connect('the_thing')
def on_the_thing(self, *events):
for ev in events:
print('the thing became %s with value %s' %
(ev.new_value.id, ev.new_value.value))
@event.connect('foo')
def on_foo(self, *events):
print('sleep in Py')
time.sleep(10)
print('Done sleeping in Py')
class Both:
@event.prop
def the_thing(self, v):
assert isinstance(v, Thing)
return v
class JS:
def init(self):
print('sleep in JS')
self.sleep(10)
print('Done sleeping in JS')
def sleep(self, t):
import time
etime = time.time() + t
while time.time() < etime:
pass
m = app.launch(Example)
with m:
m.the_thing = Thing(value=3)
print('starting event loop')
app.run()
|
Add example to test keep alive"""
Example that demonstrates/tests how objects survive synchronisation
jitter even if Python and JS are busy. We tried hard to make this
as painless as possible in Flexx, which is why this example may look
a bit dull. But the fact that this works is not trivial :)
What happens is that the ``the_thing`` property is set in the
``init()``. This will sync to JS, and then back again to Py (syncing
always ends at the Python side, for eventual synchronicity). However,
in the mean time, Python has set the property again, so by the time
that the prop gets synced back to Python, the first Thing is not there
anymore, and would be deleted (if we had not taken measures to prevent
that), which would cause problems.
"""
import time
from flexx import app, event, ui
class Thing(app.Model):
@event.prop
def value(self, v):
return v
class Example(ui.Widget):
def init(self):
self.the_thing = Thing(value=2)
@event.prop
def foo(self, v=0):
print('in foo setter')
return v
@event.connect('the_thing')
def on_the_thing(self, *events):
for ev in events:
print('the thing became %s with value %s' %
(ev.new_value.id, ev.new_value.value))
@event.connect('foo')
def on_foo(self, *events):
print('sleep in Py')
time.sleep(10)
print('Done sleeping in Py')
class Both:
@event.prop
def the_thing(self, v):
assert isinstance(v, Thing)
return v
class JS:
def init(self):
print('sleep in JS')
self.sleep(10)
print('Done sleeping in JS')
def sleep(self, t):
import time
etime = time.time() + t
while time.time() < etime:
pass
m = app.launch(Example)
with m:
m.the_thing = Thing(value=3)
print('starting event loop')
app.run()
|
<commit_before><commit_msg>Add example to test keep alive<commit_after>"""
Example that demonstrates/tests how objects survive synchronisation
jitter even if Python and JS are busy. We tried hard to make this
as painless as possible in Flexx, which is why this example may look
a bit dull. But the fact that this works is not trivial :)
What happens is that the ``the_thing`` property is set in the
``init()``. This will sync to JS, and then back again to Py (syncing
always ends at the Python side, for eventual synchronicity). However,
in the mean time, Python has set the property again, so by the time
that the prop gets synced back to Python, the first Thing is not there
anymore, and would be deleted (if we had not taken measures to prevent
that), which would cause problems.
"""
import time
from flexx import app, event, ui
class Thing(app.Model):
@event.prop
def value(self, v):
return v
class Example(ui.Widget):
def init(self):
self.the_thing = Thing(value=2)
@event.prop
def foo(self, v=0):
print('in foo setter')
return v
@event.connect('the_thing')
def on_the_thing(self, *events):
for ev in events:
print('the thing became %s with value %s' %
(ev.new_value.id, ev.new_value.value))
@event.connect('foo')
def on_foo(self, *events):
print('sleep in Py')
time.sleep(10)
print('Done sleeping in Py')
class Both:
@event.prop
def the_thing(self, v):
assert isinstance(v, Thing)
return v
class JS:
def init(self):
print('sleep in JS')
self.sleep(10)
print('Done sleeping in JS')
def sleep(self, t):
import time
etime = time.time() + t
while time.time() < etime:
pass
m = app.launch(Example)
with m:
m.the_thing = Thing(value=3)
print('starting event loop')
app.run()
|
|
5ada28bf4eeda038fa1a930dba94cbd00f42a69e
|
two_pass_example.py
|
two_pass_example.py
|
#!/usr/bin/env python3
#
# Generates two heat maps from the same data and composites them together.
#
# Copyright 2022 Seth Golub http://www.sethoscope.net/heatmap/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import logging
import sys
import heatmap as hm
import PIL
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# This is just an example of doing two heatmaps and compositing the
# results, all in one python process. Some of the config is here in
# code, and some needs to be passed on the command line, which is not
# ideal. It could all be moved to new command line arguments with
# defaults, or could all get put into code.
#
# If run with these arguments:
#
# -W 1200 -o out.png -P equirectangular test-data/graffiti.coords
#
# it produces output similar to the graffiti example on the website.
def main():
logging.basicConfig(format='%(relativeCreated)8d ms // %(message)s')
config = hm.Configuration(use_defaults=False)
parser = config.argparser
parser.description = 'make a 2-pass composite heatmap'
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
config.set_from_options(args)
config.fill_missing()
logging.debug('python version %s' % str(sys.version))
config.decay = 0.3
config.kernel = hm.LinearKernel(5)
config.background = None
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image1 = hm.ImageMaker(config).make_image(matrix)
config.decay = 0.95
config.kernel = hm.LinearKernel(30)
config.background = 'black'
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image2 = hm.ImageMaker(config).make_image(matrix)
image2.putalpha(255)
logging.debug(f'image2.mode: {image2.mode}')
image = PIL.Image.alpha_composite(image2, image1)
image.save(args.output)
if __name__ == '__main__':
main()
|
Add example of doing a 2-pass composite heatmap in Python.
|
Add example of doing a 2-pass composite heatmap in Python.
|
Python
|
agpl-3.0
|
sethoscope/heatmap,sethoscope/heatmap
|
Add example of doing a 2-pass composite heatmap in Python.
|
#!/usr/bin/env python3
#
# Generates two heat maps from the same data and composites them together.
#
# Copyright 2022 Seth Golub http://www.sethoscope.net/heatmap/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import logging
import sys
import heatmap as hm
import PIL
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# This is just an example of doing two heatmaps and compositing the
# results, all in one python process. Some of the config is here in
# code, and some needs to be passed on the command line, which is not
# ideal. It could all be moved to new command line arguments with
# defaults, or could all get put into code.
#
# If run with these arguments:
#
# -W 1200 -o out.png -P equirectangular test-data/graffiti.coords
#
# it produces output similar to the graffiti example on the website.
def main():
logging.basicConfig(format='%(relativeCreated)8d ms // %(message)s')
config = hm.Configuration(use_defaults=False)
parser = config.argparser
parser.description = 'make a 2-pass composite heatmap'
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
config.set_from_options(args)
config.fill_missing()
logging.debug('python version %s' % str(sys.version))
config.decay = 0.3
config.kernel = hm.LinearKernel(5)
config.background = None
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image1 = hm.ImageMaker(config).make_image(matrix)
config.decay = 0.95
config.kernel = hm.LinearKernel(30)
config.background = 'black'
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image2 = hm.ImageMaker(config).make_image(matrix)
image2.putalpha(255)
logging.debug(f'image2.mode: {image2.mode}')
image = PIL.Image.alpha_composite(image2, image1)
image.save(args.output)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example of doing a 2-pass composite heatmap in Python.<commit_after>
|
#!/usr/bin/env python3
#
# Generates two heat maps from the same data and composites them together.
#
# Copyright 2022 Seth Golub http://www.sethoscope.net/heatmap/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import logging
import sys
import heatmap as hm
import PIL
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# This is just an example of doing two heatmaps and compositing the
# results, all in one python process. Some of the config is here in
# code, and some needs to be passed on the command line, which is not
# ideal. It could all be moved to new command line arguments with
# defaults, or could all get put into code.
#
# If run with these arguments:
#
# -W 1200 -o out.png -P equirectangular test-data/graffiti.coords
#
# it produces output similar to the graffiti example on the website.
def main():
logging.basicConfig(format='%(relativeCreated)8d ms // %(message)s')
config = hm.Configuration(use_defaults=False)
parser = config.argparser
parser.description = 'make a 2-pass composite heatmap'
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
config.set_from_options(args)
config.fill_missing()
logging.debug('python version %s' % str(sys.version))
config.decay = 0.3
config.kernel = hm.LinearKernel(5)
config.background = None
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image1 = hm.ImageMaker(config).make_image(matrix)
config.decay = 0.95
config.kernel = hm.LinearKernel(30)
config.background = 'black'
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image2 = hm.ImageMaker(config).make_image(matrix)
image2.putalpha(255)
logging.debug(f'image2.mode: {image2.mode}')
image = PIL.Image.alpha_composite(image2, image1)
image.save(args.output)
if __name__ == '__main__':
main()
|
Add example of doing a 2-pass composite heatmap in Python.#!/usr/bin/env python3
#
# Generates two heat maps from the same data and composites them together.
#
# Copyright 2022 Seth Golub http://www.sethoscope.net/heatmap/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import logging
import sys
import heatmap as hm
import PIL
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# This is just an example of doing two heatmaps and compositing the
# results, all in one python process. Some of the config is here in
# code, and some needs to be passed on the command line, which is not
# ideal. It could all be moved to new command line arguments with
# defaults, or could all get put into code.
#
# If run with these arguments:
#
# -W 1200 -o out.png -P equirectangular test-data/graffiti.coords
#
# it produces output similar to the graffiti example on the website.
def main():
logging.basicConfig(format='%(relativeCreated)8d ms // %(message)s')
config = hm.Configuration(use_defaults=False)
parser = config.argparser
parser.description = 'make a 2-pass composite heatmap'
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
config.set_from_options(args)
config.fill_missing()
logging.debug('python version %s' % str(sys.version))
config.decay = 0.3
config.kernel = hm.LinearKernel(5)
config.background = None
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image1 = hm.ImageMaker(config).make_image(matrix)
config.decay = 0.95
config.kernel = hm.LinearKernel(30)
config.background = 'black'
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image2 = hm.ImageMaker(config).make_image(matrix)
image2.putalpha(255)
logging.debug(f'image2.mode: {image2.mode}')
image = PIL.Image.alpha_composite(image2, image1)
image.save(args.output)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example of doing a 2-pass composite heatmap in Python.<commit_after>#!/usr/bin/env python3
#
# Generates two heat maps from the same data and composites them together.
#
# Copyright 2022 Seth Golub http://www.sethoscope.net/heatmap/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import logging
import sys
import heatmap as hm
import PIL
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# This is just an example of doing two heatmaps and compositing the
# results, all in one python process. Some of the config is here in
# code, and some needs to be passed on the command line, which is not
# ideal. It could all be moved to new command line arguments with
# defaults, or could all get put into code.
#
# If run with these arguments:
#
# -W 1200 -o out.png -P equirectangular test-data/graffiti.coords
#
# it produces output similar to the graffiti example on the website.
def main():
logging.basicConfig(format='%(relativeCreated)8d ms // %(message)s')
config = hm.Configuration(use_defaults=False)
parser = config.argparser
parser.description = 'make a 2-pass composite heatmap'
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
config.set_from_options(args)
config.fill_missing()
logging.debug('python version %s' % str(sys.version))
config.decay = 0.3
config.kernel = hm.LinearKernel(5)
config.background = None
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image1 = hm.ImageMaker(config).make_image(matrix)
config.decay = 0.95
config.kernel = hm.LinearKernel(30)
config.background = 'black'
matrix = hm.process_shapes(config)
matrix = matrix.finalized()
image2 = hm.ImageMaker(config).make_image(matrix)
image2.putalpha(255)
logging.debug(f'image2.mode: {image2.mode}')
image = PIL.Image.alpha_composite(image2, image1)
image.save(args.output)
if __name__ == '__main__':
main()
|
|
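Editorial note on the compositing step above: PIL.Image.alpha_composite requires both inputs to be RGBA images, which is why the script calls image2.putalpha(255) on the background layer before compositing. A minimal standalone sketch of that pattern, assuming only that Pillow is installed (the sizes and colours below are arbitrary and not taken from the heatmap example):
# Sketch only: illustrates the RGBA requirement of Image.alpha_composite.
from PIL import Image
base = Image.new('RGB', (8, 8), 'black')
base.putalpha(255)                                     # converts to RGBA with an opaque alpha channel
overlay = Image.new('RGBA', (8, 8), (255, 0, 0, 128))  # semi-transparent red layer
combined = Image.alpha_composite(base, overlay)        # draws overlay over base
combined.save('combined.png')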
85b81a3b4ae5b62b8cbf5993445385531a48c530
|
Regression/DecisionTreeRegression/regularDecisionTreeRegression.py
|
Regression/DecisionTreeRegression/regularDecisionTreeRegression.py
|
# -*- coding: utf-8 -*-
"""Decision Tree regression for machine learning.
Decision tree builds regression or classification models in the form of a tree
structure. It breaks down a dataset into smaller and smaller subsets while at
the same time an associated decision tree is incrementally developed. The final
result is a tree with decision nodes and leaf nodes.
Example:
$ python regularDecisionTreeRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidean distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Decision Tree Regression to the dataset
regressor = DecisionTreeRegressor(random_state=0) # random_state for debug
regressor.fit(features, labels)
# Predict new result with the Decision Tree Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with higher resolution
x_grid = np.arange(min(features), max(features), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
Add Decision Tree Regression in Python
|
Add Decision Tree Regression in Python
|
Python
|
mit
|
a-holm/MachinelearningAlgorithms,a-holm/MachinelearningAlgorithms
|
Add Decision Tree Regression in Python
|
# -*- coding: utf-8 -*-
"""Decision Tree regression for machine learning.
Decision tree builds regression or classification models in the form of a tree
structure. It breaks down a dataset into smaller and smaller subsets while at
the same time an associated decision tree is incrementally developed. The final
result is a tree with decision nodes and leaf nodes.
Example:
$ python regularDecisionTreeRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidean distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Decision Tree Regression to the dataset
regressor = DecisionTreeRegressor(random_state=0) # random_state for debug
regressor.fit(features, labels)
# Predict new result with the Decision Tree Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with higher resolution
x_grid = np.arange(min(features), max(features), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
<commit_before><commit_msg>Add Decision Tree Regression in Python<commit_after>
|
# -*- coding: utf-8 -*-
"""Decision Tree regression for machine learning.
Decision tree builds regression or classification models in the form of a tree
structure. It breaks down a dataset into smaller and smaller subsets while at
the same time an associated decision tree is incrementally developed. The final
result is a tree with decision nodes and leaf nodes.
Example:
$ python regularDecisionTreeRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidean distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Decision Tree Regression to the dataset
regressor = DecisionTreeRegressor(random_state=0) # random_state for debug
regressor.fit(features, labels)
# Predict new result with the Decision Tree Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with higher resolution
x_grid = np.arange(min(features), max(features), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
Add Decision Tree Regression in Python# -*- coding: utf-8 -*-
"""Decision Tree regression for machine learning.
Decision tree builds regression or classification models in the form of a tree
structure. It breaks down a dataset into smaller and smaller subsets while at
the same time an associated decision tree is incrementally developed. The final
result is a tree with decision nodes and leaf nodes.
Example:
$ python regularDecisionTreeRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidean distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Decision Tree Regression to the dataset
regressor = DecisionTreeRegressor(random_state=0) # random_state for debug
regressor.fit(features, labels)
# Predict new result with the Decision Tree Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with higher resolution
x_grid = np.arange(min(features), max(features), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
<commit_before><commit_msg>Add Decision Tree Regression in Python<commit_after># -*- coding: utf-8 -*-
"""Decision Tree regression for machine learning.
Decision tree builds regression or classification models in the form of a tree
structure. It breaks down a dataset into smaller and smaller subsets while at
the same time an associated decision tree is incrementally developed. The final
result is a tree with decision nodes and leaf nodes.
Example:
$ python regularDecisionTreeRegression.py
Todo:
*
"""
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import train_test_split
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
features = dataset.iloc[:, 1:2].values
labels = dataset.iloc[:, 2].values
# Splitting the Dataset into a Training set and a Test set
"""feature_train, feature_test, label_train, label_test = train_test_split(
features, labels, test_size=0.2)"""
# Feature scaling, normalize scale is important. Especially on algorithms
# involving euclidian distance. Two main feature scaling formulas are:
# Standardisation: x_stand = (x-mean(x))/(standard_deviation(x))
# Normalisation: x_norm = (x-min(x))/(max(x)-min(x))
"""sc_feature = StandardScaler()
feature_train = sc_feature.fit_transform(feature_train)
feature_test = sc_feature.transform(feature_test)
sc_labels = StandardScaler()
labels_train = sc_labels.fit_transform(labels_train)
labels_test = sc_labels.transform(labels_test)
"""
# Fit the Decision Tree Regression to the dataset
regressor = DecisionTreeRegressor(random_state=0) # random_state for debug
regressor.fit(features, labels)
# Predict new result with the Decision Tree Regression
y_pred = regressor.predict(6.5)
# Visualising the regression results with higher resolution
x_grid = np.arange(min(features), max(features), 0.1)
x_grid = x_grid.reshape((len(x_grid), 1))
plt.scatter(features, labels, color='r')
plt.plot(x_grid, regressor.predict(x_grid), color='b')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
|
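Editorial caveat, not part of the original commit: recent scikit-learn releases reject scalar arguments to predict(), so the single-point prediction above (regressor.predict(6.5)) has to be passed as a 2-D array of shape (n_samples, n_features) on current versions. A minimal sketch of the adjusted call, using made-up toy data:
# Sketch only: same estimator, but the prediction input is wrapped as a 2-D array.
import numpy as np
from sklearn.tree import DecisionTreeRegressor
X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])  # one feature per sample
y = np.array([10.0, 20.0, 30.0, 40.0, 55.0])
regressor = DecisionTreeRegressor(random_state=0).fit(X, y)
y_pred = regressor.predict([[6.5]])                # instead of regressor.predict(6.5)
print(y_pred)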
4f6968888f12d3e8bba407f6701b06c357d1130b
|
openfisca_core/scripts/xml_to_json/xml_to_json_country_template.py
|
openfisca_core/scripts/xml_to_json/xml_to_json_country_template.py
|
# -*- coding: utf-8 -*-
''' xml_to_json_country_template.py : Parse XML parameter files for Country-Template and convert them to YAML files. Comments are NOT transformed.
Usage :
`python xml_to_json_country_template.py output_dir`
or just (output is written in a directory called `yaml_parameters`):
`python xml_to_json_country_template.py`
'''
import sys
import os
from openfisca_country_template import CountryTaxBenefitSystem, COUNTRY_DIR
from openfisca_core.scripts.xml_to_json import xml_to_json
tax_benefit_system = CountryTaxBenefitSystem()
if len(sys.argv) > 1:
target_path = sys.argv[1]
else:
target_path = 'yaml_parameters'
param_dir = os.path.join(COUNTRY_DIR, 'parameters')
param_files = [
'__root__.xml',
'benefits.xml',
'general.xml',
'taxes.xml',
]
legislation_xml_info_list = [
(os.path.join(param_dir, param_file), [])
for param_file in param_files
]
xml_to_json.write_legislation(legislation_xml_info_list, target_path)
|
Add script to transform CountryTemplate legislation
|
Add script to transform CountryTemplate legislation
|
Python
|
agpl-3.0
|
openfisca/openfisca-core,openfisca/openfisca-core
|
Add script to transform CountryTemplate legislation
|
# -*- coding: utf-8 -*-
''' xml_to_json_country_template.py : Parse XML parameter files for Country-Template and convert them to YAML files. Comments are NOT transformed.
Usage :
`python xml_to_json_country_template.py output_dir`
or just (output is written in a directory called `yaml_parameters`):
`python xml_to_json_country_template.py`
'''
import sys
import os
from openfisca_country_template import CountryTaxBenefitSystem, COUNTRY_DIR
from openfisca_core.scripts.xml_to_json import xml_to_json
tax_benefit_system = CountryTaxBenefitSystem()
if len(sys.argv) > 1:
target_path = sys.argv[1]
else:
target_path = 'yaml_parameters'
param_dir = os.path.join(COUNTRY_DIR, 'parameters')
param_files = [
'__root__.xml',
'benefits.xml',
'general.xml',
'taxes.xml',
]
legislation_xml_info_list = [
(os.path.join(param_dir, param_file), [])
for param_file in param_files
]
xml_to_json.write_legislation(legislation_xml_info_list, target_path)
|
<commit_before><commit_msg>Add script to transform CountryTemplate legislation<commit_after>
|
# -*- coding: utf-8 -*-
''' xml_to_json_country_template.py : Parse XML parameter files for Country-Template and convert them to YAML files. Comments are NOT transformed.
Usage :
`python xml_to_json_country_template.py output_dir`
or just (output is written in a directory called `yaml_parameters`):
`python xml_to_json_country_template.py`
'''
import sys
import os
from openfisca_country_template import CountryTaxBenefitSystem, COUNTRY_DIR
from openfisca_core.scripts.xml_to_json import xml_to_json
tax_benefit_system = CountryTaxBenefitSystem()
if len(sys.argv) > 1:
target_path = sys.argv[1]
else:
target_path = 'yaml_parameters'
param_dir = os.path.join(COUNTRY_DIR, 'parameters')
param_files = [
'__root__.xml',
'benefits.xml',
'general.xml',
'taxes.xml',
]
legislation_xml_info_list = [
(os.path.join(param_dir, param_file), [])
for param_file in param_files
]
xml_to_json.write_legislation(legislation_xml_info_list, target_path)
|
Add script to transform CountryTemplate legislation# -*- coding: utf-8 -*-
''' xml_to_json_country_template.py : Parse XML parameter files for Country-Template and convert them to YAML files. Comments are NOT transformed.
Usage :
`python xml_to_json_country_template.py output_dir`
or just (output is written in a directory called `yaml_parameters`):
`python xml_to_json_country_template.py`
'''
import sys
import os
from openfisca_country_template import CountryTaxBenefitSystem, COUNTRY_DIR
from openfisca_core.scripts.xml_to_json import xml_to_json
tax_benefit_system = CountryTaxBenefitSystem()
if len(sys.argv) > 1:
target_path = sys.argv[1]
else:
target_path = 'yaml_parameters'
param_dir = os.path.join(COUNTRY_DIR, 'parameters')
param_files = [
'__root__.xml',
'benefits.xml',
'general.xml',
'taxes.xml',
]
legislation_xml_info_list = [
(os.path.join(param_dir, param_file), [])
for param_file in param_files
]
xml_to_json.write_legislation(legislation_xml_info_list, target_path)
|
<commit_before><commit_msg>Add script to transform CountryTemplate legislation<commit_after># -*- coding: utf-8 -*-
''' xml_to_json_country_template.py : Parse XML parameter files for Country-Template and convert them to YAML files. Comments are NOT transformed.
Usage :
`python xml_to_json_country_template.py output_dir`
or just (output is written in a directory called `yaml_parameters`):
`python xml_to_json_country_template.py`
'''
import sys
import os
from openfisca_country_template import CountryTaxBenefitSystem, COUNTRY_DIR
from openfisca_core.scripts.xml_to_json import xml_to_json
tax_benefit_system = CountryTaxBenefitSystem()
if len(sys.argv) > 1:
target_path = sys.argv[1]
else:
target_path = 'yaml_parameters'
param_dir = os.path.join(COUNTRY_DIR, 'parameters')
param_files = [
'__root__.xml',
'benefits.xml',
'general.xml',
'taxes.xml',
]
legislation_xml_info_list = [
(os.path.join(param_dir, param_file), [])
for param_file in param_files
]
xml_to_json.write_legislation(legislation_xml_info_list, target_path)
|
|
6a04fd060d400e062db7d7233b614caf3bb1aa44
|
misc_scripts/create_civic_vcf.py
|
misc_scripts/create_civic_vcf.py
|
from civicpy import civic
from civicpy.exports import VCFWriter
import argparse
parser = argparse.ArgumentParser(description='Create CIViC VCF')
parser.add_argument('vcf_path')
args = parser.parse_args()
with open(args.vcf_path, "w") as fh:
writer = VCFWriter(fh)
for variant in civic.get_all_variants():
if variant.coordinates.chromosome and variant.coordinates.start and variant.coordinates.reference_bases and variant.coordinates.variant_bases:
try:
writer.addrecord(variant)
except (ValueError,NotImplementedError):
pass
writer.writerecords()
|
Add script to write CIViC VCF
|
Add script to write CIViC VCF
|
Python
|
mit
|
genome/civic-server,genome/civic-server,genome/civic-server,genome/civic-server,genome/civic-server
|
Add script to write CIViC VCF
|
from civicpy import civic
from civicpy.exports import VCFWriter
import argparse
parser = argparse.ArgumentParser(description='Create CIViC VCF')
parser.add_argument('vcf_path')
args = parser.parse_args()
with open(args.vcf_path, "w") as fh:
writer = VCFWriter(fh)
for variant in civic.get_all_variants():
if variant.coordinates.chromosome and variant.coordinates.start and variant.coordinates.reference_bases and variant.coordinates.variant_bases:
try:
writer.addrecord(variant)
except (ValueError,NotImplementedError):
pass
writer.writerecords()
|
<commit_before><commit_msg>Add script to write CIViC VCF<commit_after>
|
from civicpy import civic
from civicpy.exports import VCFWriter
import argparse
parser = argparse.ArgumentParser(description='Create CIViC VCF')
parser.add_argument('vcf_path')
args = parser.parse_args()
with open(args.vcf_path, "w") as fh:
writer = VCFWriter(fh)
for variant in civic.get_all_variants():
if variant.coordinates.chromosome and variant.coordinates.start and variant.coordinates.reference_bases and variant.coordinates.variant_bases:
try:
writer.addrecord(variant)
except (ValueError,NotImplementedError):
pass
writer.writerecords()
|
Add script to write CIViC VCFfrom civicpy import civic
from civicpy.exports import VCFWriter
import argparse
parser = argparse.ArgumentParser(description='Create CIViC VCF')
parser.add_argument('vcf_path')
args = parser.parse_args()
with open(args.vcf_path, "w") as fh:
writer = VCFWriter(fh)
for variant in civic.get_all_variants():
if variant.coordinates.chromosome and variant.coordinates.start and variant.coordinates.reference_bases and variant.coordinates.variant_bases:
try:
writer.addrecord(variant)
except (ValueError,NotImplementedError):
pass
writer.writerecords()
|
<commit_before><commit_msg>Add script to write CIViC VCF<commit_after>from civicpy import civic
from civicpy.exports import VCFWriter
import argparse
parser = argparse.ArgumentParser(description='Create CIViC VCF')
parser.add_argument('vcf_path')
args = parser.parse_args()
with open(args.vcf_path, "w") as fh:
writer = VCFWriter(fh)
for variant in civic.get_all_variants():
if variant.coordinates.chromosome and variant.coordinates.start and variant.coordinates.reference_bases and variant.coordinates.variant_bases:
try:
writer.addrecord(variant)
except (ValueError,NotImplementedError):
pass
writer.writerecords()
|
|
8f71a4824b6db1a4e7d9d25335e6d4af595d79cf
|
nanpy/examples/lcd_createchar.py
|
nanpy/examples/lcd_createchar.py
|
#!/usr/bin/env python
from nanpy.lcd import Lcd
if __name__ == '__main__':
pins = [7, 8, 9, 10, 11, 12]
cols, rows = 16, 2
lcd = Lcd(pins, [cols, rows])
smiley= [
0b00000,
0b10001,
0b00000,
0b00000,
0b10001,
0b01110,
0b00000,
0b00000]
lcd.createChar(0,smiley)
lcd.setCursor(0,0)
lcd.write(0)
|
Add example for LCD createChar
|
Add example for LCD createChar
|
Python
|
mit
|
joppi/nanpy,nanpy/nanpy
|
Add example for LCD createChar
|
#!/usr/bin/env python
from nanpy.lcd import Lcd
if __name__ == '__main__':
pins = [7, 8, 9, 10, 11, 12]
cols, rows = 16, 2
lcd = Lcd(pins, [cols, rows])
smiley= [
0b00000,
0b10001,
0b00000,
0b00000,
0b10001,
0b01110,
0b00000,
0b00000]
lcd.createChar(0,smiley)
lcd.setCursor(0,0)
lcd.write(0)
|
<commit_before><commit_msg>Add example for LCD createChar<commit_after>
|
#!/usr/bin/env python
from nanpy.lcd import Lcd
if __name__ == '__main__':
pins = [7, 8, 9, 10, 11, 12]
cols, rows = 16, 2
lcd = Lcd(pins, [cols, rows])
smiley= [
0b00000,
0b10001,
0b00000,
0b00000,
0b10001,
0b01110,
0b00000,
0b00000]
lcd.createChar(0,smiley)
lcd.setCursor(0,0)
lcd.write(0)
|
Add example for LCD createChar#!/usr/bin/env python
from nanpy.lcd import Lcd
if __name__ == '__main__':
pins = [7, 8, 9, 10, 11, 12]
cols, rows = 16, 2
lcd = Lcd(pins, [cols, rows])
smiley= [
0b00000,
0b10001,
0b00000,
0b00000,
0b10001,
0b01110,
0b00000,
0b00000]
lcd.createChar(0,smiley)
lcd.setCursor(0,0)
lcd.write(0)
|
<commit_before><commit_msg>Add example for LCD createChar<commit_after>#!/usr/bin/env python
from nanpy.lcd import Lcd
if __name__ == '__main__':
pins = [7, 8, 9, 10, 11, 12]
cols, rows = 16, 2
lcd = Lcd(pins, [cols, rows])
smiley= [
0b00000,
0b10001,
0b00000,
0b00000,
0b10001,
0b01110,
0b00000,
0b00000]
lcd.createChar(0,smiley)
lcd.setCursor(0,0)
lcd.write(0)
|
|
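Background for the bitmap above, added as an editorial note: HD44780-compatible character LCDs store each custom glyph as eight rows of 5-bit patterns and provide eight CGRAM slots (0-7). A hedged sketch that reuses the same nanpy Lcd calls shown in the example to register a second, made-up glyph:
#!/usr/bin/env python
# Sketch only: defines a hypothetical "open square" glyph in CGRAM slot 1,
# using the same Lcd API as the createChar example above.
from nanpy.lcd import Lcd
pins = [7, 8, 9, 10, 11, 12]
lcd = Lcd(pins, [16, 2])
open_square = [
    0b11111,
    0b10001,
    0b10001,
    0b10001,
    0b10001,
    0b10001,
    0b11111,
    0b00000]
lcd.createChar(1, open_square)  # slot 1 of the 8 available slots
lcd.setCursor(1, 0)
lcd.write(1)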
9db2999edfbe6b2813412287cbc7146374277246
|
tests/test_cli.py
|
tests/test_cli.py
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import os
import tempfile
import unittest
from esis.cli import (
clean,
count,
index,
parse_arguments,
search,
)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_index_command(self):
"""Search command."""
temp_directory = tempfile.mkdtemp()
try:
args = parse_arguments(['index', temp_directory])
self.assertEqual(args.directory, temp_directory)
self.assertEqual(args.func, index)
finally:
os.rmdir(temp_directory)
def test_search_command(self):
"""Search command."""
args = parse_arguments(['search', 'query'])
self.assertEqual(args.query, 'query')
self.assertEqual(args.func, search)
def test_count_command(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_clean_command(self):
"""Clean command."""
args = parse_arguments(['clean'])
self.assertEqual(args.func, clean)
|
Add parse arguments test cases
|
Add parse arguments test cases
|
Python
|
mit
|
jcollado/esis
|
Add parse arguments test cases
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import os
import tempfile
import unittest
from esis.cli import (
clean,
count,
index,
parse_arguments,
search,
)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_index_command(self):
"""Search command."""
temp_directory = tempfile.mkdtemp()
try:
args = parse_arguments(['index', temp_directory])
self.assertEqual(args.directory, temp_directory)
self.assertEqual(args.func, index)
finally:
os.rmdir(temp_directory)
def test_search_command(self):
"""Search command."""
args = parse_arguments(['search', 'query'])
self.assertEqual(args.query, 'query')
self.assertEqual(args.func, search)
def test_count_command(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_clean_command(self):
"""Clean command."""
args = parse_arguments(['clean'])
self.assertEqual(args.func, clean)
|
<commit_before><commit_msg>Add parse arguments test cases<commit_after>
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import os
import tempfile
import unittest
from esis.cli import (
clean,
count,
index,
parse_arguments,
search,
)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_index_command(self):
"""Search command."""
temp_directory = tempfile.mkdtemp()
try:
args = parse_arguments(['index', temp_directory])
self.assertEqual(args.directory, temp_directory)
self.assertEqual(args.func, index)
finally:
os.rmdir(temp_directory)
def test_search_command(self):
"""Search command."""
args = parse_arguments(['search', 'query'])
self.assertEqual(args.query, 'query')
self.assertEqual(args.func, search)
def test_count_command(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_clean_command(self):
"""Clean command."""
args = parse_arguments(['clean'])
self.assertEqual(args.func, clean)
|
Add parse arguments test cases# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import os
import tempfile
import unittest
from esis.cli import (
clean,
count,
index,
parse_arguments,
search,
)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_index_command(self):
"""Search command."""
temp_directory = tempfile.mkdtemp()
try:
args = parse_arguments(['index', temp_directory])
self.assertEqual(args.directory, temp_directory)
self.assertEqual(args.func, index)
finally:
os.rmdir(temp_directory)
def test_search_command(self):
"""Search command."""
args = parse_arguments(['search', 'query'])
self.assertEqual(args.query, 'query')
self.assertEqual(args.func, search)
def test_count_command(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_clean_command(self):
"""Clean command."""
args = parse_arguments(['clean'])
self.assertEqual(args.func, clean)
|
<commit_before><commit_msg>Add parse arguments test cases<commit_after># -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import os
import tempfile
import unittest
from esis.cli import (
clean,
count,
index,
parse_arguments,
search,
)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_index_command(self):
"""Search command."""
temp_directory = tempfile.mkdtemp()
try:
args = parse_arguments(['index', temp_directory])
self.assertEqual(args.directory, temp_directory)
self.assertEqual(args.func, index)
finally:
os.rmdir(temp_directory)
def test_search_command(self):
"""Search command."""
args = parse_arguments(['search', 'query'])
self.assertEqual(args.query, 'query')
self.assertEqual(args.func, search)
def test_count_command(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_clean_command(self):
"""Clean command."""
args = parse_arguments(['clean'])
self.assertEqual(args.func, clean)
|
|
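A small editorial aside on the try/finally cleanup in test_index_command: on Python 3 the same check can use tempfile.TemporaryDirectory, which removes the directory even if an assertion fails. This is a sketch only, not part of the original test suite; it assumes the same esis.cli imports used by the tests above:
# Sketch only: index-command check rewritten with a context manager for cleanup.
import tempfile
import unittest
from esis.cli import index, parse_arguments
class ParseArgumentsContextManagerTest(unittest.TestCase):
    def test_index_command(self):
        """Index command."""
        with tempfile.TemporaryDirectory() as temp_directory:
            args = parse_arguments(['index', temp_directory])
            self.assertEqual(args.directory, temp_directory)
            self.assertEqual(args.func, index)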
3bb268fcf28082df3d65f78d703938dccabac51b
|
scripts/win32-setup-svn-root.py
|
scripts/win32-setup-svn-root.py
|
import os
import sys
VERSION = '0.5.2'
def process_template(input, output=None, vardict={}):
if output is None and input.endswith('.in'):
output = input[:-3]
data = open(input).read()
for key, value in vardict.items():
data = data.replace(key, value)
open(output, 'w').write(data)
scriptdir = os.path.dirname(__file__)
svnroot = os.path.abspath(os.path.join(scriptdir, '..'))
vardict = {
'@LIBDIR@': os.path.join(svnroot),
'@VERSION@': VERSION,
}
process_template(os.path.join(svnroot, 'bin', 'flumotion-admin.in'),
os.path.join(svnroot, 'bin', 'flumotion-admin.py'), vardict=vardict)
process_template(os.path.join(svnroot, 'flumotion', 'configure', 'uninstalled.py.in'), vardict=vardict)
|
Add a new script to setup a win32 svn root
|
Add a new script to setup a win32 svn root
|
Python
|
lgpl-2.1
|
flumotion-mirror/flumotion,timvideos/flumotion,Flumotion/flumotion,timvideos/flumotion,Flumotion/flumotion,Flumotion/flumotion,flumotion-mirror/flumotion,timvideos/flumotion,Flumotion/flumotion
|
Add a new script to setup a win32 svn root
|
import os
import sys
VERSION = '0.5.2'
def process_template(input, output=None, vardict={}):
if output is None and input.endswith('.in'):
output = input[:-3]
data = open(input).read()
for key, value in vardict.items():
data = data.replace(key, value)
open(output, 'w').write(data)
scriptdir = os.path.dirname(__file__)
svnroot = os.path.abspath(os.path.join(scriptdir, '..'))
vardict = {
'@LIBDIR@': os.path.join(svnroot),
'@VERSION@': VERSION,
}
process_template(os.path.join(svnroot, 'bin', 'flumotion-admin.in'),
os.path.join(svnroot, 'bin', 'flumotion-admin.py'), vardict=vardict)
process_template(os.path.join(svnroot, 'flumotion', 'configure', 'uninstalled.py.in'), vardict=vardict)
|
<commit_before><commit_msg>Add a new script to setup a win32 svn root<commit_after>
|
import os
import sys
VERSION = '0.5.2'
def process_template(input, output=None, vardict={}):
if output is None and input.endswith('.in'):
output = input[:-3]
data = open(input).read()
for key, value in vardict.items():
data = data.replace(key, value)
open(output, 'w').write(data)
scriptdir = os.path.dirname(__file__)
svnroot = os.path.abspath(os.path.join(scriptdir, '..'))
vardict = {
'@LIBDIR@': os.path.join(svnroot),
'@VERSION@': VERSION,
}
process_template(os.path.join(svnroot, 'bin', 'flumotion-admin.in'),
os.path.join(svnroot, 'bin', 'flumotion-admin.py'), vardict=vardict)
process_template(os.path.join(svnroot, 'flumotion', 'configure', 'uninstalled.py.in'), vardict=vardict)
|
Add a new script to setup a win32 svn rootimport os
import sys
VERSION = '0.5.2'
def process_template(input, output=None, vardict={}):
if output is None and input.endswith('.in'):
output = input[:-3]
data = open(input).read()
for key, value in vardict.items():
data = data.replace(key, value)
open(output, 'w').write(data)
scriptdir = os.path.dirname(__file__)
svnroot = os.path.abspath(os.path.join(scriptdir, '..'))
vardict = {
'@LIBDIR@': os.path.join(svnroot),
'@VERSION@': VERSION,
}
process_template(os.path.join(svnroot, 'bin', 'flumotion-admin.in'),
os.path.join(svnroot, 'bin', 'flumotion-admin.py'), vardict=vardict)
process_template(os.path.join(svnroot, 'flumotion', 'configure', 'uninstalled.py.in'), vardict=vardict)
|
<commit_before><commit_msg>Add a new script to setup a win32 svn root<commit_after>import os
import sys
VERSION = '0.5.2'
def process_template(input, output=None, vardict={}):
if output is None and input.endswith('.in'):
output = input[:-3]
data = open(input).read()
for key, value in vardict.items():
data = data.replace(key, value)
open(output, 'w').write(data)
scriptdir = os.path.dirname(__file__)
svnroot = os.path.abspath(os.path.join(scriptdir, '..'))
vardict = {
'@LIBDIR@': os.path.join(svnroot),
'@VERSION@': VERSION,
}
process_template(os.path.join(svnroot, 'bin', 'flumotion-admin.in'),
os.path.join(svnroot, 'bin', 'flumotion-admin.py'), vardict=vardict)
process_template(os.path.join(svnroot, 'flumotion', 'configure', 'uninstalled.py.in'), vardict=vardict)
|
|
423b3ba67561d3bc9017566ce5598f973713270c
|
glitter/blocks/call_to_action/migrations/0003_delete_empty_blocks.py
|
glitter/blocks/call_to_action/migrations/0003_delete_empty_blocks.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def remove_empty_blocks(apps, schema_editor):
CallToActionBlock = apps.get_model('glitter_call_to_action', 'CallToActionBlock')
ContentBlock = apps.get_model('glitter', 'ContentBlock')
empty_blocks = CallToActionBlock.objects.filter(title='')
content_block_ids = empty_blocks.values_list('content_block_id', flat=True)
ContentBlock.objects.filter(id__in=content_block_ids).delete()
empty_blocks.delete()
class Migration(migrations.Migration):
dependencies = [
('glitter_call_to_action', '0002_linkfield'),
('glitter', '0001_initial'),
]
operations = [
migrations.RunPython(remove_empty_blocks, reverse_code=migrations.RunPython.noop),
]
|
Clean up empty call to action blocks
|
Clean up empty call to action blocks
|
Python
|
bsd-3-clause
|
developersociety/django-glitter,blancltd/django-glitter,blancltd/django-glitter,blancltd/django-glitter,developersociety/django-glitter,developersociety/django-glitter
|
Clean up empty call to action blocks
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def remove_empty_blocks(apps, schema_editor):
CallToActionBlock = apps.get_model('glitter_call_to_action', 'CallToActionBlock')
ContentBlock = apps.get_model('glitter', 'ContentBlock')
empty_blocks = CallToActionBlock.objects.filter(title='')
content_block_ids = empty_blocks.values_list('content_block_id', flat=True)
ContentBlock.objects.filter(id__in=content_block_ids).delete()
empty_blocks.delete()
class Migration(migrations.Migration):
dependencies = [
('glitter_call_to_action', '0002_linkfield'),
('glitter', '0001_initial'),
]
operations = [
migrations.RunPython(remove_empty_blocks, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Clean up empty call to action blocks<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def remove_empty_blocks(apps, schema_editor):
CallToActionBlock = apps.get_model('glitter_call_to_action', 'CallToActionBlock')
ContentBlock = apps.get_model('glitter', 'ContentBlock')
empty_blocks = CallToActionBlock.objects.filter(title='')
content_block_ids = empty_blocks.values_list('content_block_id', flat=True)
ContentBlock.objects.filter(id__in=content_block_ids).delete()
empty_blocks.delete()
class Migration(migrations.Migration):
dependencies = [
('glitter_call_to_action', '0002_linkfield'),
('glitter', '0001_initial'),
]
operations = [
migrations.RunPython(remove_empty_blocks, reverse_code=migrations.RunPython.noop),
]
|
Clean up empty call to action blocks# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def remove_empty_blocks(apps, schema_editor):
CallToActionBlock = apps.get_model('glitter_call_to_action', 'CallToActionBlock')
ContentBlock = apps.get_model('glitter', 'ContentBlock')
empty_blocks = CallToActionBlock.objects.filter(title='')
content_block_ids = empty_blocks.values_list('content_block_id', flat=True)
ContentBlock.objects.filter(id__in=content_block_ids).delete()
empty_blocks.delete()
class Migration(migrations.Migration):
dependencies = [
('glitter_call_to_action', '0002_linkfield'),
('glitter', '0001_initial'),
]
operations = [
migrations.RunPython(remove_empty_blocks, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Clean up empty call to action blocks<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def remove_empty_blocks(apps, schema_editor):
CallToActionBlock = apps.get_model('glitter_call_to_action', 'CallToActionBlock')
ContentBlock = apps.get_model('glitter', 'ContentBlock')
empty_blocks = CallToActionBlock.objects.filter(title='')
content_block_ids = empty_blocks.values_list('content_block_id', flat=True)
ContentBlock.objects.filter(id__in=content_block_ids).delete()
empty_blocks.delete()
class Migration(migrations.Migration):
dependencies = [
('glitter_call_to_action', '0002_linkfield'),
('glitter', '0001_initial'),
]
operations = [
migrations.RunPython(remove_empty_blocks, reverse_code=migrations.RunPython.noop),
]
|
|
431a7221780b36862876ef7f64d47bbb0082ef48
|
neuroanalysis/tests/test_spike_detection.py
|
neuroanalysis/tests/test_spike_detection.py
|
import numpy as np
from neuroanalysis.data import Recording, Trace
from neuroanalysis.neuronsim.model_cell import ModelCell
from neuroanalysis.units import pA, mV, MOhm, pF, us, ms
from neuroanalysis.spike_detection import detect_evoked_spike
def test_spike_detection():
# Need to fill this function up with many more tests, especially
# measuring against real data.
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
resp = create_test_pulse(start=5*ms, pamp=100*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is None
resp = create_test_pulse(start=5*ms, pamp=1000*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is not None
model_cell = ModelCell()
def create_test_pulse(start=5*ms, pdur=10*ms, pamp=-10*pA, mode='ic', dt=10*us, r_access=10*MOhm, c_soma=5*pF, noise=5*pA):
# update patch pipette access resistance
model_cell.clamp.ra = r_access
# update noise amplitude
model_cell.mechs['noise'].stdev = noise
# make pulse array
duration = start + pdur * 3
pulse = np.zeros(int(duration / dt))
pstart = int(start / dt)
pstop = pstart + int(pdur / dt)
pulse[pstart:pstop] = pamp
# simulate response
result = model_cell.test(Trace(pulse, dt), mode)
return result
if __name__ == '__main__':
import pyqtgraph as pg
plt = pg.plot(labels={'left': ('Vm', 'V'), 'bottom': ('time', 's')})
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
# Iterate over a series of increasing pulse amplitudes
for amp in np.arange(50*pA, 500*pA, 50*pA):
# Simulate pulse response
resp = create_test_pulse(start=start, pamp=amp, pdur=duration, mode='ic', r_access=100*MOhm)
# Test spike detection
spike = detect_evoked_spike(resp, pulse_edges)
print(spike)
pen = 'r' if spike is None else 'g'
# plot in green if a spike was detected
pri = resp['primary']
pri.t0 = 0
plt.plot(pri.time_values, pri.data, pen=pen)
# redraw after every new test
pg.QtGui.QApplication.processEvents()
|
Add stub for spike detection unit test
|
Add stub for spike detection unit test
|
Python
|
mit
|
campagnola/neuroanalysis
|
Add stub for spike detection unit test
|
import numpy as np
from neuroanalysis.data import Recording, Trace
from neuroanalysis.neuronsim.model_cell import ModelCell
from neuroanalysis.units import pA, mV, MOhm, pF, us, ms
from neuroanalysis.spike_detection import detect_evoked_spike
def test_spike_detection():
# Need to fill this function up with many more tests, especially
# measuring against real data.
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
resp = create_test_pulse(start=5*ms, pamp=100*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is None
resp = create_test_pulse(start=5*ms, pamp=1000*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is not None
model_cell = ModelCell()
def create_test_pulse(start=5*ms, pdur=10*ms, pamp=-10*pA, mode='ic', dt=10*us, r_access=10*MOhm, c_soma=5*pF, noise=5*pA):
# update patch pipette access resistance
model_cell.clamp.ra = r_access
# update noise amplitude
model_cell.mechs['noise'].stdev = noise
# make pulse array
duration = start + pdur * 3
pulse = np.zeros(int(duration / dt))
pstart = int(start / dt)
pstop = pstart + int(pdur / dt)
pulse[pstart:pstop] = pamp
# simulate response
result = model_cell.test(Trace(pulse, dt), mode)
return result
if __name__ == '__main__':
import pyqtgraph as pg
plt = pg.plot(labels={'left': ('Vm', 'V'), 'bottom': ('time', 's')})
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
# Iterate over a series of increasing pulse amplitudes
for amp in np.arange(50*pA, 500*pA, 50*pA):
# Simulate pulse response
resp = create_test_pulse(start=start, pamp=amp, pdur=duration, mode='ic', r_access=100*MOhm)
# Test spike detection
spike = detect_evoked_spike(resp, pulse_edges)
print(spike)
pen = 'r' if spike is None else 'g'
# plot in green if a spike was detected
pri = resp['primary']
pri.t0 = 0
plt.plot(pri.time_values, pri.data, pen=pen)
# redraw after every new test
pg.QtGui.QApplication.processEvents()
|
<commit_before><commit_msg>Add stub for spike detection unit test<commit_after>
|
import numpy as np
from neuroanalysis.data import Recording, Trace
from neuroanalysis.neuronsim.model_cell import ModelCell
from neuroanalysis.units import pA, mV, MOhm, pF, us, ms
from neuroanalysis.spike_detection import detect_evoked_spike
def test_spike_detection():
# Need to fill this function up with many more tests, especially
# measuring against real data.
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
resp = create_test_pulse(start=5*ms, pamp=100*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is None
resp = create_test_pulse(start=5*ms, pamp=1000*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is not None
model_cell = ModelCell()
def create_test_pulse(start=5*ms, pdur=10*ms, pamp=-10*pA, mode='ic', dt=10*us, r_access=10*MOhm, c_soma=5*pF, noise=5*pA):
# update patch pipette access resistance
model_cell.clamp.ra = r_access
# update noise amplitude
model_cell.mechs['noise'].stdev = noise
# make pulse array
duration = start + pdur * 3
pulse = np.zeros(int(duration / dt))
pstart = int(start / dt)
pstop = pstart + int(pdur / dt)
pulse[pstart:pstop] = pamp
# simulate response
result = model_cell.test(Trace(pulse, dt), mode)
return result
if __name__ == '__main__':
import pyqtgraph as pg
plt = pg.plot(labels={'left': ('Vm', 'V'), 'bottom': ('time', 's')})
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
# Iterate over a series of increasing pulse amplitudes
for amp in np.arange(50*pA, 500*pA, 50*pA):
# Simulate pulse response
resp = create_test_pulse(start=start, pamp=amp, pdur=duration, mode='ic', r_access=100*MOhm)
# Test spike detection
spike = detect_evoked_spike(resp, pulse_edges)
print(spike)
pen = 'r' if spike is None else 'g'
# plot in green if a spike was detected
pri = resp['primary']
pri.t0 = 0
plt.plot(pri.time_values, pri.data, pen=pen)
# redraw after every new test
pg.QtGui.QApplication.processEvents()
|
Add stub for spike detection unit testimport numpy as np
from neuroanalysis.data import Recording, Trace
from neuroanalysis.neuronsim.model_cell import ModelCell
from neuroanalysis.units import pA, mV, MOhm, pF, us, ms
from neuroanalysis.spike_detection import detect_evoked_spike
def test_spike_detection():
# Need to fill this function up with many more tests, especially
# measuring against real data.
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
resp = create_test_pulse(start=5*ms, pamp=100*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is None
resp = create_test_pulse(start=5*ms, pamp=1000*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is not None
model_cell = ModelCell()
def create_test_pulse(start=5*ms, pdur=10*ms, pamp=-10*pA, mode='ic', dt=10*us, r_access=10*MOhm, c_soma=5*pF, noise=5*pA):
# update patch pipette access resistance
model_cell.clamp.ra = r_access
# update noise amplitude
model_cell.mechs['noise'].stdev = noise
# make pulse array
duration = start + pdur * 3
pulse = np.zeros(int(duration / dt))
pstart = int(start / dt)
pstop = pstart + int(pdur / dt)
pulse[pstart:pstop] = pamp
# simulate response
result = model_cell.test(Trace(pulse, dt), mode)
return result
if __name__ == '__main__':
import pyqtgraph as pg
plt = pg.plot(labels={'left': ('Vm', 'V'), 'bottom': ('time', 's')})
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
# Iterate over a series of increasing pulse amplitudes
for amp in np.arange(50*pA, 500*pA, 50*pA):
# Simulate pulse response
resp = create_test_pulse(start=start, pamp=amp, pdur=duration, mode='ic', r_access=100*MOhm)
# Test spike detection
spike = detect_evoked_spike(resp, pulse_edges)
print(spike)
pen = 'r' if spike is None else 'g'
# plot in green if a spike was detected
pri = resp['primary']
pri.t0 = 0
plt.plot(pri.time_values, pri.data, pen=pen)
# redraw after every new test
pg.QtGui.QApplication.processEvents()
|
<commit_before><commit_msg>Add stub for spike detection unit test<commit_after>import numpy as np
from neuroanalysis.data import Recording, Trace
from neuroanalysis.neuronsim.model_cell import ModelCell
from neuroanalysis.units import pA, mV, MOhm, pF, us, ms
from neuroanalysis.spike_detection import detect_evoked_spike
def test_spike_detection():
# Need to fill this function up with many more tests, especially
# measuring against real data.
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
resp = create_test_pulse(start=5*ms, pamp=100*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is None
resp = create_test_pulse(start=5*ms, pamp=1000*pA, pdur=2*ms, mode='ic', dt=dt)
spike = detect_evoked_spike(resp, pulse_edges)
assert spike is not None
model_cell = ModelCell()
def create_test_pulse(start=5*ms, pdur=10*ms, pamp=-10*pA, mode='ic', dt=10*us, r_access=10*MOhm, c_soma=5*pF, noise=5*pA):
# update patch pipette access resistance
model_cell.clamp.ra = r_access
# update noise amplitude
model_cell.mechs['noise'].stdev = noise
# make pulse array
duration = start + pdur * 3
pulse = np.zeros(int(duration / dt))
pstart = int(start / dt)
pstop = pstart + int(pdur / dt)
pulse[pstart:pstop] = pamp
# simulate response
result = model_cell.test(Trace(pulse, dt), mode)
return result
if __name__ == '__main__':
import pyqtgraph as pg
plt = pg.plot(labels={'left': ('Vm', 'V'), 'bottom': ('time', 's')})
dt = 10*us
start = 5*ms
duration = 2*ms
pulse_edges = int(start / dt), int((start+duration) / dt)
# Iterate over a series of increasing pulse amplitudes
for amp in np.arange(50*pA, 500*pA, 50*pA):
# Simulate pulse response
resp = create_test_pulse(start=start, pamp=amp, pdur=duration, mode='ic', r_access=100*MOhm)
# Test spike detection
spike = detect_evoked_spike(resp, pulse_edges)
print(spike)
pen = 'r' if spike is None else 'g'
# plot in green if a spike was detected
pri = resp['primary']
pri.t0 = 0
plt.plot(pri.time_values, pri.data, pen=pen)
# redraw after every new test
pg.QtGui.QApplication.processEvents()
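As a minimal sketch of the kind of extra case the TODO in test_spike_detection calls for (the amplitude values and the monotonicity assumption are illustrative additions, not part of the commit), the helpers above could also back a coarse sanity check:

def test_spike_detection_monotonic():
    # Sketch: once a spike is detected at some amplitude, larger amplitudes
    # should not lose it.  No specific threshold value is assumed.
    dt = 10*us
    pulse_edges = int(5*ms / dt), int(7*ms / dt)
    detected = []
    for amp in (100*pA, 500*pA, 1000*pA, 2000*pA):
        resp = create_test_pulse(start=5*ms, pamp=amp, pdur=2*ms, mode='ic', dt=dt)
        detected.append(detect_evoked_spike(resp, pulse_edges) is not None)
    assert detected == sorted(detected)  # detection flags are non-decreasing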
|
|
2f6aa42fb768f0e97e84192f99eb82c90442f959
|
PerfTest.py
|
PerfTest.py
|
__author__ = 'Frank'
import pycurl
import json
from datetime import datetime, timezone, timedelta
import random
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
#curl -X PUT --header "Content-Type: application/json" --header "Accept: application/json" -d "{
# \"id\": 1,
# \"timeStamp\": \"2016-02-15T11:23:18+01:00\"
#}" "http://localhost:8080/workorder"
def put_order(id, timestamp="2016-02-15T11:23:18+01:00"):
payload = {"id": id, "timeStamp" : timestamp}
json_payload = json.dumps(payload)
c = pycurl.Curl()
c.setopt(c.URL, "http://localhost:8080/workorder")
c.setopt(c.HTTPHEADER, ["Content-Type: application/json", 'Accept: application/json'])
c.setopt(c.CUSTOMREQUEST, "PUT")
c.setopt(c.POSTFIELDS, json_payload)
c.perform()
resp_code = c.getinfo(c.HTTP_CODE)
if resp_code != 201:
        print("Error code %s." % resp_code)
raise ValueError('Some error here')
try:
for i in range(10000052, 11000000):
offset = random.randint(1, 10000)
time = datetime.now(timezone.utc).astimezone() - timedelta(hours=offset)
put_order(i, time.isoformat())
except ValueError:
    print("Max number is %s." % i)
|
Add simple Python Stress test script
|
Add simple Python Stress test script
|
Python
|
apache-2.0
|
frankfarrell/SNOWBALL-MAGIC-19851014,frankfarrell/SNOWBALL-MAGIC-19851014
|
Add simple Python Stress test script
|
__author__ = 'Frank'
import pycurl
import json
from datetime import datetime, timezone, timedelta
import random
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
#curl -X PUT --header "Content-Type: application/json" --header "Accept: application/json" -d "{
# \"id\": 1,
# \"timeStamp\": \"2016-02-15T11:23:18+01:00\"
#}" "http://localhost:8080/workorder"
def put_order(id, timestamp="2016-02-15T11:23:18+01:00"):
payload = {"id": id, "timeStamp" : timestamp}
json_payload = json.dumps(payload)
c = pycurl.Curl()
c.setopt(c.URL, "http://localhost:8080/workorder")
c.setopt(c.HTTPHEADER, ["Content-Type: application/json", 'Accept: application/json'])
c.setopt(c.CUSTOMREQUEST, "PUT")
c.setopt(c.POSTFIELDS, json_payload)
c.perform()
resp_code = c.getinfo(c.HTTP_CODE)
if resp_code != 201:
        print("Error code %s." % resp_code)
raise ValueError('Some error here')
try:
for i in range(10000052, 11000000):
offset = random.randint(1, 10000)
time = datetime.now(timezone.utc).astimezone() - timedelta(hours=offset)
put_order(i, time.isoformat())
except ValueError:
    print("Max number is %s." % i)
|
<commit_before><commit_msg>Add simple Python Stress test script<commit_after>
|
__author__ = 'Frank'
import pycurl
import json
from datetime import datetime, timezone, timedelta
import random
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
#curl -X PUT --header "Content-Type: application/json" --header "Accept: application/json" -d "{
# \"id\": 1,
# \"timeStamp\": \"2016-02-15T11:23:18+01:00\"
#}" "http://localhost:8080/workorder"
def put_order(id, timestamp="2016-02-15T11:23:18+01:00"):
payload = {"id": id, "timeStamp" : timestamp}
json_payload = json.dumps(payload)
c = pycurl.Curl()
c.setopt(c.URL, "http://localhost:8080/workorder")
c.setopt(c.HTTPHEADER, ["Content-Type: application/json", 'Accept: application/json'])
c.setopt(c.CUSTOMREQUEST, "PUT")
c.setopt(c.POSTFIELDS, json_payload)
c.perform()
resp_code = c.getinfo(c.HTTP_CODE)
if resp_code != 201:
        print("Error code %s." % resp_code)
raise ValueError('Some error here')
try:
for i in range(10000052, 11000000):
offset = random.randint(1, 10000)
time = datetime.now(timezone.utc).astimezone() - timedelta(hours=offset)
put_order(i, time.isoformat())
except ValueError:
    print("Max number is %s." % i)
|
Add simple Python Stress test script__author__ = 'Frank'
import pycurl
import json
from datetime import datetime, timezone, timedelta
import random
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
#curl -X PUT --header "Content-Type: application/json" --header "Accept: application/json" -d "{
# \"id\": 1,
# \"timeStamp\": \"2016-02-15T11:23:18+01:00\"
#}" "http://localhost:8080/workorder"
def put_order(id, timestamp="2016-02-15T11:23:18+01:00"):
payload = {"id": id, "timeStamp" : timestamp}
json_payload = json.dumps(payload)
c = pycurl.Curl()
c.setopt(c.URL, "http://localhost:8080/workorder")
c.setopt(c.HTTPHEADER, ["Content-Type: application/json", 'Accept: application/json'])
c.setopt(c.CUSTOMREQUEST, "PUT")
c.setopt(c.POSTFIELDS, json_payload)
c.perform()
resp_code = c.getinfo(c.HTTP_CODE)
if resp_code != 201:
        print("Error code %s." % resp_code)
raise ValueError('Some error here')
try:
for i in range(10000052, 11000000):
offset = random.randint(1, 10000)
time = datetime.now(timezone.utc).astimezone() - timedelta(hours=offset)
put_order(i, time.isoformat())
except ValueError:
    print("Max number is %s." % i)
|
<commit_before><commit_msg>Add simple Python Stress test script<commit_after>__author__ = 'Frank'
import pycurl
import json
from datetime import datetime, timezone, timedelta
import random
try:
# python 3
from urllib.parse import urlencode
except ImportError:
# python 2
from urllib import urlencode
#curl -X PUT --header "Content-Type: application/json" --header "Accept: application/json" -d "{
# \"id\": 1,
# \"timeStamp\": \"2016-02-15T11:23:18+01:00\"
#}" "http://localhost:8080/workorder"
def put_order(id, timestamp="2016-02-15T11:23:18+01:00"):
payload = {"id": id, "timeStamp" : timestamp}
json_payload = json.dumps(payload)
c = pycurl.Curl()
c.setopt(c.URL, "http://localhost:8080/workorder")
c.setopt(c.HTTPHEADER, ["Content-Type: application/json", 'Accept: application/json'])
c.setopt(c.CUSTOMREQUEST, "PUT")
c.setopt(c.POSTFIELDS, json_payload)
c.perform()
resp_code = c.getinfo(c.HTTP_CODE)
if resp_code != 201:
        print("Error code %s." % resp_code)
raise ValueError('Some error here')
try:
for i in range(10000052, 11000000):
offset = random.randint(1, 10000)
time = datetime.now(timezone.utc).astimezone() - timedelta(hours=offset)
put_order(i, time.isoformat())
except ValueError:
    print("Max number is %s." % i)
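The stress script above drives PUT /workorder through pycurl. As a hedged sketch only, the same request can be issued with the standard library on Python 3; the URL, payload shape and 201 success code come from the record, while the helper name and everything else is illustrative:

import json
import urllib.request

def put_order_stdlib(order_id, timestamp):
    # Sketch: equivalent of put_order() above without the pycurl dependency.
    body = json.dumps({"id": order_id, "timeStamp": timestamp}).encode("utf-8")
    req = urllib.request.Request(
        "http://localhost:8080/workorder",
        data=body,
        method="PUT",
        headers={"Content-Type": "application/json", "Accept": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return resp.status  # 201 is the success code the script checks for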
|
|
8267097f9befa2e949531436d8299525ac9a34d5
|
bitbots_body_behavior/src/bitbots_body_behavior/decisions/reached_movebase_goal.py
|
bitbots_body_behavior/src/bitbots_body_behavior/decisions/reached_movebase_goal.py
|
import numpy as np
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class ReachedMovebaseGoalPosition(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(ReachedMovebaseGoalPosition, self).__init__(blackboard, dsd, parameters)
self.threshould = parameters['thres']
def perform(self, reevaluate=False):
"""
Determines whether we are near the movebase goal
:param reevaluate:
:return:
"""
if self.blackboard.pathfinding.goal is None or self.blackboard.pathfinding.current_pose is None:
return "NO"
goal = np.array([self.blackboard.pathfinding.goal.pose.position.x, self.blackboard.pathfinding.goal.pose.position.y])
position = np.array([self.blackboard.pathfinding.current_pose.pose.position.x, self.blackboard.pathfinding.current_pose.pose.position.y])
if np.linalg.norm(goal - position) < self.threshould:
return 'YES'
return 'NO'
def get_reevaluate(self):
return True
|
Add reached movebase goal decision
|
Add reached movebase goal decision
|
Python
|
bsd-3-clause
|
bit-bots/bitbots_behaviour
|
Add reached movebase goal decision
|
import numpy as np
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class ReachedMovebaseGoalPosition(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(ReachedMovebaseGoalPosition, self).__init__(blackboard, dsd, parameters)
self.threshould = parameters['thres']
def perform(self, reevaluate=False):
"""
Determines whether we are near the movebase goal
:param reevaluate:
:return:
"""
if self.blackboard.pathfinding.goal is None or self.blackboard.pathfinding.current_pose is None:
return "NO"
goal = np.array([self.blackboard.pathfinding.goal.pose.position.x, self.blackboard.pathfinding.goal.pose.position.y])
position = np.array([self.blackboard.pathfinding.current_pose.pose.position.x, self.blackboard.pathfinding.current_pose.pose.position.y])
if np.linalg.norm(goal - position) < self.threshould:
return 'YES'
return 'NO'
def get_reevaluate(self):
return True
|
<commit_before><commit_msg>Add reached movebase goal decision<commit_after>
|
import numpy as np
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class ReachedMovebaseGoalPosition(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(ReachedMovebaseGoalPosition, self).__init__(blackboard, dsd, parameters)
self.threshould = parameters['thres']
def perform(self, reevaluate=False):
"""
Determines whether we are near the movebase goal
:param reevaluate:
:return:
"""
if self.blackboard.pathfinding.goal is None or self.blackboard.pathfinding.current_pose is None:
return "NO"
goal = np.array([self.blackboard.pathfinding.goal.pose.position.x, self.blackboard.pathfinding.goal.pose.position.y])
position = np.array([self.blackboard.pathfinding.current_pose.pose.position.x, self.blackboard.pathfinding.current_pose.pose.position.y])
if np.linalg.norm(goal - position) < self.threshould:
return 'YES'
return 'NO'
def get_reevaluate(self):
return True
|
Add reached movebase goal decisionimport numpy as np
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class ReachedMovebaseGoalPosition(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(ReachedMovebaseGoalPosition, self).__init__(blackboard, dsd, parameters)
self.threshould = parameters['thres']
def perform(self, reevaluate=False):
"""
Determines whether we are near the movebase goal
:param reevaluate:
:return:
"""
if self.blackboard.pathfinding.goal is None or self.blackboard.pathfinding.current_pose is None:
return "NO"
goal = np.array([self.blackboard.pathfinding.goal.pose.position.x, self.blackboard.pathfinding.goal.pose.position.y])
position = np.array([self.blackboard.pathfinding.current_pose.pose.position.x, self.blackboard.pathfinding.current_pose.pose.position.y])
if np.linalg.norm(goal - position) < self.threshould:
return 'YES'
return 'NO'
def get_reevaluate(self):
return True
|
<commit_before><commit_msg>Add reached movebase goal decision<commit_after>import numpy as np
from dynamic_stack_decider.abstract_decision_element import AbstractDecisionElement
class ReachedMovebaseGoalPosition(AbstractDecisionElement):
def __init__(self, blackboard, dsd, parameters=None):
super(ReachedMovebaseGoalPosition, self).__init__(blackboard, dsd, parameters)
self.threshould = parameters['thres']
def perform(self, reevaluate=False):
"""
Determines whether we are near the movebase goal
:param reevaluate:
:return:
"""
if self.blackboard.pathfinding.goal is None or self.blackboard.pathfinding.current_pose is None:
return "NO"
goal = np.array([self.blackboard.pathfinding.goal.pose.position.x, self.blackboard.pathfinding.goal.pose.position.y])
position = np.array([self.blackboard.pathfinding.current_pose.pose.position.x, self.blackboard.pathfinding.current_pose.pose.position.y])
if np.linalg.norm(goal - position) < self.threshould:
return 'YES'
return 'NO'
def get_reevaluate(self):
return True
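The decision above reduces to one Euclidean-distance comparison between the movebase goal and the current pose. A worked example with made-up coordinates (the real values come from the pathfinding blackboard):

import numpy as np

goal = np.array([2.0, 1.5])        # goal x, y in metres (illustrative values)
position = np.array([1.8, 1.2])    # current pose x, y
threshold = 0.5                    # the 'thres' parameter of the decision

distance = np.linalg.norm(goal - position)      # about 0.36 here
print("YES" if distance < threshold else "NO")  # -> YES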
|
|
3135e75954fa5aa91e1cd0970167f3699a04dcd0
|
py/split-linked-list-in-parts.py
|
py/split-linked-list-in-parts.py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
n = 0
cur = root
while cur:
n += 1
cur = cur.next
group_size, rem = n / k, n % k
ans = []
cur = root
prev = None
for group in xrange(k):
gs = group_size + int(group < rem)
ans.append(cur)
while gs > 0:
prev, cur = cur, cur.next
gs -= 1
if prev:
prev.next = None
return ans
|
Add py solution for 725. Split Linked List in Parts
|
Add py solution for 725. Split Linked List in Parts
725. Split Linked List in Parts: https://leetcode.com/problems/split-linked-list-in-parts/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 725. Split Linked List in Parts
725. Split Linked List in Parts: https://leetcode.com/problems/split-linked-list-in-parts/
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
n = 0
cur = root
while cur:
n += 1
cur = cur.next
group_size, rem = n / k, n % k
ans = []
cur = root
prev = None
for group in xrange(k):
gs = group_size + int(group < rem)
ans.append(cur)
while gs > 0:
prev, cur = cur, cur.next
gs -= 1
if prev:
prev.next = None
return ans
|
<commit_before><commit_msg>Add py solution for 725. Split Linked List in Parts
725. Split Linked List in Parts: https://leetcode.com/problems/split-linked-list-in-parts/<commit_after>
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
n = 0
cur = root
while cur:
n += 1
cur = cur.next
group_size, rem = n / k, n % k
ans = []
cur = root
prev = None
for group in xrange(k):
gs = group_size + int(group < rem)
ans.append(cur)
while gs > 0:
prev, cur = cur, cur.next
gs -= 1
if prev:
prev.next = None
return ans
|
Add py solution for 725. Split Linked List in Parts
725. Split Linked List in Parts: https://leetcode.com/problems/split-linked-list-in-parts/# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
n = 0
cur = root
while cur:
n += 1
cur = cur.next
group_size, rem = n / k, n % k
ans = []
cur = root
prev = None
for group in xrange(k):
gs = group_size + int(group < rem)
ans.append(cur)
while gs > 0:
prev, cur = cur, cur.next
gs -= 1
if prev:
prev.next = None
return ans
|
<commit_before><commit_msg>Add py solution for 725. Split Linked List in Parts
725. Split Linked List in Parts: https://leetcode.com/problems/split-linked-list-in-parts/<commit_after># Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
n = 0
cur = root
while cur:
n += 1
cur = cur.next
group_size, rem = n / k, n % k
ans = []
cur = root
prev = None
for group in xrange(k):
gs = group_size + int(group < rem)
ans.append(cur)
while gs > 0:
prev, cur = cur, cur.next
gs -= 1
if prev:
prev.next = None
return ans
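A usage sketch for the solution above, run under Python 2 to match its xrange and integer division; the ListNode class is the one shown commented out in the record, and the list values and k are arbitrary:

class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

head = None
for v in reversed(range(1, 8)):           # build 1 -> 2 -> ... -> 7
    node = ListNode(v)
    node.next = head
    head = node

parts = Solution().splitListToParts(head, 3)
for part in parts:                        # expected sizes: 3, 2, 2
    vals = []
    while part:
        vals.append(part.val)
        part = part.next
    print(vals)                           # [1, 2, 3], then [4, 5], then [6, 7]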
|
|
f6f8e724cae70f722885736a9db204e18aa1846a
|
tests/test_04_ignore_unknown.py
|
tests/test_04_ignore_unknown.py
|
"""Test the ignore-unknown-dependency command line option.
"""
import pytest
def test_no_ignore(ctestdir):
"""No command line option, e.g. ignore-unknown-dependency is not set.
Explicitly select only a single test that depends on another one.
Since the other test has not been run at all, the selected test
will be skipped.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "test_no_ignore.py::test_d")
result.assert_outcomes(passed=0, skipped=1, failed=0)
result.stdout.fnmatch_lines("""
*::test_d SKIPPED
""")
def test_ignore(ctestdir):
"""Set the ignore-unknown-dependency command line option.
Explicitly select only a single test that depends on another one.
The other test has not been run at all, but since unknown
dependencies will be ignored, the selected test will be run
nevertheless.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "--ignore-unknown-dependency",
"test_ignore.py::test_d")
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines("""
*::test_d PASSED
""")
|
Add a test for the ignore-unknown-dependency command line option.
|
Add a test for the ignore-unknown-dependency command line option.
|
Python
|
apache-2.0
|
RKrahl/pytest-dependency
|
Add a test for the ignore-unknown-dependency command line option.
|
"""Test the ignore-unknown-dependency command line option.
"""
import pytest
def test_no_ignore(ctestdir):
"""No command line option, e.g. ignore-unknown-dependency is not set.
Explicitly select only a single test that depends on another one.
Since the other test has not been run at all, the selected test
will be skipped.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "test_no_ignore.py::test_d")
result.assert_outcomes(passed=0, skipped=1, failed=0)
result.stdout.fnmatch_lines("""
*::test_d SKIPPED
""")
def test_ignore(ctestdir):
"""Set the ignore-unknown-dependency command line option.
Explicitly select only a single test that depends on another one.
The other test has not been run at all, but since unknown
dependencies will be ignored, the selected test will be run
nevertheless.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "--ignore-unknown-dependency",
"test_ignore.py::test_d")
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines("""
*::test_d PASSED
""")
|
<commit_before><commit_msg>Add a test for the ignore-unknown-dependency command line option.<commit_after>
|
"""Test the ignore-unknown-dependency command line option.
"""
import pytest
def test_no_ignore(ctestdir):
"""No command line option, e.g. ignore-unknown-dependency is not set.
Explicitly select only a single test that depends on another one.
Since the other test has not been run at all, the selected test
will be skipped.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "test_no_ignore.py::test_d")
result.assert_outcomes(passed=0, skipped=1, failed=0)
result.stdout.fnmatch_lines("""
*::test_d SKIPPED
""")
def test_ignore(ctestdir):
"""Set the ignore-unknown-dependency command line option.
Explicitly select only a single test that depends on another one.
The other test has not been run at all, but since unknown
dependencies will be ignored, the selected test will be run
nevertheless.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "--ignore-unknown-dependency",
"test_ignore.py::test_d")
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines("""
*::test_d PASSED
""")
|
Add a test for the ignore-unknown-dependency command line option."""Test the ignore-unknown-dependency command line option.
"""
import pytest
def test_no_ignore(ctestdir):
"""No command line option, e.g. ignore-unknown-dependency is not set.
Explicitly select only a single test that depends on another one.
Since the other test has not been run at all, the selected test
will be skipped.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "test_no_ignore.py::test_d")
result.assert_outcomes(passed=0, skipped=1, failed=0)
result.stdout.fnmatch_lines("""
*::test_d SKIPPED
""")
def test_ignore(ctestdir):
"""Set the ignore-unknown-dependency command line option.
Explicitly select only a single test that depends on another one.
The other test has not been run at all, but since unknown
dependencies will be ignored, the selected test will be run
nevertheless.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "--ignore-unknown-dependency",
"test_ignore.py::test_d")
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines("""
*::test_d PASSED
""")
|
<commit_before><commit_msg>Add a test for the ignore-unknown-dependency command line option.<commit_after>"""Test the ignore-unknown-dependency command line option.
"""
import pytest
def test_no_ignore(ctestdir):
"""No command line option, e.g. ignore-unknown-dependency is not set.
Explicitly select only a single test that depends on another one.
Since the other test has not been run at all, the selected test
will be skipped.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "test_no_ignore.py::test_d")
result.assert_outcomes(passed=0, skipped=1, failed=0)
result.stdout.fnmatch_lines("""
*::test_d SKIPPED
""")
def test_ignore(ctestdir):
"""Set the ignore-unknown-dependency command line option.
Explicitly select only a single test that depends on another one.
The other test has not been run at all, but since unknown
dependencies will be ignored, the selected test will be run
nevertheless.
"""
ctestdir.makepyfile("""
import pytest
@pytest.mark.dependency()
def test_a():
pass
@pytest.mark.dependency()
def test_b():
pass
@pytest.mark.dependency()
def test_c():
pass
@pytest.mark.dependency(depends=["test_c"])
def test_d():
pass
""")
result = ctestdir.runpytest("--verbose", "--ignore-unknown-dependency",
"test_ignore.py::test_d")
result.assert_outcomes(passed=1, skipped=0, failed=0)
result.stdout.fnmatch_lines("""
*::test_d PASSED
""")
|
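Outside the ctestdir fixture, the behaviour those two tests pin down looks roughly like this from a user's point of view (a sketch only; the marker and option names are the ones used in the record):

# test_example.py -- minimal sketch of what the option changes
import pytest

@pytest.mark.dependency()
def test_c():
    pass

@pytest.mark.dependency(depends=["test_c"])
def test_d():
    pass

# pytest test_example.py::test_d
#   -> test_d is skipped: test_c was never run, so the dependency is unknown
# pytest --ignore-unknown-dependency test_example.py::test_d
#   -> test_d runs: the unknown dependency is ignored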
|
f457ab08212da6c320c3156e397ae2f02e95435b
|
tests/test_libnit_translator.py
|
tests/test_libnit_translator.py
|
#Jerry B. backer
#11/06/2012
#test for TUFTranslator and libnit_listener
#Uses test_simple server
import libnit_listener
from tuf_api_translator import TUFTranslator
def main():
test = TUFTranslator("127.0.0.1")
new_listener = libnit_listener.LibnitListener(test, debug_mode = True)
new_listener.serve_forever()
if __name__ == "__main__":
main()
|
Test for TUFTranslator and network call interposition
|
Test for TUFTranslator and network call interposition
|
Python
|
mit
|
monzum/tuf-legacy,monzum/tuf-legacy,monzum/tuf-legacy
|
Test for TUFTranslator and network call interposition
|
#Jerry B. backer
#11/06/2012
#test for TUFTranslator and libnit_listener
#Uses test_simple server
import libnit_listener
from tuf_api_translator import TUFTranslator
def main():
test = TUFTranslator("127.0.0.1")
new_listener = libnit_listener.LibnitListener(test, debug_mode = True)
new_listener.serve_forever()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Test for TUFTranslator and network call interposition<commit_after>
|
#Jerry B. backer
#11/06/2012
#test for TUFTranslator and libnit_listener
#Uses test_simple server
import libnit_listener
from tuf_api_translator import TUFTranslator
def main():
test = TUFTranslator("127.0.0.1")
new_listener = libnit_listener.LibnitListener(test, debug_mode = True)
new_listener.serve_forever()
if __name__ == "__main__":
main()
|
Test for TUFTranslator and network call interposition#Jerry B. backer
#11/06/2012
#test for TUFTranslator and libnit_listener
#Uses test_simple server
import libnit_listener
from tuf_api_translator import TUFTranslator
def main():
test = TUFTranslator("127.0.0.1")
new_listener = libnit_listener.LibnitListener(test, debug_mode = True)
new_listener.serve_forever()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Test for TUFTranslator and network call interposition<commit_after>#Jerry B. backer
#11/06/2012
#test for TUFTranslator and libnit_listener
#Uses test_simple server
import libnit_listener
from tuf_api_translator import TUFTranslator
def main():
test = TUFTranslator("127.0.0.1")
new_listener = libnit_listener.LibnitListener(test, debug_mode = True)
new_listener.serve_forever()
if __name__ == "__main__":
main()
|
|
1f02167e1d2134dd83cb7f81ce6c4e95b9c85eb5
|
studygroups/management/commands/add_team_to_learning_circles.py
|
studygroups/management/commands/add_team_to_learning_circles.py
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import StudyGroup
import requests
class Command(BaseCommand):
help = 'Associate learning circles to the team of the facilitator'
def handle(self, *args, **options):
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
if study_group.facilitator.teammembership_set.active().count():
study_group.team = study_group.facilitator.teammembership_set.active().first().team
study_group.save()
print("Added study group to team {}: {}".format(study_group.id, study_group.team_id))
|
Add django task to link learning circles to teams
|
Add django task to link learning circles to teams
|
Python
|
mit
|
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
|
Add django task to link learning circles to teams
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import StudyGroup
import requests
class Command(BaseCommand):
help = 'Associate learning circles to the team of the facilitator'
def handle(self, *args, **options):
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
if study_group.facilitator.teammembership_set.active().count():
study_group.team = study_group.facilitator.teammembership_set.active().first().team
study_group.save()
print("Added study group to team {}: {}".format(study_group.id, study_group.team_id))
|
<commit_before><commit_msg>Add django task to link learning circles to teams<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import StudyGroup
import requests
class Command(BaseCommand):
help = 'Associate learning circles to the team of the facilitator'
def handle(self, *args, **options):
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
if study_group.facilitator.teammembership_set.active().count():
study_group.team = study_group.facilitator.teammembership_set.active().first().team
study_group.save()
print("Added study group to team {}: {}".format(study_group.id, study_group.team_id))
|
Add django task to link learning circles to teamsfrom django.core.management.base import BaseCommand, CommandError
from studygroups.models import StudyGroup
import requests
class Command(BaseCommand):
help = 'Associate learning circles to the team of the facilitator'
def handle(self, *args, **options):
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
if study_group.facilitator.teammembership_set.active().count():
study_group.team = study_group.facilitator.teammembership_set.active().first().team
study_group.save()
print("Added study group to team {}: {}".format(study_group.id, study_group.team_id))
|
<commit_before><commit_msg>Add django task to link learning circles to teams<commit_after>from django.core.management.base import BaseCommand, CommandError
from studygroups.models import StudyGroup
import requests
class Command(BaseCommand):
help = 'Associate learning circles to the team of the facilitator'
def handle(self, *args, **options):
study_groups = StudyGroup.objects.active()
for study_group in study_groups:
if study_group.facilitator.teammembership_set.active().count():
study_group.team = study_group.facilitator.teammembership_set.active().first().team
study_group.save()
print("Added study group to team {}: {}".format(study_group.id, study_group.team_id))
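A sketch of how the one-off backfill above would be invoked; the command name follows the file path in the record, call_command is the standard Django entry point, and the surrounding project setup is assumed:

# From a shell:
#   python manage.py add_team_to_learning_circles
# Or programmatically, e.g. from a maintenance script or test:
from django.core.management import call_command

call_command("add_team_to_learning_circles")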
|
|
93df9699c1ac06d35a72532b0e36f9d687f5fb67
|
py/implement-magic-dictionary.py
|
py/implement-magic-dictionary.py
|
from collections import defaultdict
class Node(object):
def __init__(self):
self.end = False
self._not = defaultdict(set)
self.children = defaultdict(Node)
class MagicDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = Node()
def buildDict(self, dict):
"""
Build a dictionary through a list of words
:type dict: List[str]
:rtype: void
"""
def dfs(depth, s, rem, out):
if depth == len(s):
if rem == 0:
yield out
else:
if rem:
out.append(s[depth].upper())
for x in dfs(depth + 1, s, 0, out):
yield x
out.pop()
out.append(s[depth])
for x in dfs(depth + 1, s, rem, out):
yield x
out.pop()
def insert_word(w):
cur = self.root
for i, c in enumerate(w):
if c.isupper():
cur = cur.children['?']
cur._not[c.lower()].add(''.join(w[i + 1:]))
break
else:
cur = cur.children[c]
cur.end = True
for d in dict:
for word in dfs(0, d, 1, []):
insert_word(word)
def search(self, word):
"""
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
:type word: str
:rtype: bool
"""
def do_search(cur, depth, w):
if depth == len(w):
return cur.end
c = w[depth]
nxt = cur.children.get(c)
if nxt and do_search(nxt, depth + 1, w):
return True
nxt = cur.children.get('?')
if nxt:
for k, s in nxt._not.iteritems():
if k != c and w[depth + 1:] in s:
return True
return False
return do_search(self.root, 0, word)
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
|
Add py solution for 676. Implement Magic Dictionary
|
Add py solution for 676. Implement Magic Dictionary
676. Implement Magic Dictionary: https://leetcode.com/problems/implement-magic-dictionary/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 676. Implement Magic Dictionary
676. Implement Magic Dictionary: https://leetcode.com/problems/implement-magic-dictionary/
|
from collections import defaultdict
class Node(object):
def __init__(self):
self.end = False
self._not = defaultdict(set)
self.children = defaultdict(Node)
class MagicDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = Node()
def buildDict(self, dict):
"""
Build a dictionary through a list of words
:type dict: List[str]
:rtype: void
"""
def dfs(depth, s, rem, out):
if depth == len(s):
if rem == 0:
yield out
else:
if rem:
out.append(s[depth].upper())
for x in dfs(depth + 1, s, 0, out):
yield x
out.pop()
out.append(s[depth])
for x in dfs(depth + 1, s, rem, out):
yield x
out.pop()
def insert_word(w):
cur = self.root
for i, c in enumerate(w):
if c.isupper():
cur = cur.children['?']
cur._not[c.lower()].add(''.join(w[i + 1:]))
break
else:
cur = cur.children[c]
cur.end = True
for d in dict:
for word in dfs(0, d, 1, []):
insert_word(word)
def search(self, word):
"""
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
:type word: str
:rtype: bool
"""
def do_search(cur, depth, w):
if depth == len(w):
return cur.end
c = w[depth]
nxt = cur.children.get(c)
if nxt and do_search(nxt, depth + 1, w):
return True
nxt = cur.children.get('?')
if nxt:
for k, s in nxt._not.iteritems():
if k != c and w[depth + 1:] in s:
return True
return False
return do_search(self.root, 0, word)
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
|
<commit_before><commit_msg>Add py solution for 676. Implement Magic Dictionary
676. Implement Magic Dictionary: https://leetcode.com/problems/implement-magic-dictionary/<commit_after>
|
from collections import defaultdict
class Node(object):
def __init__(self):
self.end = False
self._not = defaultdict(set)
self.children = defaultdict(Node)
class MagicDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = Node()
def buildDict(self, dict):
"""
Build a dictionary through a list of words
:type dict: List[str]
:rtype: void
"""
def dfs(depth, s, rem, out):
if depth == len(s):
if rem == 0:
yield out
else:
if rem:
out.append(s[depth].upper())
for x in dfs(depth + 1, s, 0, out):
yield x
out.pop()
out.append(s[depth])
for x in dfs(depth + 1, s, rem, out):
yield x
out.pop()
def insert_word(w):
cur = self.root
for i, c in enumerate(w):
if c.isupper():
cur = cur.children['?']
cur._not[c.lower()].add(''.join(w[i + 1:]))
break
else:
cur = cur.children[c]
cur.end = True
for d in dict:
for word in dfs(0, d, 1, []):
insert_word(word)
def search(self, word):
"""
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
:type word: str
:rtype: bool
"""
def do_search(cur, depth, w):
if depth == len(w):
return cur.end
c = w[depth]
nxt = cur.children.get(c)
if nxt and do_search(nxt, depth + 1, w):
return True
nxt = cur.children.get('?')
if nxt:
for k, s in nxt._not.iteritems():
if k != c and w[depth + 1:] in s:
return True
return False
return do_search(self.root, 0, word)
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
|
Add py solution for 676. Implement Magic Dictionary
676. Implement Magic Dictionary: https://leetcode.com/problems/implement-magic-dictionary/from collections import defaultdict
class Node(object):
def __init__(self):
self.end = False
self._not = defaultdict(set)
self.children = defaultdict(Node)
class MagicDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = Node()
def buildDict(self, dict):
"""
Build a dictionary through a list of words
:type dict: List[str]
:rtype: void
"""
def dfs(depth, s, rem, out):
if depth == len(s):
if rem == 0:
yield out
else:
if rem:
out.append(s[depth].upper())
for x in dfs(depth + 1, s, 0, out):
yield x
out.pop()
out.append(s[depth])
for x in dfs(depth + 1, s, rem, out):
yield x
out.pop()
def insert_word(w):
cur = self.root
for i, c in enumerate(w):
if c.isupper():
cur = cur.children['?']
cur._not[c.lower()].add(''.join(w[i + 1:]))
break
else:
cur = cur.children[c]
cur.end = True
for d in dict:
for word in dfs(0, d, 1, []):
insert_word(word)
def search(self, word):
"""
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
:type word: str
:rtype: bool
"""
def do_search(cur, depth, w):
if depth == len(w):
return cur.end
c = w[depth]
nxt = cur.children.get(c)
if nxt and do_search(nxt, depth + 1, w):
return True
nxt = cur.children.get('?')
if nxt:
for k, s in nxt._not.iteritems():
if k != c and w[depth + 1:] in s:
return True
return False
return do_search(self.root, 0, word)
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
|
<commit_before><commit_msg>Add py solution for 676. Implement Magic Dictionary
676. Implement Magic Dictionary: https://leetcode.com/problems/implement-magic-dictionary/<commit_after>from collections import defaultdict
class Node(object):
def __init__(self):
self.end = False
self._not = defaultdict(set)
self.children = defaultdict(Node)
class MagicDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = Node()
def buildDict(self, dict):
"""
Build a dictionary through a list of words
:type dict: List[str]
:rtype: void
"""
def dfs(depth, s, rem, out):
if depth == len(s):
if rem == 0:
yield out
else:
if rem:
out.append(s[depth].upper())
for x in dfs(depth + 1, s, 0, out):
yield x
out.pop()
out.append(s[depth])
for x in dfs(depth + 1, s, rem, out):
yield x
out.pop()
def insert_word(w):
cur = self.root
for i, c in enumerate(w):
if c.isupper():
cur = cur.children['?']
cur._not[c.lower()].add(''.join(w[i + 1:]))
break
else:
cur = cur.children[c]
cur.end = True
for d in dict:
for word in dfs(0, d, 1, []):
insert_word(word)
def search(self, word):
"""
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
:type word: str
:rtype: bool
"""
def do_search(cur, depth, w):
if depth == len(w):
return cur.end
c = w[depth]
nxt = cur.children.get(c)
if nxt and do_search(nxt, depth + 1, w):
return True
nxt = cur.children.get('?')
if nxt:
for k, s in nxt._not.iteritems():
if k != c and w[depth + 1:] in s:
return True
return False
return do_search(self.root, 0, word)
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word)
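A usage sketch for the trie-based structure above, using the example from the problem statement; run under Python 2 to match the iteritems() call in search():

md = MagicDictionary()
md.buildDict(["hello", "leetcode"])
print(md.search("hello"))   # False -- zero characters differ
print(md.search("hhllo"))   # True  -- exactly one character differs
print(md.search("hell"))    # False -- lengths differ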
|
|
df58e6332c47efdef34021d811d5101fe5c944ea
|
cat/management/commands/importcategories.py
|
cat/management/commands/importcategories.py
|
from django.core.management.base import BaseCommand, CommandError
from cat.models import Category
from openpyxl.reader.excel import load_workbook
def import_categories(filename):
wb = load_workbook(filename = filename)
categories_sheet = wb.get_sheet_by_name(name = 'Categories')
_import_sheet(categories_sheet, skip=("Equipment",))
equipment_sheet = wb.get_sheet_by_name(name="Equipment")
equipment = Category(name="Equipment")
equipment.save()
_import_sheet(equipment_sheet, parent=equipment)
def _import_sheet(sheet, skip=(), parent=None):
for column in sheet.columns:
title = column[0].value
if title in skip:
continue
print title
toplevel = Category(name=title)
toplevel.parent = parent
toplevel.save()
for cell in column[1:]:
if cell.value is None:
break
print " ", cell.value
cat = Category(name=cell.value)
cat.parent = toplevel
cat.save()
class Command(BaseCommand):
help = "Import categories from xlsx"
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("need exactly one argument for categories xlsx file")
filename, = args
import_categories(filename)
|
Add script for importing the new categories from excel.
|
Add script for importing the new categories from excel.
|
Python
|
bsd-3-clause
|
uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam
|
Add script for importing the new categories from excel.
|
from django.core.management.base import BaseCommand, CommandError
from cat.models import Category
from openpyxl.reader.excel import load_workbook
def import_categories(filename):
wb = load_workbook(filename = filename)
categories_sheet = wb.get_sheet_by_name(name = 'Categories')
_import_sheet(categories_sheet, skip=("Equipment",))
equipment_sheet = wb.get_sheet_by_name(name="Equipment")
equipment = Category(name="Equipment")
equipment.save()
_import_sheet(equipment_sheet, parent=equipment)
def _import_sheet(sheet, skip=(), parent=None):
for column in sheet.columns:
title = column[0].value
if title in skip:
continue
print title
toplevel = Category(name=title)
toplevel.parent = parent
toplevel.save()
for cell in column[1:]:
if cell.value is None:
break
print " ", cell.value
cat = Category(name=cell.value)
cat.parent = toplevel
cat.save()
class Command(BaseCommand):
help = "Import categories from xlsx"
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("need exactly one argument for categories xlsx file")
filename, = args
import_categories(filename)
|
<commit_before><commit_msg>Add script for importing the new categories from excel.<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from cat.models import Category
from openpyxl.reader.excel import load_workbook
def import_categories(filename):
wb = load_workbook(filename = filename)
categories_sheet = wb.get_sheet_by_name(name = 'Categories')
_import_sheet(categories_sheet, skip=("Equipment",))
equipment_sheet = wb.get_sheet_by_name(name="Equipment")
equipment = Category(name="Equipment")
equipment.save()
_import_sheet(equipment_sheet, parent=equipment)
def _import_sheet(sheet, skip=(), parent=None):
for column in sheet.columns:
title = column[0].value
if title in skip:
continue
print title
toplevel = Category(name=title)
toplevel.parent = parent
toplevel.save()
for cell in column[1:]:
if cell.value is None:
break
print " ", cell.value
cat = Category(name=cell.value)
cat.parent = toplevel
cat.save()
class Command(BaseCommand):
help = "Import categories from xlsx"
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("need exactly one argument for categories xlsx file")
filename, = args
import_categories(filename)
|
Add script for importing the new categories from excel.from django.core.management.base import BaseCommand, CommandError
from cat.models import Category
from openpyxl.reader.excel import load_workbook
def import_categories(filename):
wb = load_workbook(filename = filename)
categories_sheet = wb.get_sheet_by_name(name = 'Categories')
_import_sheet(categories_sheet, skip=("Equipment",))
equipment_sheet = wb.get_sheet_by_name(name="Equipment")
equipment = Category(name="Equipment")
equipment.save()
_import_sheet(equipment_sheet, parent=equipment)
def _import_sheet(sheet, skip=(), parent=None):
for column in sheet.columns:
title = column[0].value
if title in skip:
continue
print title
toplevel = Category(name=title)
toplevel.parent = parent
toplevel.save()
for cell in column[1:]:
if cell.value is None:
break
print " ", cell.value
cat = Category(name=cell.value)
cat.parent = toplevel
cat.save()
class Command(BaseCommand):
help = "Import categories from xlsx"
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("need exactly one argument for categories xlsx file")
filename, = args
import_categories(filename)
|
<commit_before><commit_msg>Add script for importing the new categories from excel.<commit_after>from django.core.management.base import BaseCommand, CommandError
from cat.models import Category
from openpyxl.reader.excel import load_workbook
def import_categories(filename):
wb = load_workbook(filename = filename)
categories_sheet = wb.get_sheet_by_name(name = 'Categories')
_import_sheet(categories_sheet, skip=("Equipment",))
equipment_sheet = wb.get_sheet_by_name(name="Equipment")
equipment = Category(name="Equipment")
equipment.save()
_import_sheet(equipment_sheet, parent=equipment)
def _import_sheet(sheet, skip=(), parent=None):
for column in sheet.columns:
title = column[0].value
if title in skip:
continue
print title
toplevel = Category(name=title)
toplevel.parent = parent
toplevel.save()
for cell in column[1:]:
if cell.value is None:
break
print " ", cell.value
cat = Category(name=cell.value)
cat.parent = toplevel
cat.save()
class Command(BaseCommand):
help = "Import categories from xlsx"
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("need exactly one argument for categories xlsx file")
filename, = args
import_categories(filename)
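A sketch of invoking the importer above; the command name follows the file path in the record, and the workbook filename is illustrative:

# From a shell (exactly one argument, the xlsx file):
#   python manage.py importcategories new_categories.xlsx
# Or from code, since handle() forwards positional args:
from django.core.management import call_command

call_command("importcategories", "new_categories.xlsx")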
|
|
db3ff13c74c492341f11a8829c548fda79e34cc2
|
test/Win32/file-is-type-not-func.py
|
test/Win32/file-is-type-not-func.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This test verifies that file isn't redefined incorrectly as a function on
Windows systems. It's a built-in type and should thus be inheritable.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import TestSCons
test = TestSCons.TestSCons()
if sys.platform != 'win32':
test.skip_test(
'Skipping file-type test on non-Windows platform: %s\n' % sys.platform)
test.write('SConstruct', """
env = Environment()
class AFile(file): pass
""")
# If file is redefined as a function, trying to inherit from it causes SCons to
# die.
test.run(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Add test for issue-2857 fix.
|
Add test for issue-2857 fix.
The test makes sure that inheriting from file works as it should.
|
Python
|
mit
|
timj/scons,andrewyoung1991/scons,timj/scons,timj/scons,timj/scons,timj/scons,andrewyoung1991/scons,andrewyoung1991/scons,timj/scons,timj/scons,andrewyoung1991/scons,andrewyoung1991/scons,andrewyoung1991/scons,andrewyoung1991/scons,andrewyoung1991/scons,andrewyoung1991/scons,timj/scons,timj/scons
|
Add test for issue-2857 fix.
The test makes sure that inheriting from file works as it should.
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This test verifies that file isn't redefined incorrectly as a function on
Windows systems. It's a built-in type and should thus be inheritable.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import TestSCons
test = TestSCons.TestSCons()
if sys.platform != 'win32':
test.skip_test(
'Skipping file-type test on non-Windows platform: %s\n' % sys.platform)
test.write('SConstruct', """
env = Environment()
class AFile(file): pass
""")
# If file is redefined as a function, trying to inherit from it causes SCons to
# die.
test.run(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
<commit_before><commit_msg>Add test for issue-2857 fix.
The test makes sure that inheriting from file works as it should.<commit_after>
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This test verifies that file isn't redefined incorrectly as a function on
Windows systems. It's a built-in type and should thus be inheritable.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import TestSCons
test = TestSCons.TestSCons()
if sys.platform != 'win32':
test.skip_test(
'Skipping file-type test on non-Windows platform: %s\n' % sys.platform)
test.write('SConstruct', """
env = Environment()
class AFile(file): pass
""")
# If file is redefined as a function, trying to inherit from it causes SCons to
# die.
test.run(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Add test for issue-2857 fix.
The test makes sure that inheriting from file works as it should.#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This test verifies that file isn't redefined incorrectly as a function on
Windows systems. It's a built-in type and should thus be inheritable.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import TestSCons
test = TestSCons.TestSCons()
if sys.platform != 'win32':
test.skip_test(
'Skipping file-type test on non-Windows platform: %s\n' % sys.platform)
test.write('SConstruct', """
env = Environment()
class AFile(file): pass
""")
# If file is redefined as a function, trying to inherit from it causes SCons to
# die.
test.run(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
<commit_before><commit_msg>Add test for issue-2857 fix.
The test makes sure that inheriting from file works as it should.<commit_after>#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This test verifies that file isn't redefined incorrectly as a function on
Windows systems. It's a built-in type and should thus be inheritable.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import TestSCons
test = TestSCons.TestSCons()
if sys.platform != 'win32':
test.skip_test(
'Skipping file-type test on non-Windows platform: %s\n' % sys.platform)
test.write('SConstruct', """
env = Environment()
class AFile(file): pass
""")
# If file is redefined as a function, trying to inherit from it causes SCons to
# die.
test.run(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
fb65033cca916faf15158d79e870009eebb823b6
|
pyconca2017/pycon_schedule/migrations/0005_auto_20171005_0914.py
|
pyconca2017/pycon_schedule/migrations/0005_auto_20171005_0914.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-05 13:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pycon_schedule', '0004_auto_20171002_2324'),
]
operations = [
migrations.AddField(
model_name='slotevent',
name='content_fr',
field=models.TextField(blank=True, verbose_name='Content (FR)'),
),
migrations.AlterField(
model_name='slotevent',
name='content',
field=models.TextField(blank=True, verbose_name='Content (EN)'),
),
]
|
Add French language option to Slot content.
|
:sparkles: Add French language option to Slot content.
#59
|
Python
|
mit
|
pyconca/2017-web,pyconca/2017-web,pyconca/2017-web,pyconca/2017-web
|
:sparkles: Add French language option to Slot content.
#59
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-05 13:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pycon_schedule', '0004_auto_20171002_2324'),
]
operations = [
migrations.AddField(
model_name='slotevent',
name='content_fr',
field=models.TextField(blank=True, verbose_name='Content (FR)'),
),
migrations.AlterField(
model_name='slotevent',
name='content',
field=models.TextField(blank=True, verbose_name='Content (EN)'),
),
]
|
<commit_before><commit_msg>:sparkles: Add French language option to Slot content.
#59<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-05 13:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pycon_schedule', '0004_auto_20171002_2324'),
]
operations = [
migrations.AddField(
model_name='slotevent',
name='content_fr',
field=models.TextField(blank=True, verbose_name='Content (FR)'),
),
migrations.AlterField(
model_name='slotevent',
name='content',
field=models.TextField(blank=True, verbose_name='Content (EN)'),
),
]
|
:sparkles: Add French language option to Slot content.
#59# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-05 13:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pycon_schedule', '0004_auto_20171002_2324'),
]
operations = [
migrations.AddField(
model_name='slotevent',
name='content_fr',
field=models.TextField(blank=True, verbose_name='Content (FR)'),
),
migrations.AlterField(
model_name='slotevent',
name='content',
field=models.TextField(blank=True, verbose_name='Content (EN)'),
),
]
|
<commit_before><commit_msg>:sparkles: Add French language option to Slot content.
#59<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-05 13:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pycon_schedule', '0004_auto_20171002_2324'),
]
operations = [
migrations.AddField(
model_name='slotevent',
name='content_fr',
field=models.TextField(blank=True, verbose_name='Content (FR)'),
),
migrations.AlterField(
model_name='slotevent',
name='content',
field=models.TextField(blank=True, verbose_name='Content (EN)'),
),
]
|
|
b26053cb517aea3885e06c3cc702e5a4d3a595e6
|
pylayers/gis/test/test_convex.py
|
pylayers/gis/test/test_convex.py
|
from pylayers.gis.layout import *
import pylayers.util.geomutil as geu
L=Layout('WHERE1.ini')
L.build()
lc = [(n,L.Gt.node[n]['polyg'].isconvex()) for n in L.Gt.nodes()]
cnc = [n for n in L.Gt.nodes() if not L.Gt.node[n]['polyg'].isconvex()]
fig,ax=L.showG('st',labels=True)
for cy,c in lc:
if c:
print cy,
fig,ax= L.Gt.node[cy]['polyg'].plot(color='blue',alpha=0.5,fig=fig,ax=ax)
else:
fig, ax = L.Gt.node[cy]['polyg'].plot(color='red', alpha=0.5,fig=fig,ax=ax)
|
Add test convex in gis
|
Add test convex in gis
|
Python
|
mit
|
dialounke/pylayers,pylayers/pylayers,dialounke/pylayers,pylayers/pylayers
|
Add test convex in gis
|
from pylayers.gis.layout import *
import pylayers.util.geomutil as geu
L=Layout('WHERE1.ini')
L.build()
lc = [(n,L.Gt.node[n]['polyg'].isconvex()) for n in L.Gt.nodes()]
cnc = [n for n in L.Gt.nodes() if not L.Gt.node[n]['polyg'].isconvex()]
fig,ax=L.showG('st',labels=True)
for cy,c in lc:
if c:
print cy,
fig,ax= L.Gt.node[cy]['polyg'].plot(color='blue',alpha=0.5,fig=fig,ax=ax)
else:
fig, ax = L.Gt.node[cy]['polyg'].plot(color='red', alpha=0.5,fig=fig,ax=ax)
|
<commit_before><commit_msg>Add test convex in gis<commit_after>
|
from pylayers.gis.layout import *
import pylayers.util.geomutil as geu
L=Layout('WHERE1.ini')
L.build()
lc = [(n,L.Gt.node[n]['polyg'].isconvex()) for n in L.Gt.nodes()]
cnc = [n for n in L.Gt.nodes() if not L.Gt.node[n]['polyg'].isconvex()]
fig,ax=L.showG('st',labels=True)
for cy,c in lc:
if c:
print cy,
fig,ax= L.Gt.node[cy]['polyg'].plot(color='blue',alpha=0.5,fig=fig,ax=ax)
else:
fig, ax = L.Gt.node[cy]['polyg'].plot(color='red', alpha=0.5,fig=fig,ax=ax)
|
Add test convex in gisfrom pylayers.gis.layout import *
import pylayers.util.geomutil as geu
L=Layout('WHERE1.ini')
L.build()
lc = [(n,L.Gt.node[n]['polyg'].isconvex()) for n in L.Gt.nodes()]
cnc = [n for n in L.Gt.nodes() if not L.Gt.node[n]['polyg'].isconvex()]
fig,ax=L.showG('st',labels=True)
for cy,c in lc:
if c:
print cy,
fig,ax= L.Gt.node[cy]['polyg'].plot(color='blue',alpha=0.5,fig=fig,ax=ax)
else:
fig, ax = L.Gt.node[cy]['polyg'].plot(color='red', alpha=0.5,fig=fig,ax=ax)
|
<commit_before><commit_msg>Add test convex in gis<commit_after>from pylayers.gis.layout import *
import pylayers.util.geomutil as geu
L=Layout('WHERE1.ini')
L.build()
lc = [(n,L.Gt.node[n]['polyg'].isconvex()) for n in L.Gt.nodes()]
cnc = [n for n in L.Gt.nodes() if not L.Gt.node[n]['polyg'].isconvex()]
fig,ax=L.showG('st',labels=True)
for cy,c in lc:
if c:
print cy,
fig,ax= L.Gt.node[cy]['polyg'].plot(color='blue',alpha=0.5,fig=fig,ax=ax)
else:
fig, ax = L.Gt.node[cy]['polyg'].plot(color='red', alpha=0.5,fig=fig,ax=ax)
|
|
1d617b7b072f65e9ecb0baad87485b6d471edf31
|
tests/unit/test_tokenstorage.py
|
tests/unit/test_tokenstorage.py
|
import json
import pytest
from globus_sdk.tokenstorage import SimpleJSONFileAdapter, SQLiteAdapter
from globus_sdk.version import __version__ as sdkversion
def test_sqlite_reading_bad_config():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
# store_config does not check the input type, just uses json.dumps()
adapter.store_config("foo_conf", [])
with pytest.raises(ValueError, match="reading config data and got non-dict result"):
adapter.read_config("foo_conf")
def test_sqlite_reading_bad_token_data():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
adapter._connection.execute(
"""\
INSERT INTO token_storage(namespace, resource_server, token_data_json)
VALUES (?, ?, ?)""",
(adapter.namespace, "foo_rs", "[]"),
)
with pytest.raises(
ValueError, match="data error: token data was not saved as a dict"
):
adapter.get_token_data("foo_rs")
def test_simplejson_reading_bad_data(tmp_path):
# non-dict data at root
foo_file = tmp_path / "foo.json"
foo_file.write_text('["foobar"]')
foo_adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="reading from json file got non-dict data"):
foo_adapter.get_by_resource_server()
# non-dict data in 'by_rs'
bar_file = tmp_path / "bar.json"
bar_file.write_text(
json.dumps(
{"by_rs": [], "format_version": "1.0", "globus-sdk.version": sdkversion}
)
)
bar_adapter = SimpleJSONFileAdapter(str(bar_file))
with pytest.raises(ValueError, match="existing data file is malformed"):
bar_adapter.get_by_resource_server()
def test_simplejson_reading_unsupported_format_version(tmp_path):
# data appears valid, but lists a value for "format_version" which instructs the
# adapter explicitly that it is in a format which is unknown / not supported
foo_file = tmp_path / "foo.json"
foo_file.write_text(
json.dumps(
{"by_rs": {}, "format_version": "0.0", "globus-sdk.version": sdkversion}
)
)
adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="existing data file is in an unknown format"):
adapter.get_by_resource_server()
|
Add some unit tests to cover tokenstorage
|
Add some unit tests to cover tokenstorage
The tokenstorage adapters have several untested behaviors regarding
handling of malformed data. Add some unit tests which inject bad data,
to confirm that the handling code is triggered correctly with no
crashfails or other unexpected issues.
|
Python
|
apache-2.0
|
sirosen/globus-sdk-python,globus/globus-sdk-python,globus/globus-sdk-python
|
Add some unit tests to cover tokenstorage
The tokenstorage adapters have several untested behaviors regarding
handling of malformed data. Add some unit tests which inject bad data,
to confirm that the handling code is triggered correctly with no
crashfails or other unexpected issues.
|
import json
import pytest
from globus_sdk.tokenstorage import SimpleJSONFileAdapter, SQLiteAdapter
from globus_sdk.version import __version__ as sdkversion
def test_sqlite_reading_bad_config():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
# store_config does not check the input type, just uses json.dumps()
adapter.store_config("foo_conf", [])
with pytest.raises(ValueError, match="reading config data and got non-dict result"):
adapter.read_config("foo_conf")
def test_sqlite_reading_bad_token_data():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
adapter._connection.execute(
"""\
INSERT INTO token_storage(namespace, resource_server, token_data_json)
VALUES (?, ?, ?)""",
(adapter.namespace, "foo_rs", "[]"),
)
with pytest.raises(
ValueError, match="data error: token data was not saved as a dict"
):
adapter.get_token_data("foo_rs")
def test_simplejson_reading_bad_data(tmp_path):
# non-dict data at root
foo_file = tmp_path / "foo.json"
foo_file.write_text('["foobar"]')
foo_adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="reading from json file got non-dict data"):
foo_adapter.get_by_resource_server()
# non-dict data in 'by_rs'
bar_file = tmp_path / "bar.json"
bar_file.write_text(
json.dumps(
{"by_rs": [], "format_version": "1.0", "globus-sdk.version": sdkversion}
)
)
bar_adapter = SimpleJSONFileAdapter(str(bar_file))
with pytest.raises(ValueError, match="existing data file is malformed"):
bar_adapter.get_by_resource_server()
def test_simplejson_reading_unsupported_format_version(tmp_path):
# data appears valid, but lists a value for "format_version" which instructs the
# adapter explicitly that it is in a format which is unknown / not supported
foo_file = tmp_path / "foo.json"
foo_file.write_text(
json.dumps(
{"by_rs": {}, "format_version": "0.0", "globus-sdk.version": sdkversion}
)
)
adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="existing data file is in an unknown format"):
adapter.get_by_resource_server()
|
<commit_before><commit_msg>Add some unit tests to cover tokenstorage
The tokenstorage adapters have several untested behaviors regarding
handling of malformed data. Add some unit tests which inject bad data,
to confirm that the handling code is triggered correctly with no
crashfails or other unexpected issues.<commit_after>
|
import json
import pytest
from globus_sdk.tokenstorage import SimpleJSONFileAdapter, SQLiteAdapter
from globus_sdk.version import __version__ as sdkversion
def test_sqlite_reading_bad_config():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
# store_config does not check the input type, just uses json.dumps()
adapter.store_config("foo_conf", [])
with pytest.raises(ValueError, match="reading config data and got non-dict result"):
adapter.read_config("foo_conf")
def test_sqlite_reading_bad_token_data():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
adapter._connection.execute(
"""\
INSERT INTO token_storage(namespace, resource_server, token_data_json)
VALUES (?, ?, ?)""",
(adapter.namespace, "foo_rs", "[]"),
)
with pytest.raises(
ValueError, match="data error: token data was not saved as a dict"
):
adapter.get_token_data("foo_rs")
def test_simplejson_reading_bad_data(tmp_path):
# non-dict data at root
foo_file = tmp_path / "foo.json"
foo_file.write_text('["foobar"]')
foo_adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="reading from json file got non-dict data"):
foo_adapter.get_by_resource_server()
# non-dict data in 'by_rs'
bar_file = tmp_path / "bar.json"
bar_file.write_text(
json.dumps(
{"by_rs": [], "format_version": "1.0", "globus-sdk.version": sdkversion}
)
)
bar_adapter = SimpleJSONFileAdapter(str(bar_file))
with pytest.raises(ValueError, match="existing data file is malformed"):
bar_adapter.get_by_resource_server()
def test_simplejson_reading_unsupported_format_version(tmp_path):
# data appears valid, but lists a value for "format_version" which instructs the
# adapter explicitly that it is in a format which is unknown / not supported
foo_file = tmp_path / "foo.json"
foo_file.write_text(
json.dumps(
{"by_rs": {}, "format_version": "0.0", "globus-sdk.version": sdkversion}
)
)
adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="existing data file is in an unknown format"):
adapter.get_by_resource_server()
|
Add some unit tests to cover tokenstorage
The tokenstorage adapters have several untested behaviors regarding
handling of malformed data. Add some unit tests which inject bad data,
to confirm that the handling code is triggered correctly with no
crashfails or other unexpected issues.import json
import pytest
from globus_sdk.tokenstorage import SimpleJSONFileAdapter, SQLiteAdapter
from globus_sdk.version import __version__ as sdkversion
def test_sqlite_reading_bad_config():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
# store_config does not check the input type, just uses json.dumps()
adapter.store_config("foo_conf", [])
with pytest.raises(ValueError, match="reading config data and got non-dict result"):
adapter.read_config("foo_conf")
def test_sqlite_reading_bad_token_data():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
adapter._connection.execute(
"""\
INSERT INTO token_storage(namespace, resource_server, token_data_json)
VALUES (?, ?, ?)""",
(adapter.namespace, "foo_rs", "[]"),
)
with pytest.raises(
ValueError, match="data error: token data was not saved as a dict"
):
adapter.get_token_data("foo_rs")
def test_simplejson_reading_bad_data(tmp_path):
# non-dict data at root
foo_file = tmp_path / "foo.json"
foo_file.write_text('["foobar"]')
foo_adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="reading from json file got non-dict data"):
foo_adapter.get_by_resource_server()
# non-dict data in 'by_rs'
bar_file = tmp_path / "bar.json"
bar_file.write_text(
json.dumps(
{"by_rs": [], "format_version": "1.0", "globus-sdk.version": sdkversion}
)
)
bar_adapter = SimpleJSONFileAdapter(str(bar_file))
with pytest.raises(ValueError, match="existing data file is malformed"):
bar_adapter.get_by_resource_server()
def test_simplejson_reading_unsupported_format_version(tmp_path):
# data appears valid, but lists a value for "format_version" which instructs the
# adapter explicitly that it is in a format which is unknown / not supported
foo_file = tmp_path / "foo.json"
foo_file.write_text(
json.dumps(
{"by_rs": {}, "format_version": "0.0", "globus-sdk.version": sdkversion}
)
)
adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="existing data file is in an unknown format"):
adapter.get_by_resource_server()
|
<commit_before><commit_msg>Add some unit tests to cover tokenstorage
The tokenstorage adapters have several untested behaviors regarding
handling of malformed data. Add some unit tests which inject bad data,
to confirm that the handling code is triggered correctly with no
crashfails or other unexpected issues.<commit_after>import json
import pytest
from globus_sdk.tokenstorage import SimpleJSONFileAdapter, SQLiteAdapter
from globus_sdk.version import __version__ as sdkversion
def test_sqlite_reading_bad_config():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
# store_config does not check the input type, just uses json.dumps()
adapter.store_config("foo_conf", [])
with pytest.raises(ValueError, match="reading config data and got non-dict result"):
adapter.read_config("foo_conf")
def test_sqlite_reading_bad_token_data():
adapter = SQLiteAdapter(":memory:")
# inject bad data (array, needs to be dict)
adapter._connection.execute(
"""\
INSERT INTO token_storage(namespace, resource_server, token_data_json)
VALUES (?, ?, ?)""",
(adapter.namespace, "foo_rs", "[]"),
)
with pytest.raises(
ValueError, match="data error: token data was not saved as a dict"
):
adapter.get_token_data("foo_rs")
def test_simplejson_reading_bad_data(tmp_path):
# non-dict data at root
foo_file = tmp_path / "foo.json"
foo_file.write_text('["foobar"]')
foo_adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="reading from json file got non-dict data"):
foo_adapter.get_by_resource_server()
# non-dict data in 'by_rs'
bar_file = tmp_path / "bar.json"
bar_file.write_text(
json.dumps(
{"by_rs": [], "format_version": "1.0", "globus-sdk.version": sdkversion}
)
)
bar_adapter = SimpleJSONFileAdapter(str(bar_file))
with pytest.raises(ValueError, match="existing data file is malformed"):
bar_adapter.get_by_resource_server()
def test_simplejson_reading_unsupported_format_version(tmp_path):
# data appears valid, but lists a value for "format_version" which instructs the
# adapter explicitly that it is in a format which is unknown / not supported
foo_file = tmp_path / "foo.json"
foo_file.write_text(
json.dumps(
{"by_rs": {}, "format_version": "0.0", "globus-sdk.version": sdkversion}
)
)
adapter = SimpleJSONFileAdapter(str(foo_file))
with pytest.raises(ValueError, match="existing data file is in an unknown format"):
adapter.get_by_resource_server()
|
|
974739822bbe80c8841c3aceafd3a49d29b5c1dc
|
custom/icds_reports/management/commands/rebuild_ccs_record_ucr.py
|
custom/icds_reports/management/commands/rebuild_ccs_record_ucr.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
import time
from django.core.management.base import BaseCommand
from corehq.apps.userreports.tasks import _get_config_by_id, _build_indicators
from corehq.apps.change_feed.data_sources import get_document_store_for_doc_type
from custom.icds_reports.models.aggregate import CcsRecordMonthly
from pillowtop.dao.couch import ID_CHUNK_SIZE
class Command(BaseCommand):
help = "Rebuild ccs record cases ucr for pnc_complete"
def handle(self):
indicator_config_id = 'static-ccs_record_cases'
case_type_of_xmlns = 'ccs_record'
config = _get_config_by_id(indicator_config_id)
document_store = get_document_store_for_doc_type(
config.domain, config.referenced_doc_type, case_type_or_xmlns=case_type_of_xmlns
)
doc_ids = CcsRecordMonthly.objects.filter(pnc_complete=1).values('case_id')
relevant_ids = list()
next_event = time.time() + 10
for doc_id in doc_ids:
relevant_ids.append(doc_id['case_id'])
if len(relevant_ids) >= ID_CHUNK_SIZE:
_build_indicators(config, document_store, relevant_ids)
relevant_ids = []
if time.time() > next_event:
print("processed till case %s" % (doc_id['case_id']))
next_event = time.time() + 10
if relevant_ids:
_build_indicators(config, document_store, relevant_ids)
|
Add Mgt command to rebuild ccs records rebuild ccs record cases for pnc_complete=1
|
Add Mgt command to rebuild ccs records
rebuild ccs record cases for pnc_complete=1
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add Mgt command to rebuild ccs records
rebuild ccs record cases for pnc_complete=1
|
from __future__ import absolute_import
from __future__ import unicode_literals
import time
from django.core.management.base import BaseCommand
from corehq.apps.userreports.tasks import _get_config_by_id, _build_indicators
from corehq.apps.change_feed.data_sources import get_document_store_for_doc_type
from custom.icds_reports.models.aggregate import CcsRecordMonthly
from pillowtop.dao.couch import ID_CHUNK_SIZE
class Command(BaseCommand):
help = "Rebuild ccs record cases ucr for pnc_complete"
def handle(self):
indicator_config_id = 'static-ccs_record_cases'
case_type_of_xmlns = 'ccs_record'
config = _get_config_by_id(indicator_config_id)
document_store = get_document_store_for_doc_type(
config.domain, config.referenced_doc_type, case_type_or_xmlns=case_type_of_xmlns
)
doc_ids = CcsRecordMonthly.objects.filter(pnc_complete=1).values('case_id')
relevant_ids = list()
next_event = time.time() + 10
for doc_id in doc_ids:
relevant_ids.append(doc_id['case_id'])
if len(relevant_ids) >= ID_CHUNK_SIZE:
_build_indicators(config, document_store, relevant_ids)
relevant_ids = []
if time.time() > next_event:
print("processed till case %s" % (doc_id['case_id']))
next_event = time.time() + 10
if relevant_ids:
_build_indicators(config, document_store, relevant_ids)
|
<commit_before><commit_msg>Add Mgt command to rebuild ccs records
rebuild ccs record cases for pnc_complete=1<commit_after>
|
from __future__ import absolute_import
from __future__ import unicode_literals
import time
from django.core.management.base import BaseCommand
from corehq.apps.userreports.tasks import _get_config_by_id, _build_indicators
from corehq.apps.change_feed.data_sources import get_document_store_for_doc_type
from custom.icds_reports.models.aggregate import CcsRecordMonthly
from pillowtop.dao.couch import ID_CHUNK_SIZE
class Command(BaseCommand):
help = "Rebuild ccs record cases ucr for pnc_complete"
def handle(self):
indicator_config_id = 'static-ccs_record_cases'
case_type_of_xmlns = 'ccs_record'
config = _get_config_by_id(indicator_config_id)
document_store = get_document_store_for_doc_type(
config.domain, config.referenced_doc_type, case_type_or_xmlns=case_type_of_xmlns
)
doc_ids = CcsRecordMonthly.objects.filter(pnc_complete=1).values('case_id')
relevant_ids = list()
next_event = time.time() + 10
for doc_id in doc_ids:
relevant_ids.append(doc_id['case_id'])
if len(relevant_ids) >= ID_CHUNK_SIZE:
_build_indicators(config, document_store, relevant_ids)
relevant_ids = []
if time.time() > next_event:
print("processed till case %s" % (doc_id['case_id']))
next_event = time.time() + 10
if relevant_ids:
_build_indicators(config, document_store, relevant_ids)
|
Add Mgt command to rebuild ccs records
rebuild ccs record cases for pnc_complete=1from __future__ import absolute_import
from __future__ import unicode_literals
import time
from django.core.management.base import BaseCommand
from corehq.apps.userreports.tasks import _get_config_by_id, _build_indicators
from corehq.apps.change_feed.data_sources import get_document_store_for_doc_type
from custom.icds_reports.models.aggregate import CcsRecordMonthly
from pillowtop.dao.couch import ID_CHUNK_SIZE
class Command(BaseCommand):
help = "Rebuild ccs record cases ucr for pnc_complete"
def handle(self):
indicator_config_id = 'static-ccs_record_cases'
case_type_of_xmlns = 'ccs_record'
config = _get_config_by_id(indicator_config_id)
document_store = get_document_store_for_doc_type(
config.domain, config.referenced_doc_type, case_type_or_xmlns=case_type_of_xmlns
)
doc_ids = CcsRecordMonthly.objects.filter(pnc_complete=1).values('case_id')
relevant_ids = list()
next_event = time.time() + 10
for doc_id in doc_ids:
relevant_ids.append(doc_id['case_id'])
if len(relevant_ids) >= ID_CHUNK_SIZE:
_build_indicators(config, document_store, relevant_ids)
relevant_ids = []
if time.time() > next_event:
print("processed till case %s" % (doc_id['case_id']))
next_event = time.time() + 10
if relevant_ids:
_build_indicators(config, document_store, relevant_ids)
|
<commit_before><commit_msg>Add Mgt command to rebuild ccs records
rebuild ccs record cases for pnc_complete=1<commit_after>from __future__ import absolute_import
from __future__ import unicode_literals
import time
from django.core.management.base import BaseCommand
from corehq.apps.userreports.tasks import _get_config_by_id, _build_indicators
from corehq.apps.change_feed.data_sources import get_document_store_for_doc_type
from custom.icds_reports.models.aggregate import CcsRecordMonthly
from pillowtop.dao.couch import ID_CHUNK_SIZE
class Command(BaseCommand):
help = "Rebuild ccs record cases ucr for pnc_complete"
def handle(self):
indicator_config_id = 'static-ccs_record_cases'
case_type_of_xmlns = 'ccs_record'
config = _get_config_by_id(indicator_config_id)
document_store = get_document_store_for_doc_type(
config.domain, config.referenced_doc_type, case_type_or_xmlns=case_type_of_xmlns
)
doc_ids = CcsRecordMonthly.objects.filter(pnc_complete=1).values('case_id')
relevant_ids = list()
next_event = time.time() + 10
for doc_id in doc_ids:
relevant_ids.append(doc_id['case_id'])
if len(relevant_ids) >= ID_CHUNK_SIZE:
_build_indicators(config, document_store, relevant_ids)
relevant_ids = []
if time.time() > next_event:
print("processed till case %s" % (doc_id['case_id']))
next_event = time.time() + 10
if relevant_ids:
_build_indicators(config, document_store, relevant_ids)
|
|
a5c8ab3637ccf748dce8ea8d8941ba2dc45c0086
|
wikipendium/jitishcron/tasks.py
|
wikipendium/jitishcron/tasks.py
|
from wikipendium.jitishcron.decorators import task
from wikipendium import settings
import subprocess
twenty_four_hours_in_seconds = 60 * 60 * 24
@task(min_interval_in_seconds=twenty_four_hours_in_seconds)
def database_backup():
if not settings.DEBUG:
subprocess.Popen(['venv/bin/python', 'manage.py', 'backup-to-s3'])
|
Add database backup JITishCron task
|
Add database backup JITishCron task
Now the database will be backed up once per day, given activity on the
site. The task is run asynchronously using subprocess.Popen.
This fixes #46.
|
Python
|
apache-2.0
|
stianjensen/wikipendium.no,stianjensen/wikipendium.no,stianjensen/wikipendium.no
|
Add database backup JITishCron task
Now the database will be backed up once per day, given activity on the
site. The task is run asynchronously using subprocess.Popen.
This fixes #46.
|
from wikipendium.jitishcron.decorators import task
from wikipendium import settings
import subprocess
twenty_four_hours_in_seconds = 60 * 60 * 24
@task(min_interval_in_seconds=twenty_four_hours_in_seconds)
def database_backup():
if not settings.DEBUG:
subprocess.Popen(['venv/bin/python', 'manage.py', 'backup-to-s3'])
|
<commit_before><commit_msg>Add database backup JITishCron task
Now the database will be backed up once per day, given activity on the
site. The task is run asynchronously using subprocess.Popen.
This fixes #46.<commit_after>
|
from wikipendium.jitishcron.decorators import task
from wikipendium import settings
import subprocess
twenty_four_hours_in_seconds = 60 * 60 * 24
@task(min_interval_in_seconds=twenty_four_hours_in_seconds)
def database_backup():
if not settings.DEBUG:
subprocess.Popen(['venv/bin/python', 'manage.py', 'backup-to-s3'])
|
Add database backup JITishCron task
Now the database will be backed up once per day, given activity on the
site. The task is run asynchronously using subprocess.Popen.
This fixes #46.from wikipendium.jitishcron.decorators import task
from wikipendium import settings
import subprocess
twenty_four_hours_in_seconds = 60 * 60 * 24
@task(min_interval_in_seconds=twenty_four_hours_in_seconds)
def database_backup():
if not settings.DEBUG:
subprocess.Popen(['venv/bin/python', 'manage.py', 'backup-to-s3'])
|
<commit_before><commit_msg>Add database backup JITishCron task
Now the database will be backed up once per day, given activity on the
site. The task is run asynchronously using subprocess.Popen.
This fixes #46.<commit_after>from wikipendium.jitishcron.decorators import task
from wikipendium import settings
import subprocess
twenty_four_hours_in_seconds = 60 * 60 * 24
@task(min_interval_in_seconds=twenty_four_hours_in_seconds)
def database_backup():
if not settings.DEBUG:
subprocess.Popen(['venv/bin/python', 'manage.py', 'backup-to-s3'])
|
|
f50c5d683cc46c08d887afc8fb3a167edd6bc8fd
|
polling_stations/apps/data_collection/management/commands/import_ceredigion.py
|
polling_stations/apps/data_collection/management/commands/import_ceredigion.py
|
"""
Import Ceredigion
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Ceredigion
"""
council_id = 'W06000008'
addresses_name = 'Ceredigion_Addresses_processed.csv'
stations_name = 'Ceredigion_Polling_Stations_processed.csv'
srid = 27700
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.address2,
record.address3,
record.address4
])
location = Point(float(record.x_coordinate), float(record.y_coordinate), srid=self.get_srid())
return {
'internal_council_id': record.polling_station_id,
'postcode' : record.postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.street_description,
record.town_name,
record.administrative_area
])
return {
'address' : address,
'postcode' : record.postcode_locator,
'polling_station_id': record.polling_station_id
}
|
Add import script for Ceredigion
|
Add import script for Ceredigion
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations
|
Add import script for Ceredigion
|
"""
Import Ceredigion
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Ceredigion
"""
council_id = 'W06000008'
addresses_name = 'Ceredigion_Addresses_processed.csv'
stations_name = 'Ceredigion_Polling_Stations_processed.csv'
srid = 27700
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.address2,
record.address3,
record.address4
])
location = Point(float(record.x_coordinate), float(record.y_coordinate), srid=self.get_srid())
return {
'internal_council_id': record.polling_station_id,
'postcode' : record.postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.street_description,
record.town_name,
record.administrative_area
])
return {
'address' : address,
'postcode' : record.postcode_locator,
'polling_station_id': record.polling_station_id
}
|
<commit_before><commit_msg>Add import script for Ceredigion<commit_after>
|
"""
Import Ceredigion
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Ceredigion
"""
council_id = 'W06000008'
addresses_name = 'Ceredigion_Addresses_processed.csv'
stations_name = 'Ceredigion_Polling_Stations_processed.csv'
srid = 27700
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.address2,
record.address3,
record.address4
])
location = Point(float(record.x_coordinate), float(record.y_coordinate), srid=self.get_srid())
return {
'internal_council_id': record.polling_station_id,
'postcode' : record.postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.street_description,
record.town_name,
record.administrative_area
])
return {
'address' : address,
'postcode' : record.postcode_locator,
'polling_station_id': record.polling_station_id
}
|
Add import script for Ceredigion"""
Import Ceredigion
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Ceredigion
"""
council_id = 'W06000008'
addresses_name = 'Ceredigion_Addresses_processed.csv'
stations_name = 'Ceredigion_Polling_Stations_processed.csv'
srid = 27700
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.address2,
record.address3,
record.address4
])
location = Point(float(record.x_coordinate), float(record.y_coordinate), srid=self.get_srid())
return {
'internal_council_id': record.polling_station_id,
'postcode' : record.postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.street_description,
record.town_name,
record.administrative_area
])
return {
'address' : address,
'postcode' : record.postcode_locator,
'polling_station_id': record.polling_station_id
}
|
<commit_before><commit_msg>Add import script for Ceredigion<commit_after>"""
Import Ceredigion
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Ceredigion
"""
council_id = 'W06000008'
addresses_name = 'Ceredigion_Addresses_processed.csv'
stations_name = 'Ceredigion_Polling_Stations_processed.csv'
srid = 27700
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.address2,
record.address3,
record.address4
])
location = Point(float(record.x_coordinate), float(record.y_coordinate), srid=self.get_srid())
return {
'internal_council_id': record.polling_station_id,
'postcode' : record.postcode,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
address = "\n".join([
record.address1,
record.street_description,
record.town_name,
record.administrative_area
])
return {
'address' : address,
'postcode' : record.postcode_locator,
'polling_station_id': record.polling_station_id
}
|
|
6ca1fc012e1fb2a3bfc231af1bab3edfba6a30d0
|
pinax/stripe/tests/test_hooks.py
|
pinax/stripe/tests/test_hooks.py
|
import decimal
from django.test import TestCase
from django.contrib.auth import get_user_model
from mock import patch
from ..hooks import DefaultHookSet
from ..proxies import ChargeProxy, CustomerProxy
class HooksTestCase(TestCase):
def setUp(self):
self.User = get_user_model()
self.user = self.User.objects.create_user(
username="patrick",
email="paltman@eldarion.com"
)
self.customer = CustomerProxy.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx"
)
self.hookset = DefaultHookSet()
def test_adjust_subscription_quantity(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=3)
self.assertEquals(new_qty, 3)
def test_adjust_subscription_quantity_none(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=None)
self.assertEquals(new_qty, 1)
def test_trial_period(self):
period = self.hookset.trial_period(self.user, "some plan")
self.assertIsNone(period)
def test_send_receipt(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=False
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
def test_send_receipt_already_sent(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=True
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
|
Add some tests for hooks
|
Add some tests for hooks
|
Python
|
mit
|
pinax/django-stripe-payments
|
Add some tests for hooks
|
import decimal
from django.test import TestCase
from django.contrib.auth import get_user_model
from mock import patch
from ..hooks import DefaultHookSet
from ..proxies import ChargeProxy, CustomerProxy
class HooksTestCase(TestCase):
def setUp(self):
self.User = get_user_model()
self.user = self.User.objects.create_user(
username="patrick",
email="paltman@eldarion.com"
)
self.customer = CustomerProxy.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx"
)
self.hookset = DefaultHookSet()
def test_adjust_subscription_quantity(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=3)
self.assertEquals(new_qty, 3)
def test_adjust_subscription_quantity_none(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=None)
self.assertEquals(new_qty, 1)
def test_trial_period(self):
period = self.hookset.trial_period(self.user, "some plan")
self.assertIsNone(period)
def test_send_receipt(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=False
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
def test_send_receipt_already_sent(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=True
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
|
<commit_before><commit_msg>Add some tests for hooks<commit_after>
|
import decimal
from django.test import TestCase
from django.contrib.auth import get_user_model
from mock import patch
from ..hooks import DefaultHookSet
from ..proxies import ChargeProxy, CustomerProxy
class HooksTestCase(TestCase):
def setUp(self):
self.User = get_user_model()
self.user = self.User.objects.create_user(
username="patrick",
email="paltman@eldarion.com"
)
self.customer = CustomerProxy.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx"
)
self.hookset = DefaultHookSet()
def test_adjust_subscription_quantity(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=3)
self.assertEquals(new_qty, 3)
def test_adjust_subscription_quantity_none(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=None)
self.assertEquals(new_qty, 1)
def test_trial_period(self):
period = self.hookset.trial_period(self.user, "some plan")
self.assertIsNone(period)
def test_send_receipt(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=False
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
def test_send_receipt_already_sent(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=True
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
|
Add some tests for hooksimport decimal
from django.test import TestCase
from django.contrib.auth import get_user_model
from mock import patch
from ..hooks import DefaultHookSet
from ..proxies import ChargeProxy, CustomerProxy
class HooksTestCase(TestCase):
def setUp(self):
self.User = get_user_model()
self.user = self.User.objects.create_user(
username="patrick",
email="paltman@eldarion.com"
)
self.customer = CustomerProxy.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx"
)
self.hookset = DefaultHookSet()
def test_adjust_subscription_quantity(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=3)
self.assertEquals(new_qty, 3)
def test_adjust_subscription_quantity_none(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=None)
self.assertEquals(new_qty, 1)
def test_trial_period(self):
period = self.hookset.trial_period(self.user, "some plan")
self.assertIsNone(period)
def test_send_receipt(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=False
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
def test_send_receipt_already_sent(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=True
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
|
<commit_before><commit_msg>Add some tests for hooks<commit_after>import decimal
from django.test import TestCase
from django.contrib.auth import get_user_model
from mock import patch
from ..hooks import DefaultHookSet
from ..proxies import ChargeProxy, CustomerProxy
class HooksTestCase(TestCase):
def setUp(self):
self.User = get_user_model()
self.user = self.User.objects.create_user(
username="patrick",
email="paltman@eldarion.com"
)
self.customer = CustomerProxy.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx"
)
self.hookset = DefaultHookSet()
def test_adjust_subscription_quantity(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=3)
self.assertEquals(new_qty, 3)
def test_adjust_subscription_quantity_none(self):
new_qty = self.hookset.adjust_subscription_quantity(customer=None, plan=None, quantity=None)
self.assertEquals(new_qty, 1)
def test_trial_period(self):
period = self.hookset.trial_period(self.user, "some plan")
self.assertIsNone(period)
def test_send_receipt(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=False
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
def test_send_receipt_already_sent(self):
charge = ChargeProxy.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
source="card_01",
amount=decimal.Decimal("10.00"),
currency="usd",
paid=True,
refunded=False,
disputed=False,
receipt_sent=True
)
self.hookset.send_receipt(charge)
self.assertTrue(ChargeProxy.objects.get(pk=charge.pk).receipt_sent)
|
|
74f4359ce3afddc3fab122e0aa741a4b0b276821
|
spreadflow_core/test/test_eventdispatcher.py
|
spreadflow_core/test/test_eventdispatcher.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mock import Mock
from testtools import TestCase
from spreadflow_core.eventdispatcher import EventDispatcher
class TestEvent(object):
pass
class OtherEvent(object):
pass
class EventDispatcherTestCase(TestCase):
def test_manage_listeners(self):
dispatcher = EventDispatcher()
listeners = list(dispatcher.get_listeners(TestEvent))
self.assertEqual(len(listeners), 0)
test_callback_prio_0_cb_0 = Mock()
test_callback_prio_0_cb_1 = Mock()
test_callback_prio_1_cb_0 = Mock()
other_callback_prio_2_cb_0 = Mock()
# Register callbacks, first priority 1 ...
key_prio_1_cb_0 = dispatcher.add_listener(TestEvent, 1, test_callback_prio_1_cb_0)
# ... aftwerwards priority 0 ...
key_prio_0_cb_0 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_0)
key_prio_0_cb_1 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_1)
# ... and finally priority 2 for another event.
key_prio_2_cb_0 = dispatcher.add_listener(OtherEvent, 2, other_callback_prio_2_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_0, test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
# Remove one listener.
dispatcher.remove_listener(key_prio_0_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
|
Add some tests for event dispatcher
|
Add some tests for event dispatcher
|
Python
|
mit
|
znerol/spreadflow-core,spreadflow/spreadflow-core
|
Add some tests for event dispatcher
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mock import Mock
from testtools import TestCase
from spreadflow_core.eventdispatcher import EventDispatcher
class TestEvent(object):
pass
class OtherEvent(object):
pass
class EventDispatcherTestCase(TestCase):
def test_manage_listeners(self):
dispatcher = EventDispatcher()
listeners = list(dispatcher.get_listeners(TestEvent))
self.assertEqual(len(listeners), 0)
test_callback_prio_0_cb_0 = Mock()
test_callback_prio_0_cb_1 = Mock()
test_callback_prio_1_cb_0 = Mock()
other_callback_prio_2_cb_0 = Mock()
# Register callbacks, first priority 1 ...
key_prio_1_cb_0 = dispatcher.add_listener(TestEvent, 1, test_callback_prio_1_cb_0)
# ... afterwards priority 0 ...
key_prio_0_cb_0 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_0)
key_prio_0_cb_1 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_1)
# ... and finally priority 2 for another event.
key_prio_2_cb_0 = dispatcher.add_listener(OtherEvent, 2, other_callback_prio_2_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_0, test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
# Remove one listener.
dispatcher.remove_listener(key_prio_0_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
|
<commit_before><commit_msg>Add some tests for event dispatcher<commit_after>
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mock import Mock
from testtools import TestCase
from spreadflow_core.eventdispatcher import EventDispatcher
class TestEvent(object):
pass
class OtherEvent(object):
pass
class EventDispatcherTestCase(TestCase):
def test_manage_listeners(self):
dispatcher = EventDispatcher()
listeners = list(dispatcher.get_listeners(TestEvent))
self.assertEqual(len(listeners), 0)
test_callback_prio_0_cb_0 = Mock()
test_callback_prio_0_cb_1 = Mock()
test_callback_prio_1_cb_0 = Mock()
other_callback_prio_2_cb_0 = Mock()
# Register callbacks, first priority 1 ...
key_prio_1_cb_0 = dispatcher.add_listener(TestEvent, 1, test_callback_prio_1_cb_0)
# ... afterwards priority 0 ...
key_prio_0_cb_0 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_0)
key_prio_0_cb_1 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_1)
# ... and finally priority 2 for another event.
key_prio_2_cb_0 = dispatcher.add_listener(OtherEvent, 2, other_callback_prio_2_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_0, test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
# Remove one listener.
dispatcher.remove_listener(key_prio_0_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
|
Add some tests for event dispatcherfrom __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mock import Mock
from testtools import TestCase
from spreadflow_core.eventdispatcher import EventDispatcher
class TestEvent(object):
pass
class OtherEvent(object):
pass
class EventDispatcherTestCase(TestCase):
def test_manage_listeners(self):
dispatcher = EventDispatcher()
listeners = list(dispatcher.get_listeners(TestEvent))
self.assertEqual(len(listeners), 0)
test_callback_prio_0_cb_0 = Mock()
test_callback_prio_0_cb_1 = Mock()
test_callback_prio_1_cb_0 = Mock()
other_callback_prio_2_cb_0 = Mock()
# Register callbacks, first priority 1 ...
key_prio_1_cb_0 = dispatcher.add_listener(TestEvent, 1, test_callback_prio_1_cb_0)
# ... afterwards priority 0 ...
key_prio_0_cb_0 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_0)
key_prio_0_cb_1 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_1)
# ... and finally priority 2 for another event.
key_prio_2_cb_0 = dispatcher.add_listener(OtherEvent, 2, other_callback_prio_2_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_0, test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
# Remove one listener.
dispatcher.remove_listener(key_prio_0_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
|
<commit_before><commit_msg>Add some tests for event dispatcher<commit_after>from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mock import Mock
from testtools import TestCase
from spreadflow_core.eventdispatcher import EventDispatcher
class TestEvent(object):
pass
class OtherEvent(object):
pass
class EventDispatcherTestCase(TestCase):
def test_manage_listeners(self):
dispatcher = EventDispatcher()
listeners = list(dispatcher.get_listeners(TestEvent))
self.assertEqual(len(listeners), 0)
test_callback_prio_0_cb_0 = Mock()
test_callback_prio_0_cb_1 = Mock()
test_callback_prio_1_cb_0 = Mock()
other_callback_prio_2_cb_0 = Mock()
# Register callbacks, first priority 1 ...
key_prio_1_cb_0 = dispatcher.add_listener(TestEvent, 1, test_callback_prio_1_cb_0)
# ... afterwards priority 0 ...
key_prio_0_cb_0 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_0)
key_prio_0_cb_1 = dispatcher.add_listener(TestEvent, 0, test_callback_prio_0_cb_1)
# ... and finally priority 2 for another event.
key_prio_2_cb_0 = dispatcher.add_listener(OtherEvent, 2, other_callback_prio_2_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_0, test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
# Remove one listener.
dispatcher.remove_listener(key_prio_0_cb_0)
# Collect callbacks from listeners list.
actual_handlers = []
for priority, group in dispatcher.get_listeners(TestEvent):
callbacks = [handler.callback for key, handler in group]
actual_handlers.append((priority, callbacks))
expected_handlers = [
(0, [test_callback_prio_0_cb_1]),
(1, [test_callback_prio_1_cb_0]),
]
self.assertEqual(expected_handlers, actual_handlers)
|
|
c7c54f6d3afdd8b63775a4f2177275817143d423
|
tests/TestProfileRequirements.py
|
tests/TestProfileRequirements.py
|
import configparser #To read the profiles.
import os #To join paths.
import pytest
## Makes sure that the variants for the Ultimaker 3 Extended are exactly the
# same as for the Ultimaker 3.
#
# Once we have specialised profiles or a mechanism to inherit variants too, we
# may remove this test and have different profiles for the extended where
# needed, but until then we should keep them the same. It's happened all too
# often that we updated the variants for the UM3 but forgot about the UM3E.
@pytest.mark.parametrize("um3_file, um3e_file", [
#List the corresponding files below.
("ultimaker3_aa0.8.inst.cfg", "ultimaker3_extended_aa0.8.inst.cfg"),
("ultimaker3_aa04.inst.cfg", "ultimaker3_extended_aa04.inst.cfg"),
("ultimaker3_bb0.8.inst.cfg", "ultimaker3_extended_bb0.8.inst.cfg"),
("ultimaker3_bb04.inst.cfg", "ultimaker3_extended_bb04.inst.cfg")
])
def test_ultimaker3extended_variants(um3_file, um3e_file):
directory = os.path.join(os.path.dirname(__file__), "..", "resources", "variants") #TODO: Hardcoded path relative to this test file.
um3 = configparser.ConfigParser()
um3.read_file(open(os.path.join(directory, um3_file)))
um3e = configparser.ConfigParser()
um3e.read_file(open(os.path.join(directory, um3e_file)))
assert um3["values"] == um3e["values"]
|
Add test to ensure that UM3 and UM3E variants are kept the same
|
Add test to ensure that UM3 and UM3E variants are kept the same
It's a bit chunky, but functional.
|
Python
|
agpl-3.0
|
Curahelper/Cura,Curahelper/Cura
|
Add test to ensure that UM3 and UM3E variants are kept the same
It's a bit chunky, but functional.
|
import configparser #To read the profiles.
import os #To join paths.
import pytest
## Makes sure that the variants for the Ultimaker 3 Extended are exactly the
# same as for the Ultimaker 3.
#
# Once we have specialised profiles or a mechanism to inherit variants too, we
# may remove this test and have different profiles for the extended where
# needed, but until then we should keep them the same. It's happened all too
# often that we updated the variants for the UM3 but forgot about the UM3E.
@pytest.mark.parametrize("um3_file, um3e_file", [
#List the corresponding files below.
("ultimaker3_aa0.8.inst.cfg", "ultimaker3_extended_aa0.8.inst.cfg"),
("ultimaker3_aa04.inst.cfg", "ultimaker3_extended_aa04.inst.cfg"),
("ultimaker3_bb0.8.inst.cfg", "ultimaker3_extended_bb0.8.inst.cfg"),
("ultimaker3_bb04.inst.cfg", "ultimaker3_extended_bb04.inst.cfg")
])
def test_ultimaker3extended_variants(um3_file, um3e_file):
directory = os.path.join(os.path.dirname(__file__), "..", "resources", "variants") #TODO: Hardcoded path relative to this test file.
um3 = configparser.ConfigParser()
um3.read_file(open(os.path.join(directory, um3_file)))
um3e = configparser.ConfigParser()
um3e.read_file(open(os.path.join(directory, um3e_file)))
assert um3["values"] == um3e["values"]
|
<commit_before><commit_msg>Add test to ensure that UM3 and UM3E variants are kept the same
It's a bit chunky, but functional.<commit_after>
|
import configparser #To read the profiles.
import os #To join paths.
import pytest
## Makes sure that the variants for the Ultimaker 3 Extended are exactly the
# same as for the Ultimaker 3.
#
# Once we have specialised profiles or a mechanism to inherit variants too, we
# may remove this test and have different profiles for the extended where
# needed, but until then we should keep them the same. It's happened all too
# often that we updated the variants for the UM3 but forgot about the UM3E.
@pytest.mark.parametrize("um3_file, um3e_file", [
#List the corresponding files below.
("ultimaker3_aa0.8.inst.cfg", "ultimaker3_extended_aa0.8.inst.cfg"),
("ultimaker3_aa04.inst.cfg", "ultimaker3_extended_aa04.inst.cfg"),
("ultimaker3_bb0.8.inst.cfg", "ultimaker3_extended_bb0.8.inst.cfg"),
("ultimaker3_bb04.inst.cfg", "ultimaker3_extended_bb04.inst.cfg")
])
def test_ultimaker3extended_variants(um3_file, um3e_file):
directory = os.path.join(os.path.dirname(__file__), "..", "resources", "variants") #TODO: Hardcoded path relative to this test file.
um3 = configparser.ConfigParser()
um3.read_file(open(os.path.join(directory, um3_file)))
um3e = configparser.ConfigParser()
um3e.read_file(open(os.path.join(directory, um3e_file)))
assert um3["values"] == um3e["values"]
|
Add test to ensure that UM3 and UM3E variants are kept the same
It's a bit chunky, but functional.import configparser #To read the profiles.
import os #To join paths.
import pytest
## Makes sure that the variants for the Ultimaker 3 Extended are exactly the
# same as for the Ultimaker 3.
#
# Once we have specialised profiles or a mechanism to inherit variants too, we
# may remove this test and have different profiles for the extended where
# needed, but until then we should keep them the same. It's happened all too
# often that we updated the variants for the UM3 but forgot about the UM3E.
@pytest.mark.parametrize("um3_file, um3e_file", [
#List the corresponding files below.
("ultimaker3_aa0.8.inst.cfg", "ultimaker3_extended_aa0.8.inst.cfg"),
("ultimaker3_aa04.inst.cfg", "ultimaker3_extended_aa04.inst.cfg"),
("ultimaker3_bb0.8.inst.cfg", "ultimaker3_extended_bb0.8.inst.cfg"),
("ultimaker3_bb04.inst.cfg", "ultimaker3_extended_bb04.inst.cfg")
])
def test_ultimaker3extended_variants(um3_file, um3e_file):
directory = os.path.join(os.path.dirname(__file__), "..", "resources", "variants") #TODO: Hardcoded path relative to this test file.
um3 = configparser.ConfigParser()
um3.read_file(open(os.path.join(directory, um3_file)))
um3e = configparser.ConfigParser()
um3e.read_file(open(os.path.join(directory, um3e_file)))
assert um3["values"] == um3e["values"]
|
<commit_before><commit_msg>Add test to ensure that UM3 and UM3E variants are kept the same
It's a bit chunky, but functional.<commit_after>import configparser #To read the profiles.
import os #To join paths.
import pytest
## Makes sure that the variants for the Ultimaker 3 Extended are exactly the
# same as for the Ultimaker 3.
#
# Once we have specialised profiles or a mechanism to inherit variants too, we
# may remove this test and have different profiles for the extended where
# needed, but until then we should keep them the same. It's happened all too
# often that we updated the variants for the UM3 but forgot about the UM3E.
@pytest.mark.parametrize("um3_file, um3e_file", [
#List the corresponding files below.
("ultimaker3_aa0.8.inst.cfg", "ultimaker3_extended_aa0.8.inst.cfg"),
("ultimaker3_aa04.inst.cfg", "ultimaker3_extended_aa04.inst.cfg"),
("ultimaker3_bb0.8.inst.cfg", "ultimaker3_extended_bb0.8.inst.cfg"),
("ultimaker3_bb04.inst.cfg", "ultimaker3_extended_bb04.inst.cfg")
])
def test_ultimaker3extended_variants(um3_file, um3e_file):
directory = os.path.join(os.path.dirname(__file__), "..", "resources", "variants") #TODO: Hardcoded path relative to this test file.
um3 = configparser.ConfigParser()
um3.read_file(open(os.path.join(directory, um3_file)))
um3e = configparser.ConfigParser()
um3e.read_file(open(os.path.join(directory, um3e_file)))
assert um3["values"] == um3e["values"]
|
|
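The variant test above compares the [values] sections of paired .inst.cfg files but leaves the opened files unclosed; a hedged sketch of the same comparison using context managers (directory and file names are placeholders):
import configparser, os
def variants_match(directory, um3_file, um3e_file):
    """True when the [values] sections of the two profiles are identical."""
    sections = []
    for name in (um3_file, um3e_file):
        parser = configparser.ConfigParser()
        with open(os.path.join(directory, name)) as handle:
            parser.read_file(handle)
        sections.append(dict(parser["values"]))
    return sections[0] == sections[1]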
60865b51bfffb2b623bc7ed423621141f5126350
|
h2o-py/tests/testdir_algos/deepwater/pyunit_airlines_cv_deepwater.py
|
h2o-py/tests/testdir_algos/deepwater/pyunit_airlines_cv_deepwater.py
|
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def cv_airlines():
# read in the dataset and construct training set (and validation set)
df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))
# pick the predictors and the correct response column
predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime","CRSArrTime","UniqueCarrier","FlightNum"]
response_col = "IsDepDelayed"
dl = H2ODeepWaterEstimator(nfolds=3, hidden=[200,200], activation="Rectifier", # network (fully-connected)
hidden_dropout_ratios=[0.1,0.1], input_dropout_ratio=0.0, # regularization
epochs=10, train_samples_per_iteration=100000, # auto-tuning
rate=5e-3, rate_annealing=1e-6, # learning rate
momentum_start=0.9, momentum_stable=0.99, momentum_ramp=1e7, # momentum
mini_batch_size=32, score_duty_cycle=0.2) # scoring
dl.train(x=predictors, y=response_col, training_frame=df)
print(dl.show())
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_airlines)
else:
cv_airlines()
|
Add 3-fold cv airlines example for Deep Water.
|
Add 3-fold cv airlines example for Deep Water.
|
Python
|
apache-2.0
|
jangorecki/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,mathemage/h2o-3,mathemage/h2o-3,mathemage/h2o-3,mathemage/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,spennihana/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,jangorecki/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,spennihana/h2o-3,h2oai/h2o-3,mathemage/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,h2oai/h2o-3
|
Add 3-fold cv airlines example for Deep Water.
|
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def cv_airlines():
# read in the dataset and construct training set (and validation set)
df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))
# pick the predictors and the correct response column
predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime","CRSArrTime","UniqueCarrier","FlightNum"]
response_col = "IsDepDelayed"
dl = H2ODeepWaterEstimator(nfolds=3, hidden=[200,200], activation="Rectifier", # network (fully-connected)
hidden_dropout_ratios=[0.1,0.1], input_dropout_ratio=0.0, # regularization
epochs=10, train_samples_per_iteration=100000, # auto-tuning
rate=5e-3, rate_annealing=1e-6, # learning rate
momentum_start=0.9, momentum_stable=0.99, momentum_ramp=1e7, # momentum
mini_batch_size=32, score_duty_cycle=0.2) # scoring
dl.train(x=predictors, y=response_col, training_frame=df)
print(dl.show())
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_airlines)
else:
cv_airlines()
|
<commit_before><commit_msg>Add 3-fold cv airlines example for Deep Water.<commit_after>
|
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def cv_airlines():
# read in the dataset and construct training set (and validation set)
df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))
# pick the predictors and the correct response column
predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime","CRSArrTime","UniqueCarrier","FlightNum"]
response_col = "IsDepDelayed"
dl = H2ODeepWaterEstimator(nfolds=3, hidden=[200,200], activation="Rectifier", # network (fully-connected)
hidden_dropout_ratios=[0.1,0.1], input_dropout_ratio=0.0, # regularization
epochs=10, train_samples_per_iteration=100000, # auto-tuning
rate=5e-3, rate_annealing=1e-6, # learning rate
momentum_start=0.9, momentum_stable=0.99, momentum_ramp=1e7, # momentum
mini_batch_size=32, score_duty_cycle=0.2) # scoring
dl.train(x=predictors, y=response_col, training_frame=df)
print(dl.show())
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_airlines)
else:
cv_airlines()
|
Add 3-fold cv airlines example for Deep Water.from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def cv_airlines():
# read in the dataset and construct training set (and validation set)
df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))
# pick the predictors and the correct response column
predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime","CRSArrTime","UniqueCarrier","FlightNum"]
response_col = "IsDepDelayed"
dl = H2ODeepWaterEstimator(nfolds=3, hidden=[200,200], activation="Rectifier", # network (fully-connected)
hidden_dropout_ratios=[0.1,0.1], input_dropout_ratio=0.0, # regularization
epochs=10, train_samples_per_iteration=100000, # auto-tuning
rate=5e-3, rate_annealing=1e-6, # learning rate
momentum_start=0.9, momentum_stable=0.99, momentum_ramp=1e7, # momentum
mini_batch_size=32, score_duty_cycle=0.2) # scoring
dl.train(x=predictors, y=response_col, training_frame=df)
print(dl.show())
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_airlines)
else:
cv_airlines()
|
<commit_before><commit_msg>Add 3-fold cv airlines example for Deep Water.<commit_after>from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def cv_airlines():
# read in the dataset and construct training set (and validation set)
df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/allyears2k_headers.zip"))
# pick the predictors and the correct response column
predictors = ["Year","Month","DayofMonth","DayOfWeek","CRSDepTime","CRSArrTime","UniqueCarrier","FlightNum"]
response_col = "IsDepDelayed"
dl = H2ODeepWaterEstimator(nfolds=3, hidden=[200,200], activation="Rectifier", # network (fully-connected)
hidden_dropout_ratios=[0.1,0.1], input_dropout_ratio=0.0, # regularization
epochs=10, train_samples_per_iteration=100000, # auto-tuning
rate=5e-3, rate_annealing=1e-6, # learning rate
momentum_start=0.9, momentum_stable=0.99, momentum_ramp=1e7, # momentum
mini_batch_size=32, score_duty_cycle=0.2) # scoring
dl.train(x=predictors, y=response_col, training_frame=df)
print(dl.show())
if __name__ == "__main__":
pyunit_utils.standalone_test(cv_airlines)
else:
cv_airlines()
|
|
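nfolds=3 above asks H2O to train three cross-validation models internally; as a rough illustration of what a fold assignment does, here is a NumPy-only sketch that partitions row indices into disjoint folds (independent of the H2O API):
import numpy as np
def fold_indices(n_rows, nfolds, seed=0):
    """Assign each row to exactly one of `nfolds` folds."""
    rng = np.random.default_rng(seed)
    assignment = rng.permutation(n_rows) % nfolds
    return [np.where(assignment == fold)[0] for fold in range(nfolds)]
folds = fold_indices(10, 3)
assert sum(len(f) for f in folds) == 10                  # every row appears
assert len(set(np.concatenate(folds).tolist())) == 10    # ...exactly once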
f4f92cc3f5f41f1d8a06089e306a75062f53cb2e
|
pycroscopy/learn/dl/datautils.py
|
pycroscopy/learn/dl/datautils.py
|
from typing import Tuple, Type, Union
import torch
import numpy as np
def init_dataloaders(X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
batch_size: int
) -> Tuple[Type[torch.utils.data.DataLoader]]:
"""
Initializes train and test data loaders
"""
test_iterator = None
test_data = X_test is not None and y_test is not None
device_ = 'cuda' if torch.cuda.is_available() else 'cpu'
X_train = tor(X_train).to(device_)
y_train = tor(y_train).to(device_)
tensordata_train = torch.utils.data.TensorDataset(X_train, y_train)
train_iterator = torch.utils.data.DataLoader(
tensordata_train, batch_size=batch_size, shuffle=True)
if test_data:
X_test = tor(X_test).to(device_)
y_test = tor(y_test).to(device_)
tensordata_test = torch.utils.data.TensorDataset(X_test, y_test)
test_iterator = torch.utils.data.DataLoader(
tensordata_test, batch_size=batch_size)
return train_iterator, test_iterator
def tor(arr: Union[np.ndarray, torch.Tensor],
out_type: str = "float") -> torch.Tensor:
"""
Convertor to PyTorch tensor ('float' or 'long')
"""
if not isinstance(arr, (np.ndarray, torch.Tensor)):
raise NotImplementedError("Provide data as numpy array or torch tensor")
if isinstance(arr, np.ndarray):
arr = torch.from_numpy(arr)
if out_type == "float":
arr = arr.float()
elif out_type == "long":
arr = arr.long()
return arr
|
Add utils for initalizing dataloaders
|
Add utils for initalizing dataloaders
|
Python
|
mit
|
pycroscopy/pycroscopy
|
Add utils for initalizing dataloaders
|
from typing import Tuple, Type, Union
import torch
import numpy as np
def init_dataloaders(X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
batch_size: int
) -> Tuple[Type[torch.utils.data.DataLoader]]:
"""
Initializes train and test data loaders
"""
test_iterator = None
test_data = X_test is not None and y_test is not None
device_ = 'cuda' if torch.cuda.is_available() else 'cpu'
X_train = tor(X_train).to(device_)
y_train = tor(y_train).to(device_)
tensordata_train = torch.utils.data.TensorDataset(X_train, y_train)
train_iterator = torch.utils.data.DataLoader(
tensordata_train, batch_size=batch_size, shuffle=True)
if test_data:
X_test = tor(X_test).to(device_)
y_test = tor(y_test).to(device_)
tensordata_test = torch.utils.data.TensorDataset(X_test, y_test)
test_iterator = torch.utils.data.DataLoader(
tensordata_test, batch_size=batch_size)
return train_iterator, test_iterator
def tor(arr: Union[np.ndarray, torch.Tensor],
out_type: str = "float") -> torch.Tensor:
"""
Convertor to PyTorch tensor ('float' or 'long')
"""
if not isinstance(arr, (np.ndarray, torch.Tensor)):
raise NotImplementedError("Provide data as numpy array or torch tensor")
if isinstance(arr, np.ndarray):
arr = torch.from_numpy(arr)
if out_type == "float":
arr = arr.float()
elif out_type == "long":
arr = arr.long()
return arr
|
<commit_before><commit_msg>Add utils for initalizing dataloaders<commit_after>
|
from typing import Tuple, Type, Union
import torch
import numpy as np
def init_dataloaders(X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
batch_size: int
) -> Tuple[Type[torch.utils.data.DataLoader]]:
"""
Initializes train and test data loaders
"""
test_iterator = None
test_data = X_test is not None and y_test is not None
device_ = 'cuda' if torch.cuda.is_available() else 'cpu'
X_train = tor(X_train).to(device_)
y_train = tor(y_train).to(device_)
tensordata_train = torch.utils.data.TensorDataset(X_train, y_train)
train_iterator = torch.utils.data.DataLoader(
tensordata_train, batch_size=batch_size, shuffle=True)
if test_data:
X_test = tor(X_test).to(device_)
y_test = tor(y_test).to(device_)
tensordata_test = torch.utils.data.TensorDataset(X_test, y_test)
test_iterator = torch.utils.data.DataLoader(
tensordata_test, batch_size=batch_size)
return train_iterator, test_iterator
def tor(arr: Union[np.ndarray, torch.Tensor],
out_type: str = "float") -> torch.Tensor:
"""
Convertor to PyTorch tensor ('float' or 'long')
"""
if not isinstance(arr, (np.ndarray, torch.Tensor)):
raise NotImplementedError("Provide data as numpy array or torch tensor")
if isinstance(arr, np.ndarray):
arr = torch.from_numpy(arr)
if out_type == "float":
arr = arr.float()
elif out_type == "long":
arr = arr.long()
return arr
|
Add utils for initalizing dataloadersfrom typing import Tuple, Type, Union
import torch
import numpy as np
def init_dataloaders(X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
batch_size: int
) -> Tuple[Type[torch.utils.data.DataLoader]]:
"""
Initializes train and test data loaders
"""
test_iterator = None
test_data = X_test is not None and y_test is not None
device_ = 'cuda' if torch.cuda.is_available() else 'cpu'
X_train = tor(X_train).to(device_)
y_train = tor(y_train).to(device_)
tensordata_train = torch.utils.data.TensorDataset(X_train, y_train)
train_iterator = torch.utils.data.DataLoader(
tensordata_train, batch_size=batch_size, shuffle=True)
if test_data:
X_test = tor(X_test).to(device_)
y_test = tor(y_test).to(device_)
tensordata_test = torch.utils.data.TensorDataset(X_test, y_test)
test_iterator = torch.utils.data.DataLoader(
tensordata_test, batch_size=batch_size)
return train_iterator, test_iterator
def tor(arr: Union[np.ndarray, torch.Tensor],
out_type: str = "float") -> torch.Tensor:
"""
Convertor to PyTorch tensor ('float' or 'long')
"""
if not isinstance(arr, (np.ndarray, torch.Tensor)):
raise NotImplementedError("Provide data as numpy array or torch tensor")
if isinstance(arr, np.ndarray):
arr = torch.from_numpy(arr)
if out_type == "float":
arr = arr.float()
elif out_type == "long":
arr = arr.long()
return arr
|
<commit_before><commit_msg>Add utils for initalizing dataloaders<commit_after>from typing import Tuple, Type, Union
import torch
import numpy as np
def init_dataloaders(X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray,
y_test: np.ndarray,
batch_size: int
) -> Tuple[Type[torch.utils.data.DataLoader]]:
"""
Initializes train and test data loaders
"""
test_iterator = None
test_data = X_test is not None and y_test is not None
device_ = 'cuda' if torch.cuda.is_available() else 'cpu'
X_train = tor(X_train).to(device_)
y_train = tor(y_train).to(device_)
tensordata_train = torch.utils.data.TensorDataset(X_train, y_train)
train_iterator = torch.utils.data.DataLoader(
tensordata_train, batch_size=batch_size, shuffle=True)
if test_data:
X_test = tor(X_test).to(device_)
y_test = tor(y_test).to(device_)
tensordata_test = torch.utils.data.TensorDataset(X_test, y_test)
test_iterator = torch.utils.data.DataLoader(
tensordata_test, batch_size=batch_size)
return train_iterator, test_iterator
def tor(arr: Union[np.ndarray, torch.Tensor],
out_type: str = "float") -> torch.Tensor:
"""
Convertor to PyTorch tensor ('float' or 'long')
"""
if not isinstance(arr, (np.ndarray, torch.Tensor)):
raise NotImplementedError("Provide data as numpy array or torch tensor")
if isinstance(arr, np.ndarray):
arr = torch.from_numpy(arr)
if out_type == "float":
arr = arr.float()
elif out_type == "long":
arr = arr.long()
return arr
|
|
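With the two helpers above in scope, a usage sketch for init_dataloaders on random arrays (shapes are arbitrary; assumes PyTorch is installed and, as the helper already does, falls back to CPU when CUDA is absent):
X = np.random.randn(64, 1, 8, 8).astype(np.float32)
y = np.random.randint(0, 2, size=64).astype(np.float32)
train_iter, test_iter = init_dataloaders(X, y, X_test=None, y_test=None, batch_size=16)
assert test_iter is None                    # no test split was supplied
xb, yb = next(iter(train_iter))
print(xb.shape, yb.shape)                   # torch.Size([16, 1, 8, 8]) torch.Size([16])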
ee01ba77e67d5bc03a14e33cc7b29a4c247b8d55
|
mapit/management/commands/mapit_delete_areas_from_new_generation.py
|
mapit/management/commands/mapit_delete_areas_from_new_generation.py
|
# This script deletes all the areas from the new generation (i.e. the
# most recent inactive one).
from optparse import make_option
from django.core.management.base import NoArgsCommand
from mapit.models import Generation, Area
class Command(NoArgsCommand):
help = 'Remove all areas from the new (inactive) generation'
args = '<GENERATION-ID>'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit',
help='Actually update the database'),)
def handle(self, **options):
new = Generation.objects.new()
if not new:
raise CommandError, "There's no new inactive generation to delete areas from"
generations = Generation.objects.all().order_by('id')
if len(generation) <= 1:
previous_generation = None
else:
previous_generation = generations[-2]
for area in Area.objects.filter(generation_low__lte=new, generation_high__gte=new):
print "Considering", area
g_low = area.generation_low
g_high = area.generation_high
if g_low not in generations:
raise Exception, "area.generation_low was " + g_low + ", which no longer exists!"
if g_high not in generations:
raise Exception, "area.generation_high was " + g_high + ", which no longer exists!"
if area.generation_low == new and area.generation_high == new:
print " ... only exists in", new, " so will delete"
if options['commit']:
area.delete()
print " ... deleted."
else:
print " ... not deleting, since --commit wasn't specified"
elif area.generation_low < new and area.generation_high == new:
print " ... still exists in an earlier generation, so lowering generation_high to", previous_generation
area.generation_high = previous_generation
if options['commit']:
area.save()
print " ... lowered."
else:
print " ... not lowering, since --commit wasn't specified"
elif area.generation_high > new:
# This should never happen - it'd mean the
# implementation of Generation.objects.new() has
# changed or something else is badly wrong:
raise Exception, "Somehow area.generation_high (" + area.generation_high + ") is after Generation.objects.new() (" + new + ")"
|
Add a command to delete areas for a new (inactive) generation
|
Add a command to delete areas for a new (inactive) generation
|
Python
|
agpl-3.0
|
Sinar/mapit,opencorato/mapit,opencorato/mapit,Code4SA/mapit,chris48s/mapit,Sinar/mapit,opencorato/mapit,New-Bamboo/mapit,New-Bamboo/mapit,Code4SA/mapit,Code4SA/mapit,chris48s/mapit,chris48s/mapit
|
Add a command to delete areas for a new (inactive) generation
|
# This script deletes all the areas from the new generation (i.e. the
# most recent inactive one).
from optparse import make_option
from django.core.management.base import NoArgsCommand
from mapit.models import Generation, Area
class Command(NoArgsCommand):
help = 'Remove all areas from the new (inactive) generation'
args = '<GENERATION-ID>'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit',
help='Actually update the database'),)
def handle(self, **options):
new = Generation.objects.new()
if not new:
raise CommandError, "There's no new inactive generation to delete areas from"
generations = Generation.objects.all().order_by('id')
if len(generation) <= 1:
previous_generation = None
else:
previous_generation = generations[-2]
for area in Area.objects.filter(generation_low__lte=new, generation_high__gte=new):
print "Considering", area
g_low = area.generation_low
g_high = area.generation_high
if g_low not in generations:
raise Exception, "area.generation_low was " + g_low + ", which no longer exists!"
if g_high not in generations:
raise Exception, "area.generation_high was " + g_high + ", which no longer exists!"
if area.generation_low == new and area.generation_high == new:
print " ... only exists in", new, " so will delete"
if options['commit']:
area.delete()
print " ... deleted."
else:
print " ... not deleting, since --commit wasn't specified"
elif area.generation_low < new and area.generation_high == new:
print " ... still exists in an earlier generation, so lowering generation_high to", previous_generation
area.generation_high = previous_generation
if options['commit']:
area.save()
print " ... lowered."
else:
print " ... not lowering, since --commit wasn't specified"
elif area.generation_high > new:
# This should never happen - it'd mean the
# implementation of Generation.objects.new() has
# changed or something else is badly wrong:
raise Exception, "Somehow area.generation_high (" + area.generation_high + ") is after Generation.objects.new() (" + new + ")"
|
<commit_before><commit_msg>Add a command to delete areas for a new (inactive) generation<commit_after>
|
# This script deletes all the areas from the new generation (i.e. the
# most recent inactive one).
from optparse import make_option
from django.core.management.base import NoArgsCommand
from mapit.models import Generation, Area
class Command(NoArgsCommand):
help = 'Remove all areas from the new (inactive) generation'
args = '<GENERATION-ID>'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit',
help='Actually update the database'),)
def handle(self, **options):
new = Generation.objects.new()
if not new:
raise CommandError, "There's no new inactive generation to delete areas from"
generations = Generation.objects.all().order_by('id')
if len(generation) <= 1:
previous_generation = None
else:
previous_generation = generations[-2]
for area in Area.objects.filter(generation_low__lte=new, generation_high__gte=new):
print "Considering", area
g_low = area.generation_low
g_high = area.generation_high
if g_low not in generations:
raise Exception, "area.generation_low was " + g_low + ", which no longer exists!"
if g_high not in generations:
raise Exception, "area.generation_high was " + g_high + ", which no longer exists!"
if area.generation_low == new and area.generation_high == new:
print " ... only exists in", new, " so will delete"
if options['commit']:
area.delete()
print " ... deleted."
else:
print " ... not deleting, since --commit wasn't specified"
elif area.generation_low < new and area.generation_high == new:
print " ... still exists in an earlier generation, so lowering generation_high to", previous_generation
area.generation_high = previous_generation
if options['commit']:
area.save()
print " ... lowered."
else:
print " ... not lowering, since --commit wasn't specified"
elif area.generation_high > new:
# This should never happen - it'd mean the
# implementation of Generation.objects.new() has
# changed or something else is badly wrong:
raise Exception, "Somehow area.generation_high (" + area.generation_high + ") is after Generation.objects.new() (" + new + ")"
|
Add a command to delete areas for a new (inactive) generation# This script deletes all the areas from the new generation (i.e. the
# most recent inactive one).
from optparse import make_option
from django.core.management.base import NoArgsCommand
from mapit.models import Generation, Area
class Command(NoArgsCommand):
help = 'Remove all areas from the new (inactive) generation'
args = '<GENERATION-ID>'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit',
help='Actually update the database'),)
def handle(self, **options):
new = Generation.objects.new()
if not new:
raise CommandError, "There's no new inactive generation to delete areas from"
generations = Generation.objects.all().order_by('id')
if len(generation) <= 1:
previous_generation = None
else:
previous_generation = generations[-2]
for area in Area.objects.filter(generation_low__lte=new, generation_high__gte=new):
print "Considering", area
g_low = area.generation_low
g_high = area.generation_high
if g_low not in generations:
raise Exception, "area.generation_low was " + g_low + ", which no longer exists!"
if g_high not in generations:
raise Exception, "area.generation_high was " + g_high + ", which no longer exists!"
if area.generation_low == new and area.generation_high == new:
print " ... only exists in", new, " so will delete"
if options['commit']:
area.delete()
print " ... deleted."
else:
print " ... not deleting, since --commit wasn't specified"
elif area.generation_low < new and area.generation_high == new:
print " ... still exists in an earlier generation, so lowering generation_high to", previous_generation
area.generation_high = previous_generation
if options['commit']:
area.save()
print " ... lowered."
else:
print " ... not lowering, since --commit wasn't specified"
elif area.generation_high > new:
# This should never happen - it'd mean the
# implementation of Generation.objects.new() has
# changed or something else is badly wrong:
raise Exception, "Somehow area.generation_high (" + area.generation_high + ") is after Generation.objects.new() (" + new + ")"
|
<commit_before><commit_msg>Add a command to delete areas for a new (inactive) generation<commit_after># This script deletes all the areas from the new generation (i.e. the
# most recent inactive one).
from optparse import make_option
from django.core.management.base import NoArgsCommand
from mapit.models import Generation, Area
class Command(NoArgsCommand):
help = 'Remove all areas from the new (inactive) generation'
args = '<GENERATION-ID>'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit',
help='Actually update the database'),)
def handle(self, **options):
new = Generation.objects.new()
if not new:
raise CommandError, "There's no new inactive generation to delete areas from"
generations = Generation.objects.all().order_by('id')
if len(generation) <= 1:
previous_generation = None
else:
previous_generation = generations[-2]
for area in Area.objects.filter(generation_low__lte=new, generation_high__gte=new):
print "Considering", area
g_low = area.generation_low
g_high = area.generation_high
if g_low not in generations:
raise Exception, "area.generation_low was " + g_low + ", which no longer exists!"
if g_high not in generations:
raise Exception, "area.generation_high was " + g_high + ", which no longer exists!"
if area.generation_low == new and area.generation_high == new:
print " ... only exists in", new, " so will delete"
if options['commit']:
area.delete()
print " ... deleted."
else:
print " ... not deleting, since --commit wasn't specified"
elif area.generation_low < new and area.generation_high == new:
print " ... still exists in an earlier generation, so lowering generation_high to", previous_generation
area.generation_high = previous_generation
if options['commit']:
area.save()
print " ... lowered."
else:
print " ... not lowering, since --commit wasn't specified"
elif area.generation_high > new:
# This should never happen - it'd mean the
# implementation of Generation.objects.new() has
# changed or something else is badly wrong:
raise Exception, "Somehow area.generation_high (" + area.generation_high + ") is after Generation.objects.new() (" + new + ")"
|
|
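The command above is Python 2 era code (print statements, comma-style raise), and `len(generation)` presumably refers to the `generations` queryset defined just before it; a self-contained sketch of the delete-or-lower decision it applies to each area (field names are stand-ins, not the MapIt models):
def plan_area_update(generation_low, generation_high, new_gen):
    """Return 'delete', 'lower', or 'keep' for one area."""
    if generation_low == new_gen and generation_high == new_gen:
        return "delete"          # area only exists in the new generation
    if generation_low < new_gen and generation_high == new_gen:
        return "lower"           # shrink generation_high to the previous generation
    return "keep"
assert plan_area_update(5, 5, new_gen=5) == "delete"
assert plan_area_update(3, 5, new_gen=5) == "lower"
assert plan_area_update(3, 4, new_gen=5) == "keep"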
6fd8b14cd57a05a0eeb840236e437c452a77d1cd
|
support/strip-leading-comments.py
|
support/strip-leading-comments.py
|
#!/bin/perl
# $Id$
import os, sys
from stat import *
def walktree(dir, callback):
'''recursively descend the directory rooted at dir,
calling the callback function for each regular file'''
print dir
for f in os.listdir(dir):
pathname = '%s/%s' % (dir, f)
mode = os.stat(pathname)[ST_MODE]
if S_ISDIR(mode):
# It's a directory, recurse into it
walktree(pathname, callback)
elif S_ISREG(mode):
# It's a file, call the callback function
callback(pathname)
else:
# Unknown file type, print a message
print 'Skipping %s' % pathname
def visitfile(file):
print file
if __name__ == '__main__':
walktree(sys.argv[1], visitfile)
|
Switch license from LGPL to ASL.
|
Switch license from LGPL to ASL.
git-svn-id: 1006245cb1bdea2fd4c9095e50bc2ef3d167b18c@243048 13f79535-47bb-0310-9956-ffa450edef68
|
Python
|
apache-2.0
|
apache/tapestry3,apache/tapestry3,apache/tapestry3,apache/tapestry3
|
Switch license from LGPL to ASL.
git-svn-id: 1006245cb1bdea2fd4c9095e50bc2ef3d167b18c@243048 13f79535-47bb-0310-9956-ffa450edef68
|
#!/bin/perl
# $Id$
import os, sys
from stat import *
def walktree(dir, callback):
'''recursively descend the directory rooted at dir,
calling the callback function for each regular file'''
print dir
for f in os.listdir(dir):
pathname = '%s/%s' % (dir, f)
mode = os.stat(pathname)[ST_MODE]
if S_ISDIR(mode):
# It's a directory, recurse into it
walktree(pathname, callback)
elif S_ISREG(mode):
# It's a file, call the callback function
callback(pathname)
else:
# Unknown file type, print a message
print 'Skipping %s' % pathname
def visitfile(file):
print file
if __name__ == '__main__':
walktree(sys.argv[1], visitfile)
|
<commit_before><commit_msg>Switch license from LGPL to ASL.
git-svn-id: 1006245cb1bdea2fd4c9095e50bc2ef3d167b18c@243048 13f79535-47bb-0310-9956-ffa450edef68<commit_after>
|
#!/bin/perl
# $Id$
import os, sys
from stat import *
def walktree(dir, callback):
'''recursively descend the directory rooted at dir,
calling the callback function for each regular file'''
print dir
for f in os.listdir(dir):
pathname = '%s/%s' % (dir, f)
mode = os.stat(pathname)[ST_MODE]
if S_ISDIR(mode):
# It's a directory, recurse into it
walktree(pathname, callback)
elif S_ISREG(mode):
# It's a file, call the callback function
callback(pathname)
else:
# Unknown file type, print a message
print 'Skipping %s' % pathname
def visitfile(file):
print file
if __name__ == '__main__':
walktree(sys.argv[1], visitfile)
|
Switch license from LGPL to ASL.
git-svn-id: 1006245cb1bdea2fd4c9095e50bc2ef3d167b18c@243048 13f79535-47bb-0310-9956-ffa450edef68#!/bin/perl
# $Id$
import os, sys
from stat import *
def walktree(dir, callback):
'''recursively descend the directory rooted at dir,
calling the callback function for each regular file'''
print dir
for f in os.listdir(dir):
pathname = '%s/%s' % (dir, f)
mode = os.stat(pathname)[ST_MODE]
if S_ISDIR(mode):
# It's a directory, recurse into it
walktree(pathname, callback)
elif S_ISREG(mode):
# It's a file, call the callback function
callback(pathname)
else:
# Unknown file type, print a message
print 'Skipping %s' % pathname
def visitfile(file):
print file
if __name__ == '__main__':
walktree(sys.argv[1], visitfile)
|
<commit_before><commit_msg>Switch license from LGPL to ASL.
git-svn-id: 1006245cb1bdea2fd4c9095e50bc2ef3d167b18c@243048 13f79535-47bb-0310-9956-ffa450edef68<commit_after>#!/bin/perl
# $Id$
import os, sys
from stat import *
def walktree(dir, callback):
'''recursively descend the directory rooted at dir,
calling the callback function for each regular file'''
print dir
for f in os.listdir(dir):
pathname = '%s/%s' % (dir, f)
mode = os.stat(pathname)[ST_MODE]
if S_ISDIR(mode):
# It's a directory, recurse into it
walktree(pathname, callback)
elif S_ISREG(mode):
# It's a file, call the callback function
callback(pathname)
else:
# Unknown file type, print a message
print 'Skipping %s' % pathname
def visitfile(file):
print file
if __name__ == '__main__':
walktree(sys.argv[1], visitfile)
|
|
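The walker above hand-rolls the directory recursion; a hedged equivalent built on os.walk that calls the callback for every file below a root:
import os, sys
def walk_files(root, callback):
    """Call `callback(path)` for each file found under `root`."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            callback(os.path.join(dirpath, name))
if __name__ == "__main__":
    walk_files(sys.argv[1], print)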
192f0c8ce32328dc8079edf0072e423431d4749a
|
nodeconductor/template/migrations/0006_templategroup_tags.py
|
nodeconductor/template/migrations/0006_templategroup_tags.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('template', '0005_new_templates_models'),
]
operations = [
migrations.AddField(
model_name='templategroup',
name='tags',
field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
]
|
Add template tags to template groups
|
Add template tags to template groups
- nc-959
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Add template tags to template groups
- nc-959
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('template', '0005_new_templates_models'),
]
operations = [
migrations.AddField(
model_name='templategroup',
name='tags',
field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add template tags to template groups
- nc-959<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('template', '0005_new_templates_models'),
]
operations = [
migrations.AddField(
model_name='templategroup',
name='tags',
field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
]
|
Add template tags to template groups
- nc-959# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('template', '0005_new_templates_models'),
]
operations = [
migrations.AddField(
model_name='templategroup',
name='tags',
field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add template tags to template groups
- nc-959<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('template', '0005_new_templates_models'),
]
operations = [
migrations.AddField(
model_name='templategroup',
name='tags',
field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
]
|
|
e9998ce3b367d70a54a5c2f8150048e7be1634ee
|
csunplugged/tests/utils/translatable_model_loader/test_TranslatableModelLoader.py
|
csunplugged/tests/utils/translatable_model_loader/test_TranslatableModelLoader.py
|
"""Test class for TranslatableModelLoader."""
from django.test import SimpleTestCase
class TranslatableModelLoaderTest(SimpleTestCase):
"""Test class for TranslatableModelLoader."""
def test_get_yaml_translations_english(self):
pass
def test_get_yaml_translations_english_missing_reqd_field(self):
pass
def test_get_yaml_translations_english_missing_reqd_slug(self):
pass
def test_get_yaml_translations_field_map(self):
pass
def test_get_yaml_translations_translated(self):
pass
def test_get_yaml_translations_translated_missing_reqd_field(self):
pass
def test_get_yaml_translations_translated_missing_reqd_slug(self):
pass
def test_get_yaml_translation_missing_yaml(self):
pass
def test_get_markdown_translations_english(self):
pass
def test_get_markdown_translation_english_missing_file(self):
pass
def test_get_markdown_translations_translated(self):
pass
def test_get_markdown_translation_translated_missing_file(self):
pass
def test_populate_translations(self):
pass
def test_mark_translation_availability_all_required_fields_present(self):
pass
def test_mark_translation_availability_required_field_missing(self):
pass
def test_mark_translation_availability_required_fields_not_given(self):
pass
def test_get_blank_translation_dictionary(self):
pass
|
Add test definitions for TranslatableModelLoader unit tests
|
Add test definitions for TranslatableModelLoader unit tests
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add test definitions for TranslatableModelLoader unit tests
|
"""Test class for TranslatableModelLoader."""
from django.test import SimpleTestCase
class TranslatableModelLoaderTest(SimpleTestCase):
"""Test class for TranslatableModelLoader."""
def test_get_yaml_translations_english(self):
pass
def test_get_yaml_translations_english_missing_reqd_field(self):
pass
def test_get_yaml_translations_english_missing_reqd_slug(self):
pass
def test_get_yaml_translations_field_map(self):
pass
def test_get_yaml_translations_translated(self):
pass
def test_get_yaml_translations_translated_missing_reqd_field(self):
pass
def test_get_yaml_translations_translated_missing_reqd_slug(self):
pass
def test_get_yaml_translation_missing_yaml(self):
pass
def test_get_markdown_translations_english(self):
pass
def test_get_markdown_translation_english_missing_file(self):
pass
def test_get_markdown_translations_translated(self):
pass
def test_get_markdown_translation_translated_missing_file(self):
pass
def test_populate_translations(self):
pass
def test_mark_translation_availability_all_required_fields_present(self):
pass
def test_mark_translation_availability_required_field_missing(self):
pass
def test_mark_translation_availability_required_fields_not_given(self):
pass
def test_get_blank_translation_dictionary(self):
pass
|
<commit_before><commit_msg>Add test definitions for TranslatableModelLoader unit tests<commit_after>
|
"""Test class for TranslatableModelLoader."""
from django.test import SimpleTestCase
class TranslatableModelLoaderTest(SimpleTestCase):
"""Test class for TranslatableModelLoader."""
def test_get_yaml_translations_english(self):
pass
def test_get_yaml_translations_english_missing_reqd_field(self):
pass
def test_get_yaml_translations_english_missing_reqd_slug(self):
pass
def test_get_yaml_translations_field_map(self):
pass
def test_get_yaml_translations_translated(self):
pass
def test_get_yaml_translations_translated_missing_reqd_field(self):
pass
def test_get_yaml_translations_translated_missing_reqd_slug(self):
pass
def test_get_yaml_translation_missing_yaml(self):
pass
def test_get_markdown_translations_english(self):
pass
def test_get_markdown_translation_english_missing_file(self):
pass
def test_get_markdown_translations_translated(self):
pass
def test_get_markdown_translation_translated_missing_file(self):
pass
def test_populate_translations(self):
pass
def test_mark_translation_availability_all_required_fields_present(self):
pass
def test_mark_translation_availability_required_field_missing(self):
pass
def test_mark_translation_availability_required_fields_not_given(self):
pass
def test_get_blank_translation_dictionary(self):
pass
|
Add test definitions for TranslatableModelLoader unit tests"""Test class for TranslatableModelLoader."""
from django.test import SimpleTestCase
class TranslatableModelLoaderTest(SimpleTestCase):
"""Test class for TranslatableModelLoader."""
def test_get_yaml_translations_english(self):
pass
def test_get_yaml_translations_english_missing_reqd_field(self):
pass
def test_get_yaml_translations_english_missing_reqd_slug(self):
pass
def test_get_yaml_translations_field_map(self):
pass
def test_get_yaml_translations_translated(self):
pass
def test_get_yaml_translations_translated_missing_reqd_field(self):
pass
def test_get_yaml_translations_translated_missing_reqd_slug(self):
pass
def test_get_yaml_translation_missing_yaml(self):
pass
def test_get_markdown_translations_english(self):
pass
def test_get_markdown_translation_english_missing_file(self):
pass
def test_get_markdown_translations_translated(self):
pass
def test_get_markdown_translation_translated_missing_file(self):
pass
def test_populate_translations(self):
pass
def test_mark_translation_availability_all_required_fields_present(self):
pass
def test_mark_translation_availability_required_field_missing(self):
pass
def test_mark_translation_availability_required_fields_not_given(self):
pass
def test_get_blank_translation_dictionary(self):
pass
|
<commit_before><commit_msg>Add test definitions for TranslatableModelLoader unit tests<commit_after>"""Test class for TranslatableModelLoader."""
from django.test import SimpleTestCase
class TranslatableModelLoaderTest(SimpleTestCase):
"""Test class for TranslatableModelLoader."""
def test_get_yaml_translations_english(self):
pass
def test_get_yaml_translations_english_missing_reqd_field(self):
pass
def test_get_yaml_translations_english_missing_reqd_slug(self):
pass
def test_get_yaml_translations_field_map(self):
pass
def test_get_yaml_translations_translated(self):
pass
def test_get_yaml_translations_translated_missing_reqd_field(self):
pass
def test_get_yaml_translations_translated_missing_reqd_slug(self):
pass
def test_get_yaml_translation_missing_yaml(self):
pass
def test_get_markdown_translations_english(self):
pass
def test_get_markdown_translation_english_missing_file(self):
pass
def test_get_markdown_translations_translated(self):
pass
def test_get_markdown_translation_translated_missing_file(self):
pass
def test_populate_translations(self):
pass
def test_mark_translation_availability_all_required_fields_present(self):
pass
def test_mark_translation_availability_required_field_missing(self):
pass
def test_mark_translation_availability_required_fields_not_given(self):
pass
def test_get_blank_translation_dictionary(self):
pass
|
|
ef7558c2f0f6f32a8326decaf44c6fb86bd48fc1
|
falmer/content/migrations/0008_auto_20170530_1519.py
|
falmer/content/migrations/0008_auto_20170530_1519.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-30 15:19
from __future__ import unicode_literals
from django.db import migrations
import falmer.content.blocks
import falmer.content.models.selection_grid
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0007_auto_20170530_1048'),
]
operations = [
migrations.RemoveField(
model_name='selectiongridpage',
name='heading_image',
),
migrations.RemoveField(
model_name='selectiongridpage',
name='show_heading_hero',
),
migrations.AlterField(
model_name='selectiongridpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading_hero', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(help_text='Leave empty to use the page title', required=False)), ('image', falmer.content.blocks.ImageBlock())))), ('selection_grid', wagtail.wagtailcore.blocks.ListBlock(falmer.content.models.selection_grid.GridItem)))),
),
]
|
Remove static selectiongrid hero fields
|
Remove static selectiongrid hero fields
|
Python
|
mit
|
sussexstudent/falmer,sussexstudent/falmer,sussexstudent/falmer,sussexstudent/falmer
|
Remove static selectiongrid hero fields
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-30 15:19
from __future__ import unicode_literals
from django.db import migrations
import falmer.content.blocks
import falmer.content.models.selection_grid
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0007_auto_20170530_1048'),
]
operations = [
migrations.RemoveField(
model_name='selectiongridpage',
name='heading_image',
),
migrations.RemoveField(
model_name='selectiongridpage',
name='show_heading_hero',
),
migrations.AlterField(
model_name='selectiongridpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading_hero', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(help_text='Leave empty to use the page title', required=False)), ('image', falmer.content.blocks.ImageBlock())))), ('selection_grid', wagtail.wagtailcore.blocks.ListBlock(falmer.content.models.selection_grid.GridItem)))),
),
]
|
<commit_before><commit_msg>Remove static selectiongrid hero fields<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-30 15:19
from __future__ import unicode_literals
from django.db import migrations
import falmer.content.blocks
import falmer.content.models.selection_grid
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0007_auto_20170530_1048'),
]
operations = [
migrations.RemoveField(
model_name='selectiongridpage',
name='heading_image',
),
migrations.RemoveField(
model_name='selectiongridpage',
name='show_heading_hero',
),
migrations.AlterField(
model_name='selectiongridpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading_hero', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(help_text='Leave empty to use the page title', required=False)), ('image', falmer.content.blocks.ImageBlock())))), ('selection_grid', wagtail.wagtailcore.blocks.ListBlock(falmer.content.models.selection_grid.GridItem)))),
),
]
|
Remove static selectiongrid hero fields# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-30 15:19
from __future__ import unicode_literals
from django.db import migrations
import falmer.content.blocks
import falmer.content.models.selection_grid
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0007_auto_20170530_1048'),
]
operations = [
migrations.RemoveField(
model_name='selectiongridpage',
name='heading_image',
),
migrations.RemoveField(
model_name='selectiongridpage',
name='show_heading_hero',
),
migrations.AlterField(
model_name='selectiongridpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading_hero', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(help_text='Leave empty to use the page title', required=False)), ('image', falmer.content.blocks.ImageBlock())))), ('selection_grid', wagtail.wagtailcore.blocks.ListBlock(falmer.content.models.selection_grid.GridItem)))),
),
]
|
<commit_before><commit_msg>Remove static selectiongrid hero fields<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-30 15:19
from __future__ import unicode_literals
from django.db import migrations
import falmer.content.blocks
import falmer.content.models.selection_grid
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0007_auto_20170530_1048'),
]
operations = [
migrations.RemoveField(
model_name='selectiongridpage',
name='heading_image',
),
migrations.RemoveField(
model_name='selectiongridpage',
name='show_heading_hero',
),
migrations.AlterField(
model_name='selectiongridpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading_hero', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(help_text='Leave empty to use the page title', required=False)), ('image', falmer.content.blocks.ImageBlock())))), ('selection_grid', wagtail.wagtailcore.blocks.ListBlock(falmer.content.models.selection_grid.GridItem)))),
),
]
|
|
26dec2182012ab6206d79235526f678480bc3601
|
vpr/vpr_content/utils.py
|
vpr/vpr_content/utils.py
|
from django.db.models import Q
from models import Material, Person
def get_page(pid, qset, per_page=20):
""" Returns item list of specific page in result
pid: ID of page, start from 1
"""
start_on = (pid-1)*per_page
res = qset[start_on:start_on+per_page]
return res
class MaterialScanner(object):
"""docstring for MaterialScanner"""
extract_fields = ['id', 'material_type', 'title', 'material_id', 'version']
def __init__(self, **kwargs):
super(MaterialScanner, self).__init__()
self.m = Material.objects
self.per_page = 20
def filter_description(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(description__isnull=null)
if null:
q = q | Q(description__regex='^(\s|-)*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_image(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(image__isnull=null)
return self.m.filter(q).values(*self.extract_fields)
def filter_keywords(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(keywords__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_language(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(language__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_categories(self, null=True):
q = Q(categories__isnull=null)
if null:
q = q | Q(categories__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_author(self):
sql0 = 'SELECT DISTINCT(material_rid) FROM vpr_content_materialperson WHERE role=0'
sql1 = 'SELECT %s FROM vpr_content_material WHERE id NOT IN (%s)' % (','.join(self.extract_fields), sql0)
res = self.m.raw(sql1)
return res
def filter_text(self, text_limit=500):
sql1 = 'SELECT %s FROM vpr_content_material WHERE CHAR_LENGTH(text)<%d' % (','.join(self.extract_fields), text_limit)
res = self.m.extra(where=['CHAR_LENGTH(text)<500']).values(*self.extract_fields)
return res
|
Add functions for filtering mal-formed materials
|
Add functions for filtering mal-formed materials
|
Python
|
agpl-3.0
|
voer-platform/vp.repo,voer-platform/vp.repo,voer-platform/vp.repo,voer-platform/vp.repo
|
Add functions for filtering mal-formed materials
|
from django.db.models import Q
from models import Material, Person
def get_page(pid, qset, per_page=20):
""" Returns item list of specific page in result
pid: ID of page, start from 1
"""
start_on = (pid-1)*per_page
res = qset[start_on:start_on+per_page]
return res
class MaterialScanner(object):
"""docstring for MaterialScanner"""
extract_fields = ['id', 'material_type', 'title', 'material_id', 'version']
def __init__(self, **kwargs):
super(MaterialScanner, self).__init__()
self.m = Material.objects
self.per_page = 20
def filter_description(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(description__isnull=null)
if null:
q = q | Q(description__regex='^(\s|-)*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_image(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(image__isnull=null)
return self.m.filter(q).values(*self.extract_fields)
def filter_keywords(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(keywords__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_language(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(language__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_categories(self, null=True):
q = Q(categories__isnull=null)
if null:
q = q | Q(categories__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_author(self):
sql0 = 'SELECT DISTINCT(material_rid) FROM vpr_content_materialperson WHERE role=0'
sql1 = 'SELECT %s FROM vpr_content_material WHERE id NOT IN (%s)' % (','.join(self.extract_fields), sql0)
res = self.m.raw(sql1)
return res
def filter_text(self, text_limit=500):
sql1 = 'SELECT %s FROM vpr_content_material WHERE CHAR_LENGTH(text)<%d' % (','.join(self.extract_fields), text_limit)
res = self.m.extra(where=['CHAR_LENGTH(text)<500']).values(*self.extract_fields)
return res
|
<commit_before><commit_msg>Add functions for filtering mal-formed materials<commit_after>
|
from django.db.models import Q
from models import Material, Person
def get_page(pid, qset, per_page=20):
""" Returns item list of specific page in result
pid: ID of page, start from 1
"""
start_on = (pid-1)*per_page
res = qset[start_on:start_on+per_page]
return res
class MaterialScanner(object):
"""docstring for MaterialScanner"""
extract_fields = ['id', 'material_type', 'title', 'material_id', 'version']
def __init__(self, **kwargs):
super(MaterialScanner, self).__init__()
self.m = Material.objects
self.per_page = 20
def filter_description(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(description__isnull=null)
if null:
q = q | Q(description__regex='^(\s|-)*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_image(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(image__isnull=null)
return self.m.filter(q).values(*self.extract_fields)
def filter_keywords(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(keywords__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_language(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(language__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_categories(self, null=True):
q = Q(categories__isnull=null)
if null:
q = q | Q(categories__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_author(self):
sql0 = 'SELECT DISTINCT(material_rid) FROM vpr_content_materialperson WHERE role=0'
sql1 = 'SELECT %s FROM vpr_content_material WHERE id NOT IN (%s)' % (','.join(self.extract_fields), sql0)
res = self.m.raw(sql1)
return res
def filter_text(self, text_limit=500):
sql1 = 'SELECT %s FROM vpr_content_material WHERE CHAR_LENGTH(text)<%d' % (','.join(self.extract_fields), text_limit)
res = self.m.extra(where=['CHAR_LENGTH(text)<500']).values(*self.extract_fields)
return res
|
Add functions for filtering mal-formed materialsfrom django.db.models import Q
from models import Material, Person
def get_page(pid, qset, per_page=20):
""" Returns item list of specific page in result
pid: ID of page, start from 1
"""
start_on = (pid-1)*per_page
res = qset[start_on:start_on+per_page]
return res
class MaterialScanner(object):
"""docstring for MaterialScanner"""
extract_fields = ['id', 'material_type', 'title', 'material_id', 'version']
def __init__(self, **kwargs):
super(MaterialScanner, self).__init__()
self.m = Material.objects
self.per_page = 20
def filter_description(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(description__isnull=null)
if null:
q = q | Q(description__regex='^(\s|-)*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_image(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(image__isnull=null)
return self.m.filter(q).values(*self.extract_fields)
def filter_keywords(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(keywords__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_language(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(language__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_categories(self, null=True):
q = Q(categories__isnull=null)
if null:
q = q | Q(categories__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_author(self):
sql0 = 'SELECT DISTINCT(material_rid) FROM vpr_content_materialperson WHERE role=0'
sql1 = 'SELECT %s FROM vpr_content_material WHERE id NOT IN (%s)' % (','.join(self.extract_fields), sql0)
res = self.m.raw(sql1)
return res
def filter_text(self, text_limit=500):
sql1 = 'SELECT %s FROM vpr_content_material WHERE CHAR_LENGTH(text)<%d' % (','.join(self.extract_fields), text_limit)
res = self.m.extra(where=['CHAR_LENGTH(text)<500']).values(*self.extract_fields)
return res
|
<commit_before><commit_msg>Add functions for filtering mal-formed materials<commit_after>from django.db.models import Q
from models import Material, Person
def get_page(pid, qset, per_page=20):
""" Returns item list of specific page in result
pid: ID of page, start from 1
"""
start_on = (pid-1)*per_page
res = qset[start_on:start_on+per_page]
return res
class MaterialScanner(object):
"""docstring for MaterialScanner"""
extract_fields = ['id', 'material_type', 'title', 'material_id', 'version']
def __init__(self, **kwargs):
super(MaterialScanner, self).__init__()
self.m = Material.objects
self.per_page = 20
def filter_description(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(description__isnull=null)
if null:
q = q | Q(description__regex='^(\s|-)*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_image(self, null=True):
""" Return list of material having blank or no-blank description
"""
q = Q(image__isnull=null)
return self.m.filter(q).values(*self.extract_fields)
def filter_keywords(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(keywords__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_language(self, null=True):
q = Q(keywords__isnull=null)
if null:
q = q | Q(language__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_categories(self, null=True):
q = Q(categories__isnull=null)
if null:
q = q | Q(categories__regex='^\s*$')
return self.m.filter(q).values(*self.extract_fields)
def filter_author(self):
sql0 = 'SELECT DISTINCT(material_rid) FROM vpr_content_materialperson WHERE role=0'
sql1 = 'SELECT %s FROM vpr_content_material WHERE id NOT IN (%s)' % (','.join(self.extract_fields), sql0)
res = self.m.raw(sql1)
return res
def filter_text(self, text_limit=500):
sql1 = 'SELECT %s FROM vpr_content_material WHERE CHAR_LENGTH(text)<%d' % (','.join(self.extract_fields), text_limit)
res = self.m.extra(where=['CHAR_LENGTH(text)<500']).values(*self.extract_fields)
return res
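To illustrate the get_page helper in the scanner module above, here is a minimal, self-contained sketch; a plain Python list stands in for the Django queryset, and the numbers are invented for the example:

def get_page(pid, qset, per_page=20):
    # Same slicing logic as the helper above: page IDs start at 1.
    start_on = (pid - 1) * per_page
    return qset[start_on:start_on + per_page]

items = list(range(100))  # stand-in for a queryset
assert get_page(1, items, per_page=20) == list(range(0, 20))
assert get_page(3, items, per_page=20) == list(range(40, 60))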
|
|
3628767a0923f9bf728c9445c813725730c8d20b
|
commonsdownloader.py
|
commonsdownloader.py
|
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import urllib2
import logging
DEFAULT_WIDTH = 100
def clean_up_filename(file_name):
"""Return the cleaned-up file title."""
return file_name.strip().replace(' ', '_')
def make_thumb_url(image_name, width):
"""Return the URL to the thumbnail of the file, at the given width."""
base_url = "http://commons.wikimedia.org/w/thumb.php?f=%s&width=%s"
return base_url % (image_name, width)
def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
file_name, _ = os.path.splitext(image_name)
return file_name + '.' + extension
def get_thumbnail_of_file(image_name, width):
"""Return the file contents of the thumbnail of the given file."""
hdr = {'User-Agent': 'Python urllib2'}
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
print e.fp.read()
def download_file(image_name, output_path, width=DEFAULT_WIDTH):
"""Download a given Wikimedia Commons file."""
image_name = clean_up_filename(image_name)
logging.info("Downloading %s with width %s", image_name, width)
contents, output_file_name = get_thumbnail_of_file(image_name, width)
output_file_path = os.path.join(output_path, output_file_name)
with open(output_file_path, 'w') as f:
f.write(contents)
|
Add module to download thumbnails from Commons
|
Add module to download thumbnails from Commons
This module allows downloading file thumbnails given the image name,
using the module urllib2 to access the MediaWiki API thumb.php.
The entry method is download_file().
|
Python
|
mit
|
Commonists/CommonsDownloader
|
Add module to download thumbnails from Commons
This module allows downloading file thumbnails given the image name,
using the module urllib2 to access the MediaWiki API thumb.php.
The entry method is download_file().
|
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import urllib2
import logging
DEFAULT_WIDTH = 100
def clean_up_filename(file_name):
"""Return the cleaned-up file title."""
return file_name.strip().replace(' ', '_')
def make_thumb_url(image_name, width):
"""Return the URL to the thumbnail of the file, at the given width."""
base_url = "http://commons.wikimedia.org/w/thumb.php?f=%s&width=%s"
return base_url % (image_name, width)
def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
file_name, _ = os.path.splitext(image_name)
return file_name + '.' + extension
def get_thumbnail_of_file(image_name, width):
"""Return the file contents of the thumbnail of the given file."""
hdr = {'User-Agent': 'Python urllib2'}
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
print e.fp.read()
def download_file(image_name, output_path, width=DEFAULT_WIDTH):
"""Download a given Wikimedia Commons file."""
image_name = clean_up_filename(image_name)
logging.info("Downloading %s with width %s", image_name, width)
contents, output_file_name = get_thumbnail_of_file(image_name, width)
output_file_path = os.path.join(output_path, output_file_name)
with open(output_file_path, 'w') as f:
f.write(contents)
|
<commit_before><commit_msg>Add module to download thumbnails from Commons
This module allows downloading file thumbnails given the image name,
using the module urllib2 to access the MediaWiki API thumb.php.
The entry method is download_file().<commit_after>
|
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import urllib2
import logging
DEFAULT_WIDTH = 100
def clean_up_filename(file_name):
"""Return the cleaned-up file title."""
return file_name.strip().replace(' ', '_')
def make_thumb_url(image_name, width):
"""Return the URL to the thumbnail of the file, at the given width."""
base_url = "http://commons.wikimedia.org/w/thumb.php?f=%s&width=%s"
return base_url % (image_name, width)
def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
file_name, _ = os.path.splitext(image_name)
return file_name + '.' + extension
def get_thumbnail_of_file(image_name, width):
"""Return the file contents of the thumbnail of the given file."""
hdr = {'User-Agent': 'Python urllib2'}
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
print e.fp.read()
def download_file(image_name, output_path, width=DEFAULT_WIDTH):
"""Download a given Wikimedia Commons file."""
image_name = clean_up_filename(image_name)
logging.info("Downloading %s with width %s", image_name, width)
contents, output_file_name = get_thumbnail_of_file(image_name, width)
output_file_path = os.path.join(output_path, output_file_name)
with open(output_file_path, 'w') as f:
f.write(contents)
|
Add module to download thumbnails from Commons
This module allows downloading file thumbnails given the image name,
using the module urllib2 to access the MediaWiki API thumb.php.
The entry method is download_file().# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import urllib2
import logging
DEFAULT_WIDTH = 100
def clean_up_filename(file_name):
"""Return the cleaned-up file title."""
return file_name.strip().replace(' ', '_')
def make_thumb_url(image_name, width):
"""Return the URL to the thumbnail of the file, at the given width."""
base_url = "http://commons.wikimedia.org/w/thumb.php?f=%s&width=%s"
return base_url % (image_name, width)
def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
file_name, _ = os.path.splitext(image_name)
return file_name + '.' + extension
def get_thumbnail_of_file(image_name, width):
"""Return the file contents of the thumbnail of the given file."""
hdr = {'User-Agent': 'Python urllib2'}
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
print e.fp.read()
def download_file(image_name, output_path, width=DEFAULT_WIDTH):
"""Download a given Wikimedia Commons file."""
image_name = clean_up_filename(image_name)
logging.info("Downloading %s with width %s", image_name, width)
contents, output_file_name = get_thumbnail_of_file(image_name, width)
output_file_path = os.path.join(output_path, output_file_name)
with open(output_file_path, 'w') as f:
f.write(contents)
|
<commit_before><commit_msg>Add module to download thumbnails from Commons
This module allows downloading file thumbnails given the image name,
using the module urllib2 to access the MediaWiki API thumb.php.
The entry method is download_file().<commit_after># -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons"""
import os
import urllib2
import logging
DEFAULT_WIDTH = 100
def clean_up_filename(file_name):
"""Return the cleaned-up file title."""
return file_name.strip().replace(' ', '_')
def make_thumb_url(image_name, width):
"""Return the URL to the thumbnail of the file, at the given width."""
base_url = "http://commons.wikimedia.org/w/thumb.php?f=%s&width=%s"
return base_url % (image_name, width)
def make_thumbnail_name(image_name, extension):
"""Return name of the downloaded thumbnail, based on the extension."""
file_name, _ = os.path.splitext(image_name)
return file_name + '.' + extension
def get_thumbnail_of_file(image_name, width):
"""Return the file contents of the thumbnail of the given file."""
hdr = {'User-Agent': 'Python urllib2'}
url = make_thumb_url(image_name, width)
req = urllib2.Request(url, headers=hdr)
try:
opened = urllib2.urlopen(req)
extension = opened.headers.subtype
return opened.read(), make_thumbnail_name(image_name, extension)
except urllib2.HTTPError, e:
print e.fp.read()
def download_file(image_name, output_path, width=DEFAULT_WIDTH):
"""Download a given Wikimedia Commons file."""
image_name = clean_up_filename(image_name)
logging.info("Downloading %s with width %s", image_name, width)
contents, output_file_name = get_thumbnail_of_file(image_name, width)
output_file_path = os.path.join(output_path, output_file_name)
with open(output_file_path, 'w') as f:
f.write(contents)
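As a usage illustration only (not part of the commit above): assuming the module is saved as commonsdownloader.py, run under Python 2 since it depends on urllib2, and that a network connection is available, a download could look like the sketch below; the file title is just an example:

import commonsdownloader

# 'Example.jpg' is an arbitrary Commons file title chosen for illustration.
commonsdownloader.download_file('Example.jpg', '.', width=200)
# A thumbnail is written to the current directory; the saved file's extension
# follows the MIME subtype reported by the server (e.g. Example.jpeg).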
|
|
0aedcaccf0d62093ec2d055bf4d72f05f8a99537
|
app/grandchallenge/annotations/migrations/0003_auto_20190603_1208.py
|
app/grandchallenge/annotations/migrations/0003_auto_20190603_1208.py
|
# Generated by Django 2.1.8 on 2019-06-03 12:08
import django.contrib.postgres.fields
from django.db import migrations, models
import grandchallenge.annotations.models
class Migration(migrations.Migration):
dependencies = [("annotations", "0002_auto_20190510_1046")]
operations = [
migrations.AddField(
model_name="singlepolygonannotation",
name="voxel",
field=models.BooleanField(
default=True,
help_text="Are the coordinates in voxel representation? (As opposed to world coordinates)",
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="x_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.x_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="y_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.y_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="z",
field=models.FloatField(blank=True, null=True),
),
]
|
Add migration for model changes
|
Add migration for model changes
|
Python
|
apache-2.0
|
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
|
Add migration for model changes
|
# Generated by Django 2.1.8 on 2019-06-03 12:08
import django.contrib.postgres.fields
from django.db import migrations, models
import grandchallenge.annotations.models
class Migration(migrations.Migration):
dependencies = [("annotations", "0002_auto_20190510_1046")]
operations = [
migrations.AddField(
model_name="singlepolygonannotation",
name="voxel",
field=models.BooleanField(
default=True,
help_text="Are the coordinates in voxel representation? (As opposed to world coordinates)",
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="x_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.x_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="y_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.y_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="z",
field=models.FloatField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration for model changes<commit_after>
|
# Generated by Django 2.1.8 on 2019-06-03 12:08
import django.contrib.postgres.fields
from django.db import migrations, models
import grandchallenge.annotations.models
class Migration(migrations.Migration):
dependencies = [("annotations", "0002_auto_20190510_1046")]
operations = [
migrations.AddField(
model_name="singlepolygonannotation",
name="voxel",
field=models.BooleanField(
default=True,
help_text="Are the coordinates in voxel representation? (As opposed to world coordinates)",
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="x_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.x_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="y_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.y_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="z",
field=models.FloatField(blank=True, null=True),
),
]
|
Add migration for model changes# Generated by Django 2.1.8 on 2019-06-03 12:08
import django.contrib.postgres.fields
from django.db import migrations, models
import grandchallenge.annotations.models
class Migration(migrations.Migration):
dependencies = [("annotations", "0002_auto_20190510_1046")]
operations = [
migrations.AddField(
model_name="singlepolygonannotation",
name="voxel",
field=models.BooleanField(
default=True,
help_text="Are the coordinates in voxel representation? (As opposed to world coordinates)",
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="x_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.x_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="y_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.y_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="z",
field=models.FloatField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration for model changes<commit_after># Generated by Django 2.1.8 on 2019-06-03 12:08
import django.contrib.postgres.fields
from django.db import migrations, models
import grandchallenge.annotations.models
class Migration(migrations.Migration):
dependencies = [("annotations", "0002_auto_20190510_1046")]
operations = [
migrations.AddField(
model_name="singlepolygonannotation",
name="voxel",
field=models.BooleanField(
default=True,
help_text="Are the coordinates in voxel representation? (As opposed to world coordinates)",
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="x_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.x_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="y_axis_orientation",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.FloatField(),
default=grandchallenge.annotations.models.y_axis_orientation_default,
size=3,
),
),
migrations.AddField(
model_name="singlepolygonannotation",
name="z",
field=models.FloatField(blank=True, null=True),
),
]
|
|
1823d4a41685111b8b70c28ab742b864f205b171
|
006.py
|
006.py
|
"""
Project Euler Problem 6
=======================
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one
hundred natural numbers and the square of the sum.
"""
def task(numbers):
return sum(numbers) ** 2 - sum(n ** 2 for n in numbers)
def test_task():
assert task(range(1, 10 + 1)) == 2640
print(task(range(1, 100 + 1)))
|
Add solution and unit tests for problem 6
|
Add solution and unit tests for problem 6
|
Python
|
mit
|
BeataBak/project-euler-problems
|
Add solution and unit tests for problem 6
|
"""
Project Euler Problem 6
=======================
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one
hundred natural numbers and the square of the sum.
"""
def task(numbers):
return sum(numbers) ** 2 - sum(n ** 2 for n in numbers)
def test_task():
assert task(range(1, 10 + 1)) == 2640
print(task(range(1, 100 + 1)))
|
<commit_before><commit_msg>Add solution and unit tests for problem 6<commit_after>
|
"""
Project Euler Problem 6
=======================
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one
hundred natural numbers and the square of the sum.
"""
def task(numbers):
return sum(numbers) ** 2 - sum(n ** 2 for n in numbers)
def test_task():
assert task(range(1, 10 + 1)) == 2640
print(task(range(1, 100 + 1)))
|
Add solution and unit tests for problem 6"""
Project Euler Problem 6
=======================
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one
hundred natural numbers and the square of the sum.
"""
def task(numbers):
return sum(numbers) ** 2 - sum(n ** 2 for n in numbers)
def test_task():
assert task(range(1, 10 + 1)) == 2640
print(task(range(1, 100 + 1)))
|
<commit_before><commit_msg>Add solution and unit tests for problem 6<commit_after>"""
Project Euler Problem 6
=======================
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one
hundred natural numbers and the square of the sum.
"""
def task(numbers):
return sum(numbers) ** 2 - sum(n ** 2 for n in numbers)
def test_task():
assert task(range(1, 10 + 1)) == 2640
print(task(range(1, 100 + 1)))
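As a quick cross-check of the brute-force task() above (a supplementary sketch, not part of the original commit), the same answer follows from the closed forms sum(1..n) = n(n+1)/2 and sum of squares 1..n = n(n+1)(2n+1)/6:

def task_closed_form(n):
    # Difference between the square of the sum and the sum of the squares,
    # computed from the standard closed forms for 1..n.
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert task_closed_form(10) == 2640
print(task_closed_form(100))  # 25164150, matching the brute-force task()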
|
|
a6dd0a8deff9da7d1c3a792a23fa2d57c24b608f
|
src/lineage/utils.py
|
src/lineage/utils.py
|
from multiprocessing import Pool
import os
class Parallelizer:
def __init__(self, parallelize=True, processes=os.cpu_count()):
""" Initialize a `Parallelizer`.
Parameters
----------
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
"""
self._parallelize = parallelize
self._processes = processes
def __call__(self, f, tasks):
""" Optionally parallelize execution of a function.
Parameters
----------
f : func
function to execute
tasks : list of dict
tasks to pass to `f`
Returns
-------
list
results of each call to `f`
"""
if self._parallelize:
with Pool(self._processes) as p:
return p.map(f, tasks)
else:
return map(f, tasks)
|
Add multiprocessing capability via `Parallelizer`
|
Add multiprocessing capability via `Parallelizer`
See https://docs.python.org/3/library/multiprocessing.html
|
Python
|
mit
|
apriha/lineage
|
Add multiprocessing capability via `Parallelizer`
See https://docs.python.org/3/library/multiprocessing.html
|
from multiprocessing import Pool
import os
class Parallelizer:
def __init__(self, parallelize=True, processes=os.cpu_count()):
""" Initialize a `Parallelizer`.
Parameters
----------
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
"""
self._parallelize = parallelize
self._processes = processes
def __call__(self, f, tasks):
""" Optionally parallelize execution of a function.
Parameters
----------
f : func
function to execute
tasks : list of dict
tasks to pass to `f`
Returns
-------
list
results of each call to `f`
"""
if self._parallelize:
with Pool(self._processes) as p:
return p.map(f, tasks)
else:
return map(f, tasks)
|
<commit_before><commit_msg>Add multiprocessing capability via `Parallelizer`
See https://docs.python.org/3/library/multiprocessing.html<commit_after>
|
from multiprocessing import Pool
import os
class Parallelizer:
def __init__(self, parallelize=True, processes=os.cpu_count()):
""" Initialize a `Parallelizer`.
Parameters
----------
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
"""
self._parallelize = parallelize
self._processes = processes
def __call__(self, f, tasks):
""" Optionally parallelize execution of a function.
Parameters
----------
f : func
function to execute
tasks : list of dict
tasks to pass to `f`
Returns
-------
list
results of each call to `f`
"""
if self._parallelize:
with Pool(self._processes) as p:
return p.map(f, tasks)
else:
return map(f, tasks)
|
Add multiprocessing capability via `Parallelizer`
See https://docs.python.org/3/library/multiprocessing.htmlfrom multiprocessing import Pool
import os
class Parallelizer:
def __init__(self, parallelize=True, processes=os.cpu_count()):
""" Initialize a `Parallelizer`.
Parameters
----------
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
"""
self._parallelize = parallelize
self._processes = processes
def __call__(self, f, tasks):
""" Optionally parallelize execution of a function.
Parameters
----------
f : func
function to execute
tasks : list of dict
tasks to pass to `f`
Returns
-------
list
results of each call to `f`
"""
if self._parallelize:
with Pool(self._processes) as p:
return p.map(f, tasks)
else:
return map(f, tasks)
|
<commit_before><commit_msg>Add multiprocessing capability via `Parallelizer`
See https://docs.python.org/3/library/multiprocessing.html<commit_after>from multiprocessing import Pool
import os
class Parallelizer:
def __init__(self, parallelize=True, processes=os.cpu_count()):
""" Initialize a `Parallelizer`.
Parameters
----------
parallelize : bool
utilize multiprocessing to speedup calculations
processes : int
processes to launch if multiprocessing
"""
self._parallelize = parallelize
self._processes = processes
def __call__(self, f, tasks):
""" Optionally parallelize execution of a function.
Parameters
----------
f : func
function to execute
tasks : list of dict
tasks to pass to `f`
Returns
-------
list
results of each call to `f`
"""
if self._parallelize:
with Pool(self._processes) as p:
return p.map(f, tasks)
else:
return map(f, tasks)
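A minimal usage sketch of the Parallelizer above, assuming it is importable as lineage.utils per the file path; the worker function and task list are invented for the example. The serial branch returns a lazy map object while Pool.map returns a list, so the result is wrapped in list(), and the worker is kept at module level so multiprocessing can pickle it:

from lineage.utils import Parallelizer  # assumed import path

def square(task):
    # Each task is a dict, matching the docstring above.
    return task["n"] ** 2

if __name__ == "__main__":
    tasks = [{"n": i} for i in range(8)]
    parallelizer = Parallelizer(processes=2)
    print(list(parallelizer(square, tasks)))  # [0, 1, 4, 9, 16, 25, 36, 49]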
|
|
796cc99e9bf6d1acdd44d4820343c1c5defcae86
|
tests/test_recursive_reference.py
|
tests/test_recursive_reference.py
|
from mongoengine import Document, fields
import unittest
class MyDocument(Document):
parent = fields.ReferenceField("MyDocument")
class SimpleDocumentTest(unittest.TestCase):
def test_document(self):
MyDocument()
|
Add test with recursive field
|
Add test with recursive field
|
Python
|
bsd-3-clause
|
thomwiggers/django-mongodbforms
|
Add test with recursive field
|
from mongoengine import Document, fields
import unittest
class MyDocument(Document):
parent = fields.ReferenceField("MyDocument")
class SimpleDocumentTest(unittest.TestCase):
def test_document(self):
MyDocument()
|
<commit_before><commit_msg>Add test with recursive field<commit_after>
|
from mongoengine import Document, fields
import unittest
class MyDocument(Document):
parent = fields.ReferenceField("MyDocument")
class SimpleDocumentTest(unittest.TestCase):
def test_document(self):
MyDocument()
|
Add test with recursive fieldfrom mongoengine import Document, fields
import unittest
class MyDocument(Document):
parent = fields.ReferenceField("MyDocument")
class SimpleDocumentTest(unittest.TestCase):
def test_document(self):
MyDocument()
|
<commit_before><commit_msg>Add test with recursive field<commit_after>from mongoengine import Document, fields
import unittest
class MyDocument(Document):
parent = fields.ReferenceField("MyDocument")
class SimpleDocumentTest(unittest.TestCase):
def test_document(self):
MyDocument()
|
|
3044ab001215a34105db8d82e422db8c7f823536
|
results/migrations/0013_update_related_facts_name.py
|
results/migrations/0013_update_related_facts_name.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('results', '0012_add_winner_model'),
]
operations = [
migrations.AlterField(
model_name='resultsfact',
name='page',
field=modelcluster.fields.ParentalKey(related_name='related_facts', to='results.ResultsPage'),
),
]
|
Update related facts attr name in results model
|
Update related facts attr name in results model
|
Python
|
unlicense
|
nott/next.filmfest.by,kinaklub/next.filmfest.by,nott/next.filmfest.by,kinaklub/next.filmfest.by,nott/next.filmfest.by,kinaklub/next.filmfest.by,kinaklub/next.filmfest.by,nott/next.filmfest.by
|
Update related facts attr name in results model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('results', '0012_add_winner_model'),
]
operations = [
migrations.AlterField(
model_name='resultsfact',
name='page',
field=modelcluster.fields.ParentalKey(related_name='related_facts', to='results.ResultsPage'),
),
]
|
<commit_before><commit_msg>Update related facts attr name in results model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('results', '0012_add_winner_model'),
]
operations = [
migrations.AlterField(
model_name='resultsfact',
name='page',
field=modelcluster.fields.ParentalKey(related_name='related_facts', to='results.ResultsPage'),
),
]
|
Update related facts attr name in results model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('results', '0012_add_winner_model'),
]
operations = [
migrations.AlterField(
model_name='resultsfact',
name='page',
field=modelcluster.fields.ParentalKey(related_name='related_facts', to='results.ResultsPage'),
),
]
|
<commit_before><commit_msg>Update related facts attr name in results model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('results', '0012_add_winner_model'),
]
operations = [
migrations.AlterField(
model_name='resultsfact',
name='page',
field=modelcluster.fields.ParentalKey(related_name='related_facts', to='results.ResultsPage'),
),
]
|
|
1999515136ed0fab1ce3b91f5772ca8922a9ffb2
|
odd.py
|
odd.py
|
def odd( number ):
return number % 2 == 1
number = int( input( "Enter a number: ") )
print( "Is the number " + str( number ) + " odd? Answer: " + str( odd( number) ) )
|
Add the answer to the fourth question of Assignment 3
|
Add the answer to the fourth question of Assignment 3
|
Python
|
mit
|
SuyashD95/python-assignments
|
Add the answer to the fourth question of Assignment 3
|
def odd( number ):
return number % 2 == 1
number = int( input( "Enter a number: ") )
print( "Is the number " + str( number ) + " odd? Answer: " + str( odd( number) ) )
|
<commit_before><commit_msg>Add the answer to the fourth question of Assignment 3<commit_after>
|
def odd( number ):
return number % 2 == 1
number = int( input( "Enter a number: ") )
print( "Is the number " + str( number ) + " odd? Answer: " + str( odd( number) ) )
|
Add the answer to the fourth question of Assignment 3def odd( number ):
return number % 2 == 1
number = int( input( "Enter a number: ") )
print( "Is the number " + str( number ) + " odd? Answer: " + str( odd( number) ) )
|
<commit_before><commit_msg>Add the answer to the fourth question of Assignment 3<commit_after>def odd( number ):
return number % 2 == 1
number = int( input( "Enter a number: ") )
print( "Is the number " + str( number ) + " odd? Answer: " + str( odd( number) ) )
|
|
aca013f2f8c8bc4b6832c3afbe335e89f4959e17
|
thinglang/parser/patterns.py
|
thinglang/parser/patterns.py
|
from thinglang.common import ValueType
from thinglang.lexer.symbols import LexicalGroupEnd
from thinglang.lexer.symbols.arithmetic import FirstOrderLexicalBinaryOperation, SecondOrderLexicalBinaryOperation
from thinglang.lexer.symbols.base import LexicalParenthesesOpen, LexicalParenthesesClose, LexicalSeparator, \
LexicalAccess, LexicalAssignment, LexicalIdentifier
from thinglang.lexer.symbols.functions import LexicalReturnStatement, LexicalArgumentListIndicator, \
LexicalDeclarationMethod, LexicalDeclarationThing
from thinglang.lexer.symbols.logic import LexicalComparison, LexicalConditional
from thinglang.parser.tokens.arithmetic import ArithmeticOperation
from thinglang.parser.tokens.base import AssignmentOperation
from thinglang.parser.tokens.classes import ThingDefinition, MethodDefinition
from thinglang.parser.tokens.functions import Access, ArgumentListPartial, ArgumentList, MethodCall, ReturnStatement, \
ArgumentListDecelerationPartial
from thinglang.parser.tokens.logic import Conditional
FIRST_PASS_PATTERNS = [
((LexicalDeclarationThing, LexicalIdentifier), ThingDefinition), # thing Program
((LexicalDeclarationMethod, LexicalIdentifier, LexicalGroupEnd), MethodDefinition), # does start
((LexicalDeclarationMethod, LexicalIdentifier, ArgumentList), MethodDefinition), # does start with a, b
((LexicalArgumentListIndicator, ValueType), ArgumentListDecelerationPartial), # with a
((LexicalIdentifier, LexicalAccess, LexicalIdentifier), Access), # person.name
((ValueType, SecondOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 * 2
((ValueType, FirstOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 + 2
((ArgumentListPartial, SecondOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 * 2
((ArgumentListPartial, FirstOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 + 2
((LexicalParenthesesOpen, LexicalParenthesesClose), ArgumentList), # ()
((LexicalParenthesesOpen, ValueType), ArgumentListPartial), # (2
((ArgumentListDecelerationPartial, LexicalSeparator, ValueType), ArgumentListDecelerationPartial), # (2, 3
((ArgumentListPartial, LexicalSeparator, ValueType), ArgumentListPartial), # (2, 3
((ArgumentListPartial, LexicalParenthesesClose), ArgumentList), # (2, 3)
((ArgumentListDecelerationPartial, LexicalGroupEnd), ArgumentList), # (2, 3)
((LexicalConditional, ValueType, LexicalComparison, ValueType), Conditional), # if x eq y
((Access, ArgumentList), MethodCall), # person.walk(...)
((LexicalIdentifier, LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # number n = 1
((LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # n = 2,
]
SECOND_PASS_PATTERNS = [
((LexicalReturnStatement, ValueType), ReturnStatement), # return 2
]
REPLACEMENT_PASSES = FIRST_PASS_PATTERNS, SECOND_PASS_PATTERNS
|
Split parser patterns into passes to support compound returns
|
Split parser patterns into passes to support compound returns
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Split parser patterns into passes to support compound returns
|
from thinglang.common import ValueType
from thinglang.lexer.symbols import LexicalGroupEnd
from thinglang.lexer.symbols.arithmetic import FirstOrderLexicalBinaryOperation, SecondOrderLexicalBinaryOperation
from thinglang.lexer.symbols.base import LexicalParenthesesOpen, LexicalParenthesesClose, LexicalSeparator, \
LexicalAccess, LexicalAssignment, LexicalIdentifier
from thinglang.lexer.symbols.functions import LexicalReturnStatement, LexicalArgumentListIndicator, \
LexicalDeclarationMethod, LexicalDeclarationThing
from thinglang.lexer.symbols.logic import LexicalComparison, LexicalConditional
from thinglang.parser.tokens.arithmetic import ArithmeticOperation
from thinglang.parser.tokens.base import AssignmentOperation
from thinglang.parser.tokens.classes import ThingDefinition, MethodDefinition
from thinglang.parser.tokens.functions import Access, ArgumentListPartial, ArgumentList, MethodCall, ReturnStatement, \
ArgumentListDecelerationPartial
from thinglang.parser.tokens.logic import Conditional
FIRST_PASS_PATTERNS = [
((LexicalDeclarationThing, LexicalIdentifier), ThingDefinition), # thing Program
((LexicalDeclarationMethod, LexicalIdentifier, LexicalGroupEnd), MethodDefinition), # does start
((LexicalDeclarationMethod, LexicalIdentifier, ArgumentList), MethodDefinition), # does start with a, b
((LexicalArgumentListIndicator, ValueType), ArgumentListDecelerationPartial), # with a
((LexicalIdentifier, LexicalAccess, LexicalIdentifier), Access), # person.name
((ValueType, SecondOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 * 2
((ValueType, FirstOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 + 2
((ArgumentListPartial, SecondOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 * 2
((ArgumentListPartial, FirstOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 + 2
((LexicalParenthesesOpen, LexicalParenthesesClose), ArgumentList), # ()
((LexicalParenthesesOpen, ValueType), ArgumentListPartial), # (2
((ArgumentListDecelerationPartial, LexicalSeparator, ValueType), ArgumentListDecelerationPartial), # (2, 3
((ArgumentListPartial, LexicalSeparator, ValueType), ArgumentListPartial), # (2, 3
((ArgumentListPartial, LexicalParenthesesClose), ArgumentList), # (2, 3)
((ArgumentListDecelerationPartial, LexicalGroupEnd), ArgumentList), # (2, 3)
((LexicalConditional, ValueType, LexicalComparison, ValueType), Conditional), # if x eq y
((Access, ArgumentList), MethodCall), # person.walk(...)
((LexicalIdentifier, LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # number n = 1
((LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # n = 2,
]
SECOND_PASS_PATTERNS = [
((LexicalReturnStatement, ValueType), ReturnStatement), # return 2
]
REPLACEMENT_PASSES = FIRST_PASS_PATTERNS, SECOND_PASS_PATTERNS
|
<commit_before><commit_msg>Split parser patterns into passes to support compound returns<commit_after>
|
from thinglang.common import ValueType
from thinglang.lexer.symbols import LexicalGroupEnd
from thinglang.lexer.symbols.arithmetic import FirstOrderLexicalBinaryOperation, SecondOrderLexicalBinaryOperation
from thinglang.lexer.symbols.base import LexicalParenthesesOpen, LexicalParenthesesClose, LexicalSeparator, \
LexicalAccess, LexicalAssignment, LexicalIdentifier
from thinglang.lexer.symbols.functions import LexicalReturnStatement, LexicalArgumentListIndicator, \
LexicalDeclarationMethod, LexicalDeclarationThing
from thinglang.lexer.symbols.logic import LexicalComparison, LexicalConditional
from thinglang.parser.tokens.arithmetic import ArithmeticOperation
from thinglang.parser.tokens.base import AssignmentOperation
from thinglang.parser.tokens.classes import ThingDefinition, MethodDefinition
from thinglang.parser.tokens.functions import Access, ArgumentListPartial, ArgumentList, MethodCall, ReturnStatement, \
ArgumentListDecelerationPartial
from thinglang.parser.tokens.logic import Conditional
FIRST_PASS_PATTERNS = [
((LexicalDeclarationThing, LexicalIdentifier), ThingDefinition), # thing Program
((LexicalDeclarationMethod, LexicalIdentifier, LexicalGroupEnd), MethodDefinition), # does start
((LexicalDeclarationMethod, LexicalIdentifier, ArgumentList), MethodDefinition), # does start with a, b
((LexicalArgumentListIndicator, ValueType), ArgumentListDecelerationPartial), # with a
((LexicalIdentifier, LexicalAccess, LexicalIdentifier), Access), # person.name
((ValueType, SecondOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 * 2
((ValueType, FirstOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 + 2
((ArgumentListPartial, SecondOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 * 2
((ArgumentListPartial, FirstOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 + 2
((LexicalParenthesesOpen, LexicalParenthesesClose), ArgumentList), # ()
((LexicalParenthesesOpen, ValueType), ArgumentListPartial), # (2
((ArgumentListDecelerationPartial, LexicalSeparator, ValueType), ArgumentListDecelerationPartial), # (2, 3
((ArgumentListPartial, LexicalSeparator, ValueType), ArgumentListPartial), # (2, 3
((ArgumentListPartial, LexicalParenthesesClose), ArgumentList), # (2, 3)
((ArgumentListDecelerationPartial, LexicalGroupEnd), ArgumentList), # (2, 3)
((LexicalConditional, ValueType, LexicalComparison, ValueType), Conditional), # if x eq y
((Access, ArgumentList), MethodCall), # person.walk(...)
((LexicalIdentifier, LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # number n = 1
((LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # n = 2,
]
SECOND_PASS_PATTERNS = [
((LexicalReturnStatement, ValueType), ReturnStatement), # return 2
]
REPLACEMENT_PASSES = FIRST_PASS_PATTERNS, SECOND_PASS_PATTERNS
|
Split parser patterns into passes to support compound returnsfrom thinglang.common import ValueType
from thinglang.lexer.symbols import LexicalGroupEnd
from thinglang.lexer.symbols.arithmetic import FirstOrderLexicalBinaryOperation, SecondOrderLexicalBinaryOperation
from thinglang.lexer.symbols.base import LexicalParenthesesOpen, LexicalParenthesesClose, LexicalSeparator, \
LexicalAccess, LexicalAssignment, LexicalIdentifier
from thinglang.lexer.symbols.functions import LexicalReturnStatement, LexicalArgumentListIndicator, \
LexicalDeclarationMethod, LexicalDeclarationThing
from thinglang.lexer.symbols.logic import LexicalComparison, LexicalConditional
from thinglang.parser.tokens.arithmetic import ArithmeticOperation
from thinglang.parser.tokens.base import AssignmentOperation
from thinglang.parser.tokens.classes import ThingDefinition, MethodDefinition
from thinglang.parser.tokens.functions import Access, ArgumentListPartial, ArgumentList, MethodCall, ReturnStatement, \
ArgumentListDecelerationPartial
from thinglang.parser.tokens.logic import Conditional
FIRST_PASS_PATTERNS = [
((LexicalDeclarationThing, LexicalIdentifier), ThingDefinition), # thing Program
((LexicalDeclarationMethod, LexicalIdentifier, LexicalGroupEnd), MethodDefinition), # does start
((LexicalDeclarationMethod, LexicalIdentifier, ArgumentList), MethodDefinition), # does start with a, b
((LexicalArgumentListIndicator, ValueType), ArgumentListDecelerationPartial), # with a
((LexicalIdentifier, LexicalAccess, LexicalIdentifier), Access), # person.name
((ValueType, SecondOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 * 2
((ValueType, FirstOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 + 2
((ArgumentListPartial, SecondOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 * 2
((ArgumentListPartial, FirstOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 + 2
((LexicalParenthesesOpen, LexicalParenthesesClose), ArgumentList), # ()
((LexicalParenthesesOpen, ValueType), ArgumentListPartial), # (2
((ArgumentListDecelerationPartial, LexicalSeparator, ValueType), ArgumentListDecelerationPartial), # (2, 3
((ArgumentListPartial, LexicalSeparator, ValueType), ArgumentListPartial), # (2, 3
((ArgumentListPartial, LexicalParenthesesClose), ArgumentList), # (2, 3)
((ArgumentListDecelerationPartial, LexicalGroupEnd), ArgumentList), # (2, 3)
((LexicalConditional, ValueType, LexicalComparison, ValueType), Conditional), # if x eq y
((Access, ArgumentList), MethodCall), # person.walk(...)
((LexicalIdentifier, LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # number n = 1
((LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # n = 2,
]
SECOND_PASS_PATTERNS = [
((LexicalReturnStatement, ValueType), ReturnStatement), # return 2
]
REPLACEMENT_PASSES = FIRST_PASS_PATTERNS, SECOND_PASS_PATTERNS
|
<commit_before><commit_msg>Split parser patterns into passes to support compound returns<commit_after>from thinglang.common import ValueType
from thinglang.lexer.symbols import LexicalGroupEnd
from thinglang.lexer.symbols.arithmetic import FirstOrderLexicalBinaryOperation, SecondOrderLexicalBinaryOperation
from thinglang.lexer.symbols.base import LexicalParenthesesOpen, LexicalParenthesesClose, LexicalSeparator, \
LexicalAccess, LexicalAssignment, LexicalIdentifier
from thinglang.lexer.symbols.functions import LexicalReturnStatement, LexicalArgumentListIndicator, \
LexicalDeclarationMethod, LexicalDeclarationThing
from thinglang.lexer.symbols.logic import LexicalComparison, LexicalConditional
from thinglang.parser.tokens.arithmetic import ArithmeticOperation
from thinglang.parser.tokens.base import AssignmentOperation
from thinglang.parser.tokens.classes import ThingDefinition, MethodDefinition
from thinglang.parser.tokens.functions import Access, ArgumentListPartial, ArgumentList, MethodCall, ReturnStatement, \
ArgumentListDecelerationPartial
from thinglang.parser.tokens.logic import Conditional
FIRST_PASS_PATTERNS = [
((LexicalDeclarationThing, LexicalIdentifier), ThingDefinition), # thing Program
((LexicalDeclarationMethod, LexicalIdentifier, LexicalGroupEnd), MethodDefinition), # does start
((LexicalDeclarationMethod, LexicalIdentifier, ArgumentList), MethodDefinition), # does start with a, b
((LexicalArgumentListIndicator, ValueType), ArgumentListDecelerationPartial), # with a
((LexicalIdentifier, LexicalAccess, LexicalIdentifier), Access), # person.name
((ValueType, SecondOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 * 2
((ValueType, FirstOrderLexicalBinaryOperation, ValueType), ArithmeticOperation), # 4 + 2
((ArgumentListPartial, SecondOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 * 2
((ArgumentListPartial, FirstOrderLexicalBinaryOperation, ValueType), ArgumentListPartial), # (4 + 2
((LexicalParenthesesOpen, LexicalParenthesesClose), ArgumentList), # ()
((LexicalParenthesesOpen, ValueType), ArgumentListPartial), # (2
((ArgumentListDecelerationPartial, LexicalSeparator, ValueType), ArgumentListDecelerationPartial), # (2, 3
((ArgumentListPartial, LexicalSeparator, ValueType), ArgumentListPartial), # (2, 3
((ArgumentListPartial, LexicalParenthesesClose), ArgumentList), # (2, 3)
((ArgumentListDecelerationPartial, LexicalGroupEnd), ArgumentList), # (2, 3)
((LexicalConditional, ValueType, LexicalComparison, ValueType), Conditional), # if x eq y
((Access, ArgumentList), MethodCall), # person.walk(...)
((LexicalIdentifier, LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # number n = 1
((LexicalIdentifier, LexicalAssignment, ValueType), AssignmentOperation), # n = 2,
]
SECOND_PASS_PATTERNS = [
((LexicalReturnStatement, ValueType), ReturnStatement), # return 2
]
REPLACEMENT_PASSES = FIRST_PASS_PATTERNS, SECOND_PASS_PATTERNS
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.