Dataset schema (column names, types, and value statistics from the dataset viewer):

| column | type | values |
|---|---|---|
| commit | string | 40-40 chars |
| old_file | string | 4-118 chars |
| new_file | string | 4-118 chars |
| old_contents | string | 0-2.94k chars |
| new_contents | string | 1-4.43k chars |
| subject | string | 15-444 chars |
| message | string | 16-3.45k chars |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5-43.2k chars |
| prompt | string | 17-4.58k chars |
| response | string | 1-4.43k chars |
| prompt_tagged | string | 58-4.62k chars |
| response_tagged | string | 1-4.43k chars |
| text | string | 132-7.29k chars |
| text_tagged | string | 173-7.33k chars |

The `prompt`, `response`, `prompt_tagged`, `response_tagged`, `text`, and `text_tagged` columns are assembled from the core fields (the commit message plus the file contents), so each sample record below lists every field once, with the file contents shown a single time under `new_contents`.
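
For orientation, here is a minimal sketch of loading such a dataset and inspecting one record with the `datasets` library; the Hub ID used below is a placeholder, since the dump does not name the dataset.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "user/python-commit-pairs" is a placeholder ID, not taken from the dump.
from datasets import load_dataset

ds = load_dataset("user/python-commit-pairs", split="train")
row = ds[0]

print(row["commit"], row["new_file"], row["license"])
print(row["subject"])

# The sample rows below suggest that `text` is simply the commit
# message concatenated with the new file contents.
assert row["text"] == row["message"] + row["new_contents"]
```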

---

commit: dec55dcd877f026ce38c728df0bdcb26ac89f361
old_file: tests/test_dtool_dataset_copy.py
new_file: tests/test_dtool_dataset_copy.py
old_contents: (empty)
new_contents:

```python
"""Test the ``dtool dataset copy`` command."""
import os
from click.testing import CliRunner
from dtoolcore import DataSet, ProtoDataSet
from . import chdir_fixture, tmp_dir_fixture # NOQA
def test_dataset_copy_functional(chdir_fixture): # NOQA
from dtool_create.dataset import create, freeze, add, copy
runner = CliRunner()
dataset_name = "my_dataset"
result = runner.invoke(create, [dataset_name])
assert result.exit_code == 0
# At this point we have a proto dataset
dataset_abspath = os.path.abspath(dataset_name)
dataset_uri = "file://{}".format(dataset_abspath)
dataset = ProtoDataSet.from_uri(dataset_uri)
# Create a directory to copy the dataset to.
copy_directory = os.path.abspath("copy_dir")
os.mkdir(copy_directory)
# It should not be possible to copy a proto dataset.
result = runner.invoke(copy, [dataset_uri, copy_directory])
assert result.exit_code != 0
# Create sample file to the proto dataset.
sample_file_name = "hello.txt"
with open(sample_file_name, "w") as fh:
fh.write("hello world")
# Put it into the dataset
result = runner.invoke(add, ["item", sample_file_name, dataset_uri])
assert result.exit_code == 0
result = runner.invoke(freeze, [dataset_uri])
assert result.exit_code == 0
# Now we have a dataset.
dataset = DataSet.from_uri(dataset_uri)
# It should now be possible to copy the dataset.
result = runner.invoke(copy, [dataset_uri, copy_directory])
assert result.exit_code == 0
# However, it cannot be done again.
result = runner.invoke(copy, [dataset_uri, copy_directory])
assert result.exit_code != 0
assert result.output.find("Error: Dataset already exists") != -1
```

subject: Add 'dtool copy' functional test
message: Add 'dtool copy' functional test
lang: Python
license: mit
repos: jic-dtool/dtool-create
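
The derived columns follow a fixed template that is visible in the raw dump of the first record. Below is a hedged reconstruction of that template; every sampled commit adds a new file, so `old_contents` is empty throughout and its placement inside the untagged `prompt` is an assumption.

```python
def derive_columns(old_contents, message, new_contents):
    """Rebuild the derived dataset columns from the core fields.

    Inferred from the sampled rows; in all of them old_contents is
    empty, so where it sits inside the untagged prompt is a guess.
    """
    prompt = old_contents + message
    response = new_contents
    prompt_tagged = ('<commit_before>' + old_contents
                     + '<commit_msg>' + message
                     + '<commit_after>')
    response_tagged = new_contents
    return {
        'prompt': prompt,
        'response': response,
        'prompt_tagged': prompt_tagged,
        'response_tagged': response_tagged,
        'text': prompt + response,
        'text_tagged': prompt_tagged + response_tagged,
    }
```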

---

commit: 7f0086f966a2403fb8c653b5f897b73ab1fc4b88
old_file: tests/integration/test_sts.py
new_file: tests/integration/test_sts.py
old_contents: (empty)
new_contents:

```python
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest

import botocore.session
from botocore.exceptions import ClientError


class TestSTS(unittest.TestCase):

    def setUp(self):
        self.session = botocore.session.get_session()

    def test_regionalized_endpoints(self):
        # Since we have to activate STS regionalization, we will test
        # that you can send an STS request to a regionalized endpoint
        # by making a call with the explicitly wrong region name.
        sts = self.session.create_client(
            'sts', region_name='ap-southeast-1',
            endpoint_url='https://sts.us-west-2.amazonaws.com')
        # A signing error that includes the incorrect region name will
        # be raised.
        with self.assertRaisesRegexp(ClientError, 'ap-southeast-1'):
            sts.get_session_token()
```

subject: Add integration test for sts regionalization
message: Add integration test for sts regionalization
lang: Python
license: apache-2.0
repos: pplu/botocore,boto/botocore

---

commit: 35f267249955b4c09cbf4432e452dc987488454e
old_file: tests/test_session.py
new_file: tests/test_session.py
old_contents: (empty)
new_contents:

```python
#!/usr/bin/env python
# coding=utf-8
try:
    import unittest.mock as mock
except ImportError:
    import mock
import unittest
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

from nessusapi.session import Session
from nessusapi.session import Request


class SessionTestCase(unittest.TestCase):

    @mock.patch('nessusapi.session.random')
    @mock.patch('nessusapi.session.urlopen')
    @mock.patch('nessusapi.session.Request')
    def test_init(self, mock_request, mock_urlopen, mock_random):
        mock_random.randrange.return_value = 2811
        mock_urlopen.return_value = StringIO('<?xml version="1.0"?> <reply>'
                                             "<seq>2811</seq>"
                                             "<status>OK</status>"
                                             "<contents><token>ce65ea7</token>"
                                             "<user>"
                                             "<name>admin</name>"
                                             "<admin>TRUE</admin>"
                                             "</user></contents>"
                                             "</reply>")
        session = Session('user', 'pass', '192.0.2.3', '8980')
        mock_request.assert_called_once_with(
            'https://192.0.2.3:8980/login',
            'login=user&password=pass&seq=2811')
        self.assertEqual(session.token, "ce65ea7")


if __name__ == '__main__':
    unittest.main()
```

subject: Add basic tests for session
message: Add basic tests for session
lang: Python
license: mit
repos: sait-berkeley-infosec/pynessus-api
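
The mocked calls pin down the login flow: a random `seq` value, a form-encoded body sent to `/login`, and a token parsed out of the XML reply. Below is a rough sketch of what `nessusapi.session.Session.__init__` appears to do, inferred from this test alone; the `randrange` bounds and the exact XML handling are assumptions, not the real implementation.

```python
# Reconstruction inferred from the test above; not the actual
# nessusapi code. The randrange bounds are a guess.
import random
import xml.etree.ElementTree as ET
from urllib.request import urlopen, Request


class Session(object):
    def __init__(self, login, password, host, port):
        seq = random.randrange(10000)
        request = Request(
            'https://%s:%s/login' % (host, port),
            'login=%s&password=%s&seq=%s' % (login, password, seq))
        reply = ET.parse(urlopen(request)).getroot()
        # <reply><contents><token>...</token></contents></reply>
        self.token = reply.findtext('contents/token')
```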

---

commit: 11f78d16c22a3bdf56ffaf87f68eb226e418140d
old_file: examples/aws_product_advertising.py
new_file: examples/aws_product_advertising.py
old_contents: (empty)
new_contents:

```python
#!/usr/bin/env python3
'''
Amazon Product Advertising API Client
'''
from xml.etree import ElementTree
import urllib.parse
import dewpoint.aws
import sys


def camel_case(name):
    parts = []
    for part in name.split('_'):
        parts.append(part[0].upper() + part[1:])
    return ''.join(parts)


class ProductAdvertising(object):
    def __init__(self, key, secret):
        self.api = dewpoint.aws.AWSClient(
            key=key,
            secret=secret,
            version='2013-08-01')

    def ItemSearch(self, endpoint, associate_tag, **kwargs):
        params = {
            'Service': 'AWSEcommerceService',
            'Operation': 'ItemSearch',
            'ContentType': 'text/xml',
            'AssociateTag': associate_tag,
        }
        for key in kwargs:
            params[camel_case(key)] = kwargs[key]
        query = urllib.parse.urlencode(params)
        url = '%s?%s' % (endpoint, query)
        status, headers, xml = self.api.request('GET', url)
        xml = xml.replace(b' xmlns="http://webservices.amazon.com/AWSECommerceService/2013-08-01"', b'')
        tree = ElementTree.XML(xml)
        return tree


if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: %s <access key> <secret key>' % sys.argv[0])
        sys.exit(1)
    access_key = sys.argv[1].encode('ascii')
    secret_key = sys.argv[2].encode('ascii')

    pa = ProductAdvertising(access_key, secret_key)

    # Change the endpoint depending on your country:
    # https://docs.aws.amazon.com/AWSECommerceService/latest/DG/AnatomyOfaRESTRequest.html#EndpointsandWebServices
    xml = pa.ItemSearch(
        endpoint='https://webservices.amazon.com/onca/xml',
        associate_tag='synack-20',
        search_index='Electronics',
        browse_node='1254762011',
        response_group='ItemAttributes,Offers',
        sort='salesrank',
        item_page=1)

    for element in xml.iterfind('Items/Item'):
        asin = element.findtext('ASIN')
        name = element.findtext('ItemAttributes/Title')
        price = float(element.findtext('Offers/Offer/OfferListing/Price/Amount')) / 100.0
        url = element.findtext('DetailPageURL')
        print('%s %s\n$%.02f %s\n' % (asin, name, price, url))
```

subject: Add example for AWS Product Advertising API
message: Add example for AWS Product Advertising API
lang: Python
license: bsd-2-clause
repos: JeremyGrosser/dewpoint

---

commit: a97371e013a8907c2f3c79303197aeb149402ba6
old_file: ideascube/conf/idb_fra_sarcelles.py
new_file: ideascube/conf/idb_fra_sarcelles.py
old_contents: (empty)
new_contents:

```python
# -*- coding: utf-8 -*-
"""Ideasbox for Sarcelles, France"""
from .idb import *  # noqa
from django.utils.translation import ugettext_lazy as _

IDEASCUBE_NAME = u"Sarcelles"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
    ('Ideasbox', ['serial', 'box_awareness']),
    (_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']),  # noqa
    (_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']),  # noqa
    (_('In the town'), ['current_occupation', 'school_level']),
    (_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
    {'id': 'blog'},
    {'id': 'library'},
    {'id': 'mediacenter'},
    {'id': 'khanacademy'},
    {'id': 'wikipedia', 'languages': ['fr']},
    {'id': 'gutenberg', 'languages': ['fr']},
    {'id': 'dirtybiology', 'languages': ['fr']},
    {'id': 'cest-pas-sorcier'},
    {'id': 'wiktionary', 'languages': ['fr']},
    {'id': 'wikiversity', 'languages': ['fr']},
    {'id': 'universcience', 'languages': ['fr']},
    {'id': 'les-fondamentaux', 'languages': ['fr']},
]
```

subject: Add conf file for Sarcelles, FR.
message: Add conf file for Sarcelles, FR.
lang: Python
license: agpl-3.0
repos: ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube

---

commit: 80ea3719a01f8e3017796fbcb075fc5d21ed8bc5
old_file: cla_public/apps/base/tests/test_healthchecks.py
new_file: cla_public/apps/base/tests/test_healthchecks.py
old_contents: (empty)
new_contents:

```python
import unittest

import mock

from cla_public.apps.base import healthchecks


class DiskSpaceHealthcheckTest(unittest.TestCase):

    @mock.patch('os.statvfs')
    def test_disk_space_check_reports_on_available_and_total_space(self, stat_mock):
        stat_mock.return_value.f_bavail = 50 * 1024
        stat_mock.return_value.f_blocks = 100 * 1024
        stat_mock.return_value.f_frsize = 1024
        result = healthchecks.check_disk()
        self.assertDictContainsSubset(
            {'available_percent': 50.0, 'available_mb': 50.0, 'total_mb': 100.0},
            result)

    @mock.patch('os.statvfs')
    def test_disk_space_check_passes_when_more_than_2_percent_space_is_available(self, stat_mock):
        stat_mock.return_value.f_bavail = 3 * 1024
        stat_mock.return_value.f_blocks = 100 * 1024
        stat_mock.return_value.f_frsize = 1024
        result = healthchecks.check_disk()
        self.assertDictContainsSubset(
            {'status': True, 'available_percent': 3.0}, result)

    @mock.patch('os.statvfs')
    def test_disk_space_check_fails_when_less_than_2_percent_space_is_available(self, stat_mock):
        stat_mock.return_value.f_bavail = 2 * 1024
        stat_mock.return_value.f_blocks = 100 * 1024
        stat_mock.return_value.f_frsize = 1024
        result = healthchecks.check_disk()
        self.assertDictContainsSubset(
            {'status': False, 'available_percent': 2.0}, result)
```

subject: Add disk space healthcheck test
message: Add disk space healthcheck test
lang: Python
license: mit
repos: ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public
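
The three tests fully determine the arithmetic of the check: percentages come from `f_bavail / f_blocks`, megabytes from block counts times `f_frsize`, and the pass/fail threshold sits at 2% available. Here is a hedged sketch of what `check_disk` plausibly computes; the `path` argument and the threshold constant are assumptions consistent with the tests, not the actual cla_public implementation.

```python
# Sketch consistent with the tests above; not the real cla_public code.
import os

MIN_AVAILABLE_PERCENT = 2.0  # assumed constant, implied by the tests


def check_disk(path='/'):
    stat = os.statvfs(path)
    mb = 1024.0 * 1024.0
    available_percent = 100.0 * stat.f_bavail / stat.f_blocks
    return {
        'status': available_percent > MIN_AVAILABLE_PERCENT,
        'available_percent': available_percent,
        'available_mb': stat.f_bavail * stat.f_frsize / mb,
        'total_mb': stat.f_blocks * stat.f_frsize / mb,
    }
```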

---

commit: 2cbf4c844bb2fc92504aa60a448265fc5c291e9f
old_file: sauna/plugins/ext/apt.py
new_file: sauna/plugins/ext/apt.py
old_contents: (empty)
new_contents:

```python
from sauna.plugins import Plugin, PluginRegister

my_plugin = PluginRegister('Apt')


@my_plugin.plugin()
class AptPlugin(Plugin):

    def __init__(self, config):
        super().__init__(config)
        try:
            import apt
            self._apt = apt
        except ImportError:
            from ... import DependencyError
            raise DependencyError(
                self.__class__.__name__,
                'apt',
                deb='python3-apt'
            )
        self._packages = None

    @property
    def packages(self) -> list:
        if self._packages is None:
            with self._apt.Cache() as cache:
                cache.upgrade()  # Only reads the packages to upgrade
                self._packages = cache.get_changes()
        return self._packages

    @my_plugin.check()
    def security_updates(self, check_config):
        num_security_packages = 0
        for p in self.packages:
            for o in p.candidate.origins:
                if 'security' in o.codename.lower():
                    num_security_packages += 1
                    break
                if 'security' in o.label.lower():
                    num_security_packages += 1
                    break
                if 'security' in o.site.lower():
                    num_security_packages += 1
                    break
        if num_security_packages == 0:
            return self.STATUS_OK, 'No security updates'
        return (
            self.STATUS_WARN,
            f'{num_security_packages} packages with security updates'
        )

    @my_plugin.check()
    def package_updates(self, check_config):
        if not self.packages:
            return self.STATUS_OK, 'No package updates'
        return (
            self.STATUS_WARN,
            f'{len(self.packages)} package updates'
        )

    @staticmethod
    def config_sample():
        return '''
        # Debian APT
        # This only consults the local APT cache, it does not
        # run an 'apt update'. Use 'unattended-upgrades' for that.
        - type: Apt
          checks:
            - type: security_updates
            - type: package_updates
        '''
```

subject: Add APT plugin to alert of Debian security updates
message: Add APT plugin to alert of Debian security updates
lang: Python
license: bsd-2-clause
repos: NicolasLM/sauna,NicolasLM/sauna

---

commit: 04c786d973bdeb99e0e327ec1f0ebb377fb139e0
old_file: run_server.py
new_file: run_server.py
old_contents: (empty)
new_contents:

```python
# -*- coding: UTF-8 -*-
from picar import app

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=8080, master=True, processes=1)
```

subject: Add script to run server
message: Add script to run server
lang: Python
license: apache-2.0
repos: lukaszo/picar_worhshop,lukaszo/picar_worhshop,lukaszo/picar_worhshop,lukaszo/picar_worhshop

---

commit: c8c439b92e3f466da58ab1a0c2b8ff3bf9242bf4
old_file: mqtt/tests/test_client.py
new_file: mqtt/tests/test_client.py
old_contents: (empty)
new_contents:

```python
import time

from django.test import TestCase
from django.contrib.auth.models import User
from django.conf import settings

from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from io import BytesIO
import json

from login.models import Profile, AmbulancePermission, HospitalPermission
from login.serializers import ExtendedProfileSerializer

from ambulance.models import Ambulance, \
    AmbulanceStatus, AmbulanceCapability
from ambulance.serializers import AmbulanceSerializer

from hospital.models import Hospital, \
    Equipment, HospitalEquipment, EquipmentType
from hospital.serializers import EquipmentSerializer, \
    HospitalSerializer, HospitalEquipmentSerializer

from django.test import Client

# TestMQTT is assumed to live in .client next to the other test helpers;
# the original snippet referenced it without importing it.
from .client import MQTTTestCase, MQTTTestClient, TestMQTT
from ..client import MQTTException
from ..subscribe import SubscribeClient


class TestMQTTSeed(TestMQTT, MQTTTestCase):

    def test_mqttseed(self):
        self.assertEqual(True, True)
```

subject: Add more time to mqtt.test.client
message: Add more time to mqtt.test.client
lang: Python
license: bsd-3-clause
repos: EMSTrack/WebServerAndClient,EMSTrack/WebServerAndClient,EMSTrack/WebServerAndClient
7dd898309e81feb686220757a740b6b7b934462d
|
pipelines/genome_merge.py
|
pipelines/genome_merge.py
|
import os
import luigi
import json
import subprocess
# add parent directory to path before importing "support", so the import
# resolves when this file is run directly as a script
if __name__ == '__main__' and __package__ is None:
    os.sys.path.append(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from support import merge_fasta as mf
# ----------------------------------TASKS--------------------------------------
class MergeGenome(luigi.Task):
"""
Task to merge genome fasta files
"""
    updir = luigi.Parameter()
def run(self):
"""
Merge all fasta files in updir
"""
mf.merge_genome_files(self.updir)
def output(self):
"""
Check genome fasta file has been generated
"""
upid = os.path.split(self.updir)[1]
genome_fasta = os.path.join(self.updir, upid+'.fa')
return luigi.LocalTarget(genome_fasta)
# -----------------------------------------------------------------------------
class GenomeMergeEngine(luigi.Task):
"""
Task to load accessions from file
"""
project_dir = luigi.Parameter()
lsf = luigi.BoolParameter(default=True,
description="If used then run on lsf, otherwise run locally")
def run(self):
"""
Call load_upid_gca_file to export all upid_gca accession pairs in
json format.
"""
id_pairs = None
upid_gca_file_loc = os.path.join(self.project_dir, "upid_gca_dict.json")
upid_gca_fp = open(upid_gca_file_loc, 'r')
accessions = json.load(upid_gca_fp)
upid_gca_fp.close()
upids = accessions.keys()
cmd = ''
for upid in upids:
subdir = os.path.join(self.project_dir, upid[-3:])
            updir = os.path.join(subdir, upid)
if os.path.exists(updir):
if self.lsf is True:
# TODO launch an lsf job to merge the genome fasta files
pass
else:
cmd = "python \"{this_file}\" MergeGenome --updir {upid}".format(
this_file=os.path.realpath(__file__),
updir=updir)
subprocess.call(cmd, shell=True)
cmd = ''
# -----------------------------------------------------------------------------
if __name__ == '__main__':
luigi.run()
|
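As a usage sketch (standard luigi command-line conventions; the project path is a placeholder, not from the source), the engine task can be launched locally:
import subprocess
# equivalent shell call: python pipelines/genome_merge.py GenomeMergeEngine --project-dir <dir> --local-scheduler
subprocess.call([
    "python", "pipelines/genome_merge.py", "GenomeMergeEngine",
    "--project-dir", "/path/to/project",  # placeholder path
    "--local-scheduler",
])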
Add first version of genome merge pipeline
|
Add first version of genome merge pipeline
|
Python
|
apache-2.0
|
Rfam/rfam-production,Rfam/rfam-production,Rfam/rfam-production
|
|
b7fac69907e004d02554ccf0f151c7247d464c1b
|
src/TinyIDS/collector.py
|
src/TinyIDS/collector.py
|
# -*- coding: utf-8 -*-
#
# This file is part of TinyIDS.
#
# TinyIDS is a distributed Intrusion Detection System (IDS) for Unix systems.
#
# Project development web site:
#
# http://www.codetrax.org/projects/tinyids
#
# Copyright (c) 2010 George Notaras, G-Loaded.eu, CodeTRAX.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import glob
import logging
from TinyIDS.config import TinyIDSConfigParser
DEFAULT_PATHS = (
    '/usr/local/sbin/*',
    '/usr/local/bin/*',
    '/sbin/*',
    '/bin/*',
    '/usr/sbin/*',
    '/usr/bin/*',
    '/root/bin/*',
    '/lib/*',
    '/usr/lib/*',
    '/usr/local/lib/*',
)
logger = logging.getLogger()
class BaseCollector:
"""
self.config_path = os.path.join(backend_conf_dir, __name__ + '.conf')
"""
def __init__(self, config_path=None):
self.config_path = config_path
self.cfg = TinyIDSConfigParser()
if self.config_path:
if os.path.exists(self.config_path):
self.cfg.read(self.config_path)
logger.debug('Using configuration for %s backend from: %s' % (__name__, self.config_path))
def file_paths(self):
if not self.cfg.has_section('main'):
paths = DEFAULT_PATHS
elif not self.cfg.has_option('main', 'paths'):
paths = DEFAULT_PATHS
else:
paths = self.cfg.getlist('main', 'paths')
for path in paths:
file_list = glob.glob(path)
for fpath in file_list:
if os.path.isfile(fpath): # Follows symbolic links
yield fpath
def collect(self):
pass
|
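To illustrate the backend API sketched above, a hypothetical concrete collector (not part of the source) subclasses BaseCollector and implements collect() over file_paths():
import hashlib
class ChecksumCollector(BaseCollector):
    """Hypothetical backend: yields (path, sha1 digest) for each collected file."""
    def collect(self):
        for fpath in self.file_paths():
            with open(fpath, 'rb') as fobj:
                yield fpath, hashlib.sha1(fobj.read()).hexdigest()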
Define a first concept of the backend API
|
Define a first concept of the backend API
|
Python
|
apache-2.0
|
rafaelang/tinyids
|
|
4a7f47bec8c3089df988ffcf03a27fb2225b2259
|
services/migrations/0008_auto_20170505_1805.py
|
services/migrations/0008_auto_20170505_1805.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-05 18:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('services', '0007_auto_20161112_1406'),
]
operations = [
migrations.AlterField(
model_name='service',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='services.Category'),
preserve_default=False,
),
]
|
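A minimal sketch of applying this migration programmatically (assumes DJANGO_SETTINGS_MODULE is already configured for the project):
import django
from django.core.management import call_command
django.setup()
call_command("migrate", "services", "0008_auto_20170505_1805")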
Set up edit service serializers
|
Set up edit service serializers
|
Python
|
mit
|
Semillas/semillas_backend,Semillas/semillas_platform,Semillas/semillas_platform,Semillas/semillas_backend,Semillas/semillas_backend,Semillas/semillas_platform,Semillas/semillas_backend,Semillas/semillas_platform
|
|
85015343fd99885ec00db86b4e3705bcc25e62f1
|
test/requests/run-integration-tests.py
|
test/requests/run-integration-tests.py
|
import sys
from test_login_local import TestLoginLocal
from test_registration import TestRegistration
from unittest import TestSuite, TextTestRunner, TestLoader
test_cases = [
TestLoginLocal,
TestRegistration
]
def suite(gn2_url, es_url):
the_suite = TestSuite()
for case in test_cases:
the_suite.addTests(initTest(case, gn2_url, es_url))
return the_suite
def initTest(klass, gn2_url, es_url):
loader = TestLoader()
methodNames = loader.getTestCaseNames(klass)
return [klass(mname, gn2_url, es_url) for mname in methodNames]
def main(gn2_url, es_url):
runner = TextTestRunner()
runner.run(suite(gn2_url, es_url))
if __name__ == "__main__":
if len(sys.argv) < 3:
raise Exception("Required arguments missing:\n\tTry running `run-integration-test.py <gn2-url> <es-url>`")
else:
main(sys.argv[1], sys.argv[2])
|
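For reference, a hedged invocation example; both URLs below are placeholders rather than values from the source:
import subprocess
subprocess.call([
    "python", "test/requests/run-integration-tests.py",
    "http://localhost:5003",  # gn2-url placeholder
    "http://localhost:9200",  # es-url placeholder
])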
Add a runner for all integration tests.
|
Add a runner for all integration tests.
|
Python
|
agpl-3.0
|
zsloan/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,DannyArends/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,DannyArends/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2
|
|
ff3a6b35e85058c711a056401b7c3e2d7c7b1260
|
crmapp/accounts/forms.py
|
crmapp/accounts/forms.py
|
from django import forms
from .models import Account
class AccountForm(forms.ModelForm):
class Meta:
model = Account
fields = ('name', 'desc', 'address_one',
'address_two', 'city', 'state', 'phone',
)
widgets = {
'name': forms.TextInput(
attrs={
'placeholder':'Company',
'class':'col-md-12 form-control'
}
),
'desc': forms.Textarea(
attrs={
'placeholder':'Enter a description',
'class':'form-control'
}
),
'address_one': forms.TextInput(
attrs={
'placeholder':'Street Address',
'class':'gi-form-addr form-control'
}
),
'address_two': forms.TextInput(
attrs={
'placeholder':'Suite, PO, etc',
'class':'gi-form-addr form-control'
}
),
'city': forms.TextInput(
attrs={
'placeholder':'City',
'class':'gi-form-addr form-control'
}
),
'state': forms.TextInput(
attrs={
'placeholder':'State',
'class':'gi-form-addr form-control'
}
),
'phone': forms.TextInput(
attrs={
'placeholder':'Phone',
'class':'gi-form-addr form-control'
}
),
}
|
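To show the form in context, a hypothetical function-based view (the URL name and template path are assumptions, not from the source):
from django.shortcuts import render, redirect
from .forms import AccountForm
def account_new(request):
    form = AccountForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('account_list')  # assumed URL name
    return render(request, 'accounts/account_new.html', {'form': form})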
Create the Account Detail Page - Part II > New Account - Create Form
|
Create the Account Detail Page - Part II > New Account - Create Form
|
Python
|
mit
|
deenaariff/Django,tabdon/crmeasyapp,tabdon/crmeasyapp
|
|
6a605b47ef4170dcb35040cac9c18c5407f041ae
|
erpnext/patches/remove_extra_button_from_email_digest.py
|
erpnext/patches/remove_extra_button_from_email_digest.py
|
def execute():
import webnotes
webnotes.conn.sql("""
DELETE FROM tabDocField
WHERE parent = 'Email Digest'
AND label = 'Add Recipients'
AND fieldtype = 'Button'""")
|
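A hedged sketch of exercising the patch by hand under the legacy webnotes framework; the connect step and the import path are assumptions about that era's layout:
import webnotes
webnotes.connect()  # assumed: opens the configured site's database connection
from patches.remove_extra_button_from_email_digest import execute  # assumed import path
execute()
webnotes.conn.commit()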
Patch to remove 'Add Recipients' button from Email Digest form
|
Patch to remove 'Add Recipients' button from Email Digest form
|
Python
|
agpl-3.0
|
indictranstech/reciphergroup-erpnext,Tejal011089/osmosis_erpnext,susuchina/ERPNEXT,SPKian/Testing,mbauskar/sapphire-erpnext,Suninus/erpnext,gangadharkadam/johnerp,SPKian/Testing,treejames/erpnext,Tejal011089/fbd_erpnext,Yellowen/Owrang,suyashphadtare/gd-erp,gangadharkadam/verveerp,gangadhar-kadam/hrerp,4commerce-technologies-AG/erpnext,aruizramon/alec_erpnext,pawaranand/phrerp,indictranstech/buyback-erp,gangadhar-kadam/verve-erp,geekroot/erpnext,geekroot/erpnext,rohitwaghchaure/digitales_erpnext,mbauskar/omnitech-demo-erpnext,geekroot/erpnext,gangadhar-kadam/verve_test_erp,gangadhar-kadam/laganerp,suyashphadtare/test,gangadhar-kadam/sms-erpnext,saurabh6790/medsyn-app1,indictranstech/Das_Erpnext,indictranstech/trufil-erpnext,gangadhar-kadam/powapp,sagar30051991/ozsmart-erp,indictranstech/osmosis-erpnext,gangadharkadam/letzerp,saurabh6790/test-erp,Tejal011089/digitales_erpnext,gangadharkadam/tailorerp,gangadhar-kadam/verve_erp,susuchina/ERPNEXT,gangadhar-kadam/laganerp,rohitwaghchaure/erpnext-receipher,tmimori/erpnext,indictranstech/Das_Erpnext,indictranstech/osmosis-erpnext,gsnbng/erpnext,SPKian/Testing2,rohitwaghchaure/erpnext-receipher,saurabh6790/test-med-app,fuhongliang/erpnext,Tejal011089/huntercamp_erpnext,rohitwaghchaure/New_Theme_Erp,MartinEnder/erpnext-de,saurabh6790/ON-RISAPP,saurabh6790/alert-med-app,rohitwaghchaure/erpnext_smart,mbauskar/alec_frappe5_erpnext,gangadharkadam/contributionerp,fuhongliang/erpnext,gangadharkadam/smrterp,ShashaQin/erpnext,indictranstech/erpnext,gangadharkadam/v6_erp,Drooids/erpnext,saurabh6790/OFF-RISAPP,Tejal011089/fbd_erpnext,anandpdoshi/erpnext,suyashphadtare/vestasi-erp-final,gangadhar-kadam/laganerp,saurabh6790/ON-RISAPP,MartinEnder/erpnext-de,gangadharkadam/v5_erp,suyashphadtare/gd-erp,mahabuber/erpnext,gangadhar-kadam/verve_test_erp,njmube/erpnext,hatwar/Das_erpnext,gangadharkadam/v5_erp,dieface/erpnext,MartinEnder/erpnext-de,treejames/erpnext,hatwar/focal-erpnext,indictranstech/erpnext,rohitwaghchaure/digitales_erpnext,indictranstech/osmosis-erpnext,gangadharkadam/saloon_erp_install,gangadhar-kadam/helpdesk-erpnext,mahabuber/erpnext,indictranstech/trufil-erpnext,rohitwaghchaure/GenieManager-erpnext,Tejal011089/osmosis_erpnext,gangadhar-kadam/mic-erpnext,saurabh6790/test-med-app,saurabh6790/omnisys-app,suyashphadtare/test,hatwar/Das_erpnext,Tejal011089/paypal_erpnext,hatwar/buyback-erpnext,gangadhar-kadam/nassimapp,mbauskar/omnitech-erpnext,mbauskar/sapphire-erpnext,gangadharkadam/contributionerp,saurabh6790/tru_app_back,indictranstech/buyback-erp,gangadhar-kadam/adb-erp,saurabh6790/medsynaptic1-app,suyashphadtare/vestasi-erp-1,mbauskar/omnitech-demo-erpnext,Tejal011089/huntercamp_erpnext,treejames/erpnext,indictranstech/vestasi-erpnext,gangadharkadam/tailorerp,pombredanne/erpnext,mbauskar/Das_Erpnext,indictranstech/internal-erpnext,saurabh6790/medapp,Tejal011089/fbd_erpnext,hatwar/Das_erpnext,indictranstech/reciphergroup-erpnext,ShashaQin/erpnext,gmarke/erpnext,mbauskar/omnitech-erpnext,Tejal011089/trufil-erpnext,treejames/erpnext,saurabh6790/med_app_rels,gangadhar-kadam/verve_erp,susuchina/ERPNEXT,hatwar/focal-erpnext,gangadharkadam/v6_erp,saurabh6790/omn-app,dieface/erpnext,indictranstech/trufil-erpnext,hanselke/erpnext-1,suyashphadtare/sajil-erp,aruizramon/alec_erpnext,hatwar/buyback-erpnext,gangadhar-kadam/verve_test_erp,Suninus/erpnext,gangadhar-kadam/verve_test_erp,Suninus/erpnext,gangadharkadam/johnerp,4commerce-technologies-AG/erpnext,indictranstech/fbd_erpnext,gangadhar-kadam/sapphire_app,indictranstech/buyback-erp,gangadharkadam/v5_erp
,tmimori/erpnext,tmimori/erpnext,gangadhar-kadam/prjapp,gangadhar-kadam/helpdesk-erpnext,saurabh6790/pow-app,mbauskar/phrerp,netfirms/erpnext,rohitwaghchaure/erpnext_smart,mahabuber/erpnext,mbauskar/sapphire-erpnext,suyashphadtare/sajil-erp,gangadhar-kadam/church-erpnext,SPKian/Testing2,gangadharkadam/vlinkerp,indictranstech/internal-erpnext,saurabh6790/med_app_rels,hanselke/erpnext-1,shft117/SteckerApp,rohitwaghchaure/erpnext-receipher,Suninus/erpnext,gangadhar-kadam/smrterp,fuhongliang/erpnext,mbauskar/helpdesk-erpnext,pombredanne/erpnext,gangadharkadam/saloon_erp_install,hatwar/buyback-erpnext,dieface/erpnext,mbauskar/alec_frappe5_erpnext,suyashphadtare/gd-erp,suyashphadtare/vestasi-erp-1,gangadharkadam/verveerp,gangadhar-kadam/verve_live_erp,saurabh6790/medapp,mbauskar/helpdesk-erpnext,Tejal011089/paypal_erpnext,gangadhar-kadam/sapphire_app,saurabh6790/tru_app_back,suyashphadtare/test,saurabh6790/omni-apps,gmarke/erpnext,suyashphadtare/vestasi-erp-jan-end,hanselke/erpnext-1,Aptitudetech/ERPNext,gangadhar-kadam/mtn-erpnext,tmimori/erpnext,saurabh6790/omnitech-apps,saurabh6790/omnisys-app,indictranstech/tele-erpnext,sagar30051991/ozsmart-erp,saurabh6790/trufil_app,gangadhar-kadam/church-erpnext,meisterkleister/erpnext,mbauskar/helpdesk-erpnext,mbauskar/omnitech-demo-erpnext,meisterkleister/erpnext,gangadhar-kadam/nassimapp,anandpdoshi/erpnext,shitolepriya/test-erp,gangadharkadam/vlinkerp,gangadhar-kadam/verve_erp,gangadhar-kadam/verve-erp,BhupeshGupta/erpnext,gangadhar-kadam/hrerp,gangadharkadam/letzerp,saurabh6790/medsynaptic-app,gangadharkadam/v6_erp,saurabh6790/pow-app,Drooids/erpnext,saurabh6790/med_new_app,njmube/erpnext,pombredanne/erpnext,indictranstech/erpnext,indictranstech/reciphergroup-erpnext,gangadhar-kadam/powapp,suyashphadtare/sajil-final-erp,shft117/SteckerApp,indictranstech/vestasi-erpnext,rohitwaghchaure/New_Theme_Erp,indictranstech/trufil-erpnext,gangadharkadam/v4_erp,gangadharkadam/saloon_erp_install,indictranstech/fbd_erpnext,anandpdoshi/erpnext,gangadharkadam/smrterp,mbauskar/Das_Erpnext,indictranstech/phrerp,gangadharkadam/vlinkerp,hernad/erpnext,rohitwaghchaure/GenieManager-erpnext,indictranstech/Das_Erpnext,mbauskar/phrerp,saurabh6790/test-erp,mbauskar/alec_frappe5_erpnext,gangadharkadam/vlinkerp,fuhongliang/erpnext,shitolepriya/test-erp,gangadharkadam/contributionerp,ThiagoGarciaAlves/erpnext,Tejal011089/huntercamp_erpnext,rohitwaghchaure/GenieManager-erpnext,indictranstech/Das_Erpnext,gangadharkadam/v6_erp,rohitwaghchaure/GenieManager-erpnext,suyashphadtare/sajil-final-erp,pombredanne/erpnext,hernad/erpnext,mbauskar/omnitech-erpnext,saurabh6790/omnitech-apps,indictranstech/erpnext,Yellowen/Owrang,rohitwaghchaure/New_Theme_Erp,gangadharkadam/sterp,indictranstech/phrerp,gangadharkadam/sterp,indictranstech/buyback-erp,sagar30051991/ozsmart-erp,gangadharkadam/office_erp,gangadhar-kadam/powapp,hatwar/Das_erpnext,gangadhar-kadam/adb-erp,gangadhar-kadam/latestchurcherp,gangadharkadam/letzerp,gangadharkadam/contributionerp,gangadhar-kadam/mtn-erpnext,mbauskar/internal-hr,gangadharkadam/verveerp,indictranstech/Das_Erpnext,gangadharkadam/office_erp,indictranstech/osmosis-erpnext,saurabh6790/medsyn-app,Tejal011089/trufil-erpnext,suyashphadtare/sajil-erp,gsnbng/erpnext,saurabh6790/alert-med-app,mbauskar/Das_Erpnext,ShashaQin/erpnext,saurabh6790/OFF-RISAPP,gangadhar-kadam/verve-erp,gangadharkadam/v5_erp,Tejal011089/Medsyn2_app,indictranstech/vestasi-erpnext,sheafferusa/erpnext,gangadhar-kadam/mic-erpnext,hernad/erpnext,mbauskar/internal-hr,gangadhar-kadam/sms-erpnext,saurabh6790/aimobilize-app-backup,saurabh6790/medsyn-app1,indictranstech/focal-erpnext,ThiagoGarciaAlves/erpnext,BhupeshGupta/erpnext,Tejal011089/fbd_erpnext,indictranstech/focal-erpnext,Tejal011089/paypal_erpnext,indictranstech/fbd_erpnext,mbauskar/Das_Erpnext,SPKian/Testing,saurabh6790/med_new_app,indictranstech/focal-erpnext,saurabh6790/omnit-app,gangadharkadam/smrterp,gangadharkadam/v4_erp,aruizramon/alec_erpnext,gangadhar-kadam/helpdesk-erpnext,gangadhar-kadam/latestchurcherp,suyashphadtare/gd-erp,gsnbng/erpnext,Tejal011089/digitales_erpnext,gangadharkadam/saloon_erp,meisterkleister/erpnext,dieface/erpnext,SPKian/Testing,saurabh6790/medsynaptic1-app,ThiagoGarciaAlves/erpnext,Tejal011089/osmosis_erpnext,saurabh6790/test_final_med_app,sheafferusa/erpnext,gangadhar-kadam/prjapp,gangadharkadam/sher,geekroot/erpnext,hanselke/erpnext-1,hatwar/focal-erpnext,suyashphadtare/vestasi-update-erp,sheafferusa/erpnext,gangadhar-kadam/latestchurcherp,saurabh6790/aimobilize-app-backup,saurabh6790/test-erp,netfirms/erpnext,saurabh6790/test-erp,indictranstech/internal-erpnext,Tejal011089/trufil-erpnext,gangadhar-kadam/helpdesk-erpnext,mbauskar/alec_frappe5_erpnext,SPKian/Testing2,rohitwaghchaure/digitales_erpnext,gangadharkadam/letzerp,suyashphadtare/vestasi-update-erp,saurabh6790/medsynaptic-app,mbauskar/helpdesk-erpnext,mbauskar/phrerp,Tejal011089/osmosis_erpnext,mbauskar/sapphire-erpnext,pawaranand/phrerp,shitolepriya/test-erp,suyashphadtare/sajil-final-erp,Tejal011089/med2-app,gangadhar-kadam/verve_erp,indictranstech/focal-erpnext,indictranstech/tele-erpnext,indictranstech/biggift-erpnext,sagar30051991/ozsmart-erp,Tejal011089/Medsyn2_app,saurabh6790/omnit-app,gangadhar-kadam/sapphire_app,suyashphadtare/vestasi-erp-final,anandpdoshi/erpnext,pawaranand/phrerp,BhupeshGupta/erpnext,gsnbng/erpnext,rohitwaghchaure/erpnext-receipher,mbauskar/omnitech-demo-erpnext,gangadharkadam/saloon_erp,indictranstech/fbd_erpnext,rohitwaghchaure/erpnext_smart,aruizramon/alec_erpnext,ShashaQin/erpnext,meisterkleister/erpnext,saurabh6790/omni-apps,netfirms/erpnext,indictranstech/biggift-erpnext,netfirms/erpnext,gangadharkadam/saloon_erp,saurabh6790/aimobilize,gangadharkadam/sher,BhupeshGupta/erpnext,suyashphadtare/vestasi-erp-1,gangadhar-kadam/verve_live_erp,gangadhar-kadam/latestchurcherp,indictranstech/biggift-erpnext,gangadhar-kadam/verve_live_erp,gangadharkadam/v4_erp,gangadharkadam/v4_erp,Tejal011089/digitales_erpnext,njmube/erpnext,gmarke/erpnext,gangadharkadam/office_erp,Drooids/erpnext,pawaranand/phrerp,shitolepriya/test-erp,rohitwaghchaure/digitales_erpnext,indictranstech/phrerp,indictranstech/internal-erpnext,njmube/erpnext,shft117/SteckerApp,indictranstech/tele-erpnext,saurabh6790/aimobilize,mbauskar/phrerp,mahabuber/erpnext,suyashphadtare/vestasi-erp-jan-end,saurabh6790/omn-app,hernad/erpnext,suyashphadtare/vestasi-erp-final,suyashphadtare/vestasi-erp-jan-end,Tejal011089/huntercamp_erpnext,indictranstech/biggift-erpnext,Drooids/erpnext,susuchina/ERPNEXT,ThiagoGarciaAlves/erpnext,Tejal011089/trufil-erpnext,indictranstech/reciphergroup-erpnext,Tejal011089/med2-app,gangadharkadam/saloon_erp,sheafferusa/erpnext,hatwar/buyback-erpnext,gangadharkadam/verveerp,indictranstech/phrerp,indictranstech/vestasi-erpnext,mbauskar/internal-hr,4commerce-technologies-AG/erpnext,rohitwaghchaure/New_Theme_Erp,MartinEnder/erpnext-de,shft117/SteckerApp,suyashphadtare/vestasi-erp-jan-end,gmarke/erpnext,SPKian/Testing2,saurabh6790/trufil_app,saurabh6790/test_final_med_app,Tejal011089/digitales_erpnext,Tejal011089/paypal_erpnext,indictranstech/tele-erpnext,suyashphadtare/vestasi-update-erp,saurabh6790/medsyn-app,hatwar/focal-erpnext,gangadharkadam/saloon_erp_install,gangadhar-kadam/verve_live_erp
|
|
d230a6bdb62a5c6e70dbc57921478f7fb224b534
|
tests/profile/test_SampleExpectationsDatasetProfiler.py
|
tests/profile/test_SampleExpectationsDatasetProfiler.py
|
import json
import os
from collections import OrderedDict
import pytest
from six import PY2
import great_expectations as ge
from great_expectations.data_context.util import file_relative_path
from great_expectations.dataset.pandas_dataset import PandasDataset
from great_expectations.datasource import PandasDatasource
from great_expectations.profile.base import DatasetProfiler
from great_expectations.profile.basic_dataset_profiler import SampleExpectationsDatasetProfiler
from great_expectations.profile.columns_exist import ColumnsExistProfiler
from tests.test_utils import expectationSuiteValidationResultSchema
def test__find_next_low_card_column(non_numeric_low_card_dataset, non_numeric_high_card_dataset):
columns = non_numeric_low_card_dataset.get_table_columns()
column_cache = {}
profiled_columns = {
"numeric": [],
"low_card": [],
"string": [],
"datetime": []
}
assert SampleExpectationsDatasetProfiler._find_next_low_card_column(
non_numeric_low_card_dataset,
columns,
profiled_columns,
column_cache
) == "lowcardnonnum"
|
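A hedged way to run just this test during development (module path per the file location above; the node id follows standard pytest syntax):
import subprocess
subprocess.call([
    "pytest",
    "tests/profile/test_SampleExpectationsDatasetProfiler.py::test__find_next_low_card_column",
])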
Add test module for SampleExpectationsDatasetProfiler
|
Add test module for SampleExpectationsDatasetProfiler
- add test__find_next_low_card_column
|
Python
|
apache-2.0
|
great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations
|
|
3850f29d7dc53ffa53a7f08e0ea96d174259273f
|
examples/offline_examples/load_html_test.py
|
examples/offline_examples/load_html_test.py
|
import pytest
from seleniumbase import BaseCase
@pytest.mark.offline # Can be run with: "pytest -m offline"
class OfflineTests(BaseCase):
def test_load_html_string(self):
html = "<h2>Hello</h2><p><input /> <button>OK!</button></p>"
self.load_html_string(html) # Open "data:text/html," then replace html
self.assert_text("Hello", "h2")
self.assert_text("OK!", "button")
self.type("input", "Goodbye")
self.click("button")
new_html = '<h3>Checkbox</h3><p><input type="checkbox" />Check Me!</p>'
self.set_content(new_html) # Same as load_html_string(), but keeps URL
self.assert_text("Checkbox", "h3")
self.assert_text("Check Me!", "p")
self.assert_false(self.is_selected("input"))
self.click("input")
self.assert_true(self.is_selected("input"))
|
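A hedged run example; the offline marker is defined by the decorator above, and --headless is a standard SeleniumBase option:
import subprocess
subprocess.call([
    "pytest", "examples/offline_examples/load_html_test.py",
    "-m", "offline", "--headless",
])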
Add example tests for "load_html_string()" and "set_content()"
|
Add example tests for "load_html_string()" and "set_content()"
|
Python
|
mit
|
mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase
|
Add example tests for "load_html_string()" and "set_content()"
|
import pytest
from seleniumbase import BaseCase
@pytest.mark.offline # Can be run with: "pytest -m offline"
class OfflineTests(BaseCase):
def test_load_html_string(self):
html = "<h2>Hello</h2><p><input /> <button>OK!</button></p>"
self.load_html_string(html) # Open "data:text/html," then replace html
self.assert_text("Hello", "h2")
self.assert_text("OK!", "button")
self.type("input", "Goodbye")
self.click("button")
new_html = '<h3>Checkbox</h3><p><input type="checkbox" />Check Me!</p>'
self.set_content(new_html) # Same as load_html_string(), but keeps URL
self.assert_text("Checkbox", "h3")
self.assert_text("Check Me!", "p")
self.assert_false(self.is_selected("input"))
self.click("input")
self.assert_true(self.is_selected("input"))
|
<commit_before><commit_msg>Add example tests for "load_html_string()" and "set_content()"<commit_after>
|
import pytest
from seleniumbase import BaseCase
@pytest.mark.offline # Can be run with: "pytest -m offline"
class OfflineTests(BaseCase):
def test_load_html_string(self):
html = "<h2>Hello</h2><p><input /> <button>OK!</button></p>"
self.load_html_string(html) # Open "data:text/html," then replace html
self.assert_text("Hello", "h2")
self.assert_text("OK!", "button")
self.type("input", "Goodbye")
self.click("button")
new_html = '<h3>Checkbox</h3><p><input type="checkbox" />Check Me!</p>'
self.set_content(new_html) # Same as load_html_string(), but keeps URL
self.assert_text("Checkbox", "h3")
self.assert_text("Check Me!", "p")
self.assert_false(self.is_selected("input"))
self.click("input")
self.assert_true(self.is_selected("input"))
|
Add example tests for "load_html_string()" and "set_content()"import pytest
from seleniumbase import BaseCase
@pytest.mark.offline # Can be run with: "pytest -m offline"
class OfflineTests(BaseCase):
def test_load_html_string(self):
html = "<h2>Hello</h2><p><input /> <button>OK!</button></p>"
self.load_html_string(html) # Open "data:text/html," then replace html
self.assert_text("Hello", "h2")
self.assert_text("OK!", "button")
self.type("input", "Goodbye")
self.click("button")
new_html = '<h3>Checkbox</h3><p><input type="checkbox" />Check Me!</p>'
self.set_content(new_html) # Same as load_html_string(), but keeps URL
self.assert_text("Checkbox", "h3")
self.assert_text("Check Me!", "p")
self.assert_false(self.is_selected("input"))
self.click("input")
self.assert_true(self.is_selected("input"))
|
<commit_before><commit_msg>Add example tests for "load_html_string()" and "set_content()"<commit_after>import pytest
from seleniumbase import BaseCase
@pytest.mark.offline # Can be run with: "pytest -m offline"
class OfflineTests(BaseCase):
def test_load_html_string(self):
html = "<h2>Hello</h2><p><input /> <button>OK!</button></p>"
self.load_html_string(html) # Open "data:text/html," then replace html
self.assert_text("Hello", "h2")
self.assert_text("OK!", "button")
self.type("input", "Goodbye")
self.click("button")
new_html = '<h3>Checkbox</h3><p><input type="checkbox" />Check Me!</p>'
self.set_content(new_html) # Same as load_html_string(), but keeps URL
self.assert_text("Checkbox", "h3")
self.assert_text("Check Me!", "p")
self.assert_false(self.is_selected("input"))
self.click("input")
self.assert_true(self.is_selected("input"))
|
|
f9efd374d11faf709e37bbee05d4735ab94c4b46
|
tests/strings/string_format_combined_simple.py
|
tests/strings/string_format_combined_simple.py
|
a = "well"
b = "seems to work"
c = "something else"
d = 10
f = 15
s = "%s: %d, %s: %d, %s: %d" % (a, d, b, f, c, d)
print s
|
Add combined string formatting test
|
Add combined string formatting test
|
Python
|
mit
|
mattpap/py2js,buchuki/pyjaco,chrivers/pyjaco,buchuki/pyjaco,buchuki/pyjaco,mattpap/py2js,qsnake/py2js,chrivers/pyjaco,chrivers/pyjaco,qsnake/py2js
|
Add combined string formatting test
|
a = "well"
b = "seems to work"
c = "something else"
d = 10
f = 15
s = "%s: %d, %s: %d, %s: %d" % (a, d, b, f, c, d)
print s
|
<commit_before><commit_msg>Add combined string formatting test<commit_after>
|
a = "well"
b = "seems to work"
c = "something else"
d = 10
f = 15
s = "%s: %d, %s: %d, %s: %d" % (a, d, b, f, c, d)
print s
|
Add combined string formatting testa = "well"
b = "seems to work"
c = "something else"
d = 10
f = 15
s = "%s: %d, %s: %d, %s: %d" % (a, d, b, f, c, d)
print s
|
<commit_before><commit_msg>Add combined string formatting test<commit_after>a = "well"
b = "seems to work"
c = "something else"
d = 10
f = 15
s = "%s: %d, %s: %d, %s: %d" % (a, d, b, f, c, d)
print s
|
|
eba23f8dc96e9f14a27b813313f579b6b9146f09
|
senlin/tests/functional/test_profile_type.py
|
senlin/tests/functional/test_profile_type.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.tests.functional import base
class TestProfileType(base.SenlinFunctionalTest):
def test_get_profile_types(self):
# Check that listing profile types works.
rel_url = 'profile_types'
status = [200]
resp = self.client.api_request('GET', rel_url,
expected_resp_status=status)
profile_types = resp.body['profile_types']
expected_profile_types = [{'name': 'os.nova.server'},
{'name': 'os.heat.stack'}]
self.assertEqual(expected_profile_types, profile_types)
|
Add functional test for listing profile_type
|
Add functional test for listing profile_type
This patch adds a functional test case for listing Senlin profile
types.
Change-Id: I2510e3d0d7d3b9a216019dc1fda6aebe4a25e2fd
|
Python
|
apache-2.0
|
openstack/senlin,Alzon/senlin,Alzon/senlin,openstack/senlin,stackforge/senlin,tengqm/senlin-container,stackforge/senlin,openstack/senlin,tengqm/senlin-container
|
Add functional test for listing profile_type
This patch adds a functional test case for listing Senlin profile
types.
Change-Id: I2510e3d0d7d3b9a216019dc1fda6aebe4a25e2fd
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.tests.functional import base
class TestProfileType(base.SenlinFunctionalTest):
def test_get_profile_types(self):
# Check that listing profile types works.
rel_url = 'profile_types'
status = [200]
resp = self.client.api_request('GET', rel_url,
expected_resp_status=status)
profile_types = resp.body['profile_types']
expected_profile_types = [{'name': 'os.nova.server'},
{'name': 'os.heat.stack'}]
self.assertEqual(expected_profile_types, profile_types)
|
<commit_before><commit_msg>Add functional test for listing profile_type
This patch adds a functional test case for listing Senlin profile
types.
Change-Id: I2510e3d0d7d3b9a216019dc1fda6aebe4a25e2fd<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.tests.functional import base
class TestProfileType(base.SenlinFunctionalTest):
def test_get_profile_types(self):
# Check that listing profile types works.
rel_url = 'profile_types'
status = [200]
resp = self.client.api_request('GET', rel_url,
expected_resp_status=status)
profile_types = resp.body['profile_types']
expected_profile_types = [{'name': 'os.nova.server'},
{'name': 'os.heat.stack'}]
self.assertEqual(expected_profile_types, profile_types)
|
Add functional test for listing profile_type
This patch adds a functional test case for listing Senlin profile
types.
Change-Id: I2510e3d0d7d3b9a216019dc1fda6aebe4a25e2fd# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.tests.functional import base
class TestProfileType(base.SenlinFunctionalTest):
def test_get_profile_types(self):
# Check that listing profile types works.
rel_url = 'profile_types'
status = [200]
resp = self.client.api_request('GET', rel_url,
expected_resp_status=status)
profile_types = resp.body['profile_types']
expected_profile_types = [{'name': 'os.nova.server'},
{'name': 'os.heat.stack'}]
self.assertEqual(expected_profile_types, profile_types)
|
<commit_before><commit_msg>Add functional test for listing profile_type
This patch adds a functional test case for listing Senlin profile
types.
Change-Id: I2510e3d0d7d3b9a216019dc1fda6aebe4a25e2fd<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.tests.functional import base
class TestProfileType(base.SenlinFunctionalTest):
def test_get_profile_types(self):
# Check that listing profile types works.
rel_url = 'profile_types'
status = [200]
resp = self.client.api_request('GET', rel_url,
expected_resp_status=status)
profile_types = resp.body['profile_types']
expected_profile_types = [{'name': 'os.nova.server'},
{'name': 'os.heat.stack'}]
self.assertEqual(expected_profile_types, profile_types)
|
|
e01eb8dd1fa7e79a176cc1017cff0e3b2e619540
|
algorithms/a_star_tree_manhattan_distance.py
|
algorithms/a_star_tree_manhattan_distance.py
|
"""
pynpuzzle - Solve n-puzzle with Python
A* tree search algorithm using manhattan distance heuristic
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
import heapq
from .util import best_first_seach as bfs
def search(state, goal_state):
"""A* tree search using manhattan distance heuristic"""
def gn(node):
return node.gn()
tiles_places = []
for i in range(len(goal_state)):
for j in range(len(goal_state)):
heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))
def hn(node):
cost = 0
for i in range(len(node.state)):
for j in range(len(node.state)):
tile_i, tile_j = tiles_places[node.state[i][j]][1]
if i != tile_i or j != tile_j:
cost += abs(tile_i - i) + abs(tile_j - j)
return cost
def fn(node):
return gn(node) + hn(node)
return bfs.search(state, goal_state, fn)
|
Add a* tree search algorithm using manhattan distance heuristic
|
Add a* tree search algorithm using manhattan distance heuristic
|
Python
|
mit
|
mahdavipanah/pynpuzzle
|
Add a* tree search algorithm using manhattan distance heuristic
|
"""
pynpuzzle - Solve n-puzzle with Python
A* tree search algorithm using manhattan distance heuristic
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
import heapq
from .util import best_first_seach as bfs
def search(state, goal_state):
"""A* tree search using manhattan distance heuristic"""
def gn(node):
return node.gn()
tiles_places = []
for i in range(len(goal_state)):
for j in range(len(goal_state)):
heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))
def hn(node):
cost = 0
for i in range(len(node.state)):
for j in range(len(node.state)):
tile_i, tile_j = tiles_places[node.state[i][j]][1]
if i != tile_i or j != tile_j:
cost += abs(tile_i - i) + abs(tile_j - j)
return cost
def fn(node):
return gn(node) + hn(node)
return bfs.search(state, goal_state, fn)
|
<commit_before><commit_msg>Add a* tree search algorithm using manhattan distance heuristic<commit_after>
|
"""
pynpuzzle - Solve n-puzzle with Python
A* tree search algorithm using manhattan distance heuristic
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
import heapq
from .util import best_first_seach as bfs
def search(state, goal_state):
"""A* tree search using manhattan distance heuristic"""
def gn(node):
return node.gn()
tiles_places = []
for i in range(len(goal_state)):
for j in range(len(goal_state)):
heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))
def hn(node):
cost = 0
for i in range(len(node.state)):
for j in range(len(node.state)):
tile_i, tile_j = tiles_places[node.state[i][j]][1]
if i != tile_i or j != tile_j:
cost += abs(tile_i - i) + abs(tile_j - j)
return cost
def fn(node):
return gn(node) + hn(node)
return bfs.search(state, goal_state, fn)
|
Add a* tree search algorithm using manhattan distance heuristic"""
pynpuzzle - Solve n-puzzle with Python
A* tree search algorithm using manhattan distance heuristic
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
import heapq
from .util import best_first_seach as bfs
def search(state, goal_state):
"""A* tree search using manhattan distance heuristic"""
def gn(node):
return node.gn()
tiles_places = []
for i in range(len(goal_state)):
for j in range(len(goal_state)):
heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))
def hn(node):
cost = 0
for i in range(len(node.state)):
for j in range(len(node.state)):
tile_i, tile_j = tiles_places[node.state[i][j]][1]
if i != tile_i or j != tile_j:
cost += abs(tile_i - i) + abs(tile_j - j)
return cost
def fn(node):
return gn(node) + hn(node)
return bfs.search(state, goal_state, fn)
|
<commit_before><commit_msg>Add a* tree search algorithm using manhattan distance heuristic<commit_after>"""
pynpuzzle - Solve n-puzzle with Python
A* tree search algorithm using manhattan distance heuristic
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
import heapq
from .util import best_first_seach as bfs
def search(state, goal_state):
"""A* tree search using manhattan distance heuristic"""
def gn(node):
return node.gn()
tiles_places = []
for i in range(len(goal_state)):
for j in range(len(goal_state)):
heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))
def hn(node):
cost = 0
for i in range(len(node.state)):
for j in range(len(node.state)):
tile_i, tile_j = tiles_places[node.state[i][j]][1]
if i != tile_i or j != tile_j:
cost += abs(tile_i - i) + abs(tile_j - j)
return cost
def fn(node):
return gn(node) + hn(node)
return bfs.search(state, goal_state, fn)
|
|
bef7208ea4bbaa26638f8ab22148b2c63fed5a2a
|
blast-search.py
|
blast-search.py
|
import os
import re
import sets
import glob
import logging
import MySQLdb
import library.genbank
logging.basicConfig(level=logging.INFO)
from Bio import Entrez, SeqIO, SearchIO
from Bio.Blast import NCBIWWW, NCBIXML
### A few important static variables
library.genbank.email = 'me@my.address.com'
print "\n\nGetting a list of FASTA files..."
fasta_files = glob.glob("fasta/*.FASTA") # get a list of all fasta files in /fasta
file_count = len(fasta_files)
print "%s files successful found.\n" %file_count
print "WARNING:\n"
print "There are %s FASTA files to be processed." %file_count
print "This process can take quite a long time.\n\n"
for f in fasta_files:
print "Reading original FASTA file %s" %f[7:]
with open (f, "r") as fasta_file:
## parse input to get a clean file_id
sequences = fasta_file.read()
fasta_file.close()
print "Done."
print "Starting BLASTN Search against %s" %f[7:]
print "This may take awhile, and requires a presistant Internet connection..."
## Blast using the FASTA set, and assing the XML string result to result_handle
result_handle = NCBIWWW.qblast("blastn", "nt", sequences)
print "Done."
print "Writing results to a BLAST.XML file..."
## write the XML string to a file for later parsing
blast_file_id = "".join(["blast-temp/", f[7:-6], ".xml"])
save_result = open(blast_file_id, "w")
save_result.write(result_handle.read())
save_result.close()
result_handle.close()
print "Done."
print "BLAST search successfully completed."
print "Process Complete."
|
Split the blast search functionality from the compare feature to compartmentalize time-heavy features.
|
Split the blast search functionality from the compare feature to compartmentalize time-heavy features.
|
Python
|
bsd-3-clause
|
lcoghill/phyloboost,lcoghill/phyloboost,lcoghill/phyloboost
|
Split the blast search functionality from the compare feature to compartmentalize time-heavy features.
|
import os
import re
import sets
import glob
import logging
import MySQLdb
import library.genbank
logging.basicConfig(level=logging.INFO)
from Bio import Entrez, SeqIO, SearchIO
from Bio.Blast import NCBIWWW, NCBIXML
### A few important static variables
library.genbank.email = 'me@my.address.com'
print "\n\nGetting a list of FASTA files..."
fasta_files = glob.glob("fasta/*.FASTA") # get a list of all fasta files in /fasta
file_count = len(fasta_files)
print "%s files successful found.\n" %file_count
print "WARNING:\n"
print "There are %s FASTA files to be processed." %file_count
print "This process can take quite a long time.\n\n"
for f in fasta_files:
print "Reading original FASTA file %s" %f[7:]
with open (f, "r") as fasta_file:
## parse input to get a clean file_id
sequences = fasta_file.read()
fasta_file.close()
print "Done."
print "Starting BLASTN Search against %s" %f[7:]
print "This may take awhile, and requires a presistant Internet connection..."
## Blast using the FASTA set, and assing the XML string result to result_handle
result_handle = NCBIWWW.qblast("blastn", "nt", sequences)
print "Done."
print "Writing results to a BLAST.XML file..."
## write the XML string to a file for later parsing
blast_file_id = "".join(["blast-temp/", f[7:-6], ".xml"])
save_result = open(blast_file_id, "w")
save_result.write(result_handle.read())
save_result.close()
result_handle.close()
print "Done."
print "BLAST search successfully completed."
print "Process Complete."
|
<commit_before><commit_msg>Split the blast search functionality from the compare feature to compartmentalize time-heavy features.<commit_after>
|
import os
import re
import sets
import glob
import logging
import MySQLdb
import library.genbank
logging.basicConfig(level=logging.INFO)
from Bio import Entrez, SeqIO, SearchIO
from Bio.Blast import NCBIWWW, NCBIXML
### A few important static variables
library.genbank.email = 'me@my.address.com'
print "\n\nGetting a list of FASTA files..."
fasta_files = glob.glob("fasta/*.FASTA") # get a list of all fasta files in /fasta
file_count = len(fasta_files)
print "%s files successful found.\n" %file_count
print "WARNING:\n"
print "There are %s FASTA files to be processed." %file_count
print "This process can take quite a long time.\n\n"
for f in fasta_files:
print "Reading original FASTA file %s" %f[7:]
with open (f, "r") as fasta_file:
## parse input to get a clean file_id
sequences = fasta_file.read()
fasta_file.close()
print "Done."
print "Starting BLASTN Search against %s" %f[7:]
print "This may take awhile, and requires a presistant Internet connection..."
## Blast using the FASTA set, and assing the XML string result to result_handle
result_handle = NCBIWWW.qblast("blastn", "nt", sequences)
print "Done."
print "Writing results to a BLAST.XML file..."
## write the XML string to a file for later parsing
blast_file_id = "".join(["blast-temp/", f[7:-6], ".xml"])
save_result = open(blast_file_id, "w")
save_result.write(result_handle.read())
save_result.close()
result_handle.close()
print "Done."
print "BLAST search successfully completed."
print "Process Complete."
|
Split the blast search functionality from the compare feature to compartmentalize time-heavy features.import os
import re
import sets
import glob
import logging
import MySQLdb
import library.genbank
logging.basicConfig(level=logging.INFO)
from Bio import Entrez, SeqIO, SearchIO
from Bio.Blast import NCBIWWW, NCBIXML
### A few important static variables
library.genbank.email = 'me@my.address.com'
print "\n\nGetting a list of FASTA files..."
fasta_files = glob.glob("fasta/*.FASTA") # get a list of all fasta files in /fasta
file_count = len(fasta_files)
print "%s files successful found.\n" %file_count
print "WARNING:\n"
print "There are %s FASTA files to be processed." %file_count
print "This process can take quite a long time.\n\n"
for f in fasta_files:
print "Reading original FASTA file %s" %f[7:]
with open (f, "r") as fasta_file:
## parse input to get a clean file_id
sequences = fasta_file.read()
fasta_file.close()
print "Done."
print "Starting BLASTN Search against %s" %f[7:]
print "This may take awhile, and requires a presistant Internet connection..."
## Blast using the FASTA set, and assing the XML string result to result_handle
result_handle = NCBIWWW.qblast("blastn", "nt", sequences)
print "Done."
print "Writing results to a BLAST.XML file..."
## write the XML string to a file for later parsing
blast_file_id = "".join(["blast-temp/", f[7:-6], ".xml"])
save_result = open(blast_file_id, "w")
save_result.write(result_handle.read())
save_result.close()
result_handle.close()
print "Done."
print "BLAST search successfully completed."
print "Process Complete."
|
<commit_before><commit_msg>Split the blast search functionality from the compare feature to compartmentalize time-heavy features.<commit_after>import os
import re
import sets
import glob
import logging
import MySQLdb
import library.genbank
logging.basicConfig(level=logging.INFO)
from Bio import Entrez, SeqIO, SearchIO
from Bio.Blast import NCBIWWW, NCBIXML
### A few important static variables
library.genbank.email = 'me@my.address.com'
print "\n\nGetting a list of FASTA files..."
fasta_files = glob.glob("fasta/*.FASTA") # get a list of all fasta files in /fasta
file_count = len(fasta_files)
print "%s files successful found.\n" %file_count
print "WARNING:\n"
print "There are %s FASTA files to be processed." %file_count
print "This process can take quite a long time.\n\n"
for f in fasta_files:
print "Reading original FASTA file %s" %f[7:]
with open (f, "r") as fasta_file:
## parse input to get a clean file_id
sequences = fasta_file.read()
fasta_file.close()
print "Done."
print "Starting BLASTN Search against %s" %f[7:]
print "This may take awhile, and requires a presistant Internet connection..."
## Blast using the FASTA set, and assing the XML string result to result_handle
result_handle = NCBIWWW.qblast("blastn", "nt", sequences)
print "Done."
print "Writing results to a BLAST.XML file..."
## write the XML string to a file for later parsing
blast_file_id = "".join(["blast-temp/", f[7:-6], ".xml"])
save_result = open(blast_file_id, "w")
save_result.write(result_handle.read())
save_result.close()
result_handle.close()
print "Done."
print "BLAST search successfully completed."
print "Process Complete."
|
|
d4522ad74add7848cfddb25bfcab656a7d47730e
|
src/auditlog/migrations/0003_logentry_detailed_object_repr.py
|
src/auditlog/migrations/0003_logentry_detailed_object_repr.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('auditlog', '0002_auto_support_long_primary_keys'),
]
operations = [
migrations.AddField(
model_name='logentry',
name='detailed_object_repr',
field=jsonfield.fields.JSONField(null=True, blank=True),
),
]
|
Add migration for detailed_object_repr field
|
Add migration for detailed_object_repr field
|
Python
|
mit
|
jjkester/django-auditlog,johnrtipton/django-auditlog,rauleb/django-auditlog,chris-griffin/django-auditlog,robmagee/django-auditlog,Zmeylol/auditlog,kbussell/django-auditlog
|
Add migration for detailed_object_repr field
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('auditlog', '0002_auto_support_long_primary_keys'),
]
operations = [
migrations.AddField(
model_name='logentry',
name='detailed_object_repr',
field=jsonfield.fields.JSONField(null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for detailed_object_repr field<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('auditlog', '0002_auto_support_long_primary_keys'),
]
operations = [
migrations.AddField(
model_name='logentry',
name='detailed_object_repr',
field=jsonfield.fields.JSONField(null=True, blank=True),
),
]
|
Add migration for detailed_object_repr field# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('auditlog', '0002_auto_support_long_primary_keys'),
]
operations = [
migrations.AddField(
model_name='logentry',
name='detailed_object_repr',
field=jsonfield.fields.JSONField(null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for detailed_object_repr field<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('auditlog', '0002_auto_support_long_primary_keys'),
]
operations = [
migrations.AddField(
model_name='logentry',
name='detailed_object_repr',
field=jsonfield.fields.JSONField(null=True, blank=True),
),
]
|
|
29467c4a52bb5caf492460b70cb58caf5fe8f728
|
calaccess_processed/management/commands/loadocdmodels.py
|
calaccess_processed/management/commands/loadocdmodels.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
from django.apps import apps
from django.utils.timezone import now
from django.core.management import call_command
from calaccess_processed.models import ProcessedDataVersion
from calaccess_processed.management.commands import CalAccessCommand
class Command(CalAccessCommand):
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
help = 'Load data extracted from scrape and raw data snapshot into OCD models'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.processed_version = ProcessedDataVersion.objects.latest()
self.start_datetime = now()
self.load()
self.finish_datetime = now()
self.archive()
self.success('Done!')
self.duration()
def load(self):
"""
Load all of the processed models.
"""
call_command(
'loadparties',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadballotmeasurecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadretentioncontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidatecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'mergecandidates',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidaciesfrom501s',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadincumbentofficeholders',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
def archive(self):
"""
Save a csv file for each loaded OCD model.
"""
ocd_models = [
m._meta.object_name for m in apps.get_app_config(
'opencivicdata'
).get_models()
if not m._meta.abstract and
m.objects.count() > 0
]
for m in ocd_models:
processed_data_file, created = self.processed_version.files.get_or_create(
file_name=m,
)
processed_data_file.process_start_time = self.start_datetime
processed_data_file.save()
call_command(
'archivecalaccessprocessedfile',
'opencivicdata',
m,
)
processed_data_file.process_finish_time = self.finish_datetime
processed_data_file.save()
self.duration()
|
Create ProcessedDataModels for OCD models, group loading into one cmd
|
Create ProcessedDataModels for OCD models, group loading into one cmd
|
Python
|
mit
|
california-civic-data-coalition/django-calaccess-processed-data,california-civic-data-coalition/django-calaccess-processed-data
|
Create ProcessedDataModels for OCD models, group loading into one cmd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
from django.apps import apps
from django.utils.timezone import now
from django.core.management import call_command
from calaccess_processed.models import ProcessedDataVersion
from calaccess_processed.management.commands import CalAccessCommand
class Command(CalAccessCommand):
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
help = 'Load data extracted from scrape and raw data snapshot into OCD models'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.processed_version = ProcessedDataVersion.objects.latest()
self.start_datetime = now()
self.load()
self.finish_datetime = now()
self.archive()
self.success('Done!')
self.duration()
def load(self):
"""
Load all of the processed models.
"""
call_command(
'loadparties',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadballotmeasurecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadretentioncontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidatecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'mergecandidates',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidaciesfrom501s',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadincumbentofficeholders',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
def archive(self):
"""
Save a csv file for each loaded OCD model.
"""
ocd_models = [
m._meta.object_name for m in apps.get_app_config(
'opencivicdata'
).get_models()
if not m._meta.abstract and
m.objects.count() > 0
]
for m in ocd_models:
processed_data_file, created = self.processed_version.files.get_or_create(
file_name=m,
)
processed_data_file.process_start_time = self.start_datetime
processed_data_file.save()
call_command(
'archivecalaccessprocessedfile',
'opencivicdata',
m,
)
processed_data_file.process_finish_time = self.finish_datetime
processed_data_file.save()
self.duration()
|
<commit_before><commit_msg>Create ProcessedDataModels for OCD models, group loading into one cmd<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
from django.apps import apps
from django.utils.timezone import now
from django.core.management import call_command
from calaccess_processed.models import ProcessedDataVersion
from calaccess_processed.management.commands import CalAccessCommand
class Command(CalAccessCommand):
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
help = 'Load data extracted from scrape and raw data snapshot into OCD models'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.processed_version = ProcessedDataVersion.objects.latest()
self.start_datetime = now()
self.load()
self.finish_datetime = now()
self.archive()
self.success('Done!')
self.duration()
def load(self):
"""
Load all of the processed models.
"""
call_command(
'loadparties',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadballotmeasurecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadretentioncontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidatecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'mergecandidates',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidaciesfrom501s',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadincumbentofficeholders',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
def archive(self):
"""
Save a csv file for each loaded OCD model.
"""
ocd_models = [
m._meta.object_name for m in apps.get_app_config(
'opencivicdata'
).get_models()
if not m._meta.abstract and
m.objects.count() > 0
]
for m in ocd_models:
processed_data_file, created = self.processed_version.files.get_or_create(
file_name=m,
)
processed_data_file.process_start_time = self.start_datetime
processed_data_file.save()
call_command(
'archivecalaccessprocessedfile',
'opencivicdata',
m,
)
processed_data_file.process_finish_time = self.finish_datetime
processed_data_file.save()
self.duration()
|
Create ProcessedDataModels for OCD models, group loading into one cmd#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
from django.apps import apps
from django.utils.timezone import now
from django.core.management import call_command
from calaccess_processed.models import ProcessedDataVersion
from calaccess_processed.management.commands import CalAccessCommand
class Command(CalAccessCommand):
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
help = 'Load data extracted from scrape and raw data snapshot into OCD models'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.processed_version = ProcessedDataVersion.objects.latest()
self.start_datetime = now()
self.load()
self.finish_datetime = now()
self.archive()
self.success('Done!')
self.duration()
def load(self):
"""
Load all of the processed models.
"""
call_command(
'loadparties',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadballotmeasurecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadretentioncontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidatecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'mergecandidates',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidaciesfrom501s',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadincumbentofficeholders',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
def archive(self):
"""
Save a csv file for each loaded OCD model.
"""
ocd_models = [
m._meta.object_name for m in apps.get_app_config(
'opencivicdata'
).get_models()
if not m._meta.abstract and
m.objects.count() > 0
]
for m in ocd_models:
processed_data_file, created = self.processed_version.files.get_or_create(
file_name=m,
)
processed_data_file.process_start_time = self.start_datetime
processed_data_file.save()
call_command(
'archivecalaccessprocessedfile',
'opencivicdata',
m,
)
processed_data_file.process_finish_time = self.finish_datetime
processed_data_file.save()
self.duration()
|
<commit_before><commit_msg>Create ProcessedDataModels for OCD models, group loading into one cmd<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
from django.apps import apps
from django.utils.timezone import now
from django.core.management import call_command
from calaccess_processed.models import ProcessedDataVersion
from calaccess_processed.management.commands import CalAccessCommand
class Command(CalAccessCommand):
"""
Load data extracted from scrape and raw data snapshot into OCD models.
"""
help = 'Load data extracted from scrape and raw data snapshot into OCD models'
def handle(self, *args, **options):
"""
Make it happen.
"""
super(Command, self).handle(*args, **options)
self.processed_version = ProcessedDataVersion.objects.latest()
self.start_datetime = now()
self.load()
self.finish_datetime = now()
self.archive()
self.success('Done!')
self.duration()
def load(self):
"""
Load all of the processed models.
"""
call_command(
'loadparties',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadballotmeasurecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadretentioncontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidatecontests',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'mergecandidates',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadcandidaciesfrom501s',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
call_command(
'loadincumbentofficeholders',
verbosity=self.verbosity,
no_color=self.no_color,
)
self.duration()
def archive(self):
"""
Save a csv file for each loaded OCD model.
"""
ocd_models = [
m._meta.object_name for m in apps.get_app_config(
'opencivicdata'
).get_models()
if not m._meta.abstract and
m.objects.count() > 0
]
for m in ocd_models:
processed_data_file, created = self.processed_version.files.get_or_create(
file_name=m,
)
processed_data_file.process_start_time = self.start_datetime
processed_data_file.save()
call_command(
'archivecalaccessprocessedfile',
'opencivicdata',
m,
)
processed_data_file.process_finish_time = self.finish_datetime
processed_data_file.save()
self.duration()
|
|
af6f37dd7ccc38d788614a033440a9dafdabb884
|
nysa/ibuilder/lib/gen_scripts/gen_sim_tb.py
|
nysa/ibuilder/lib/gen_scripts/gen_sim_tb.py
|
import sys
import os
import string
import copy
from string import Template
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import sim_utils as sutils
import gen_sim_top
from gen import Gen
class GenSimTop(Gen):
def __init__(self):
return
def get_name (self):
print "generate sim tb!"
def gen_script (self, tags = {}, buf = "", user_paths = [], debug = False):
#Copy the interface so we don't modify the original
gs = gen_sim_top.GenSimTop()
buf = gs.gen_script(tags, buf, user_paths)
return sutils.generate_tb_module(tags, buf, user_paths)
|
ADD script to generate test bench
|
ADD script to generate test bench
|
Python
|
mit
|
CospanDesign/nysa,CospanDesign/nysa
|
ADD script to generate test bench
|
import sys
import os
import string
import copy
from string import Template
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import sim_utils as sutils
import gen_sim_top
from gen import Gen
class GenSimTop(Gen):
def __init__(self):
return
def get_name (self):
print "generate sim tb!"
def gen_script (self, tags = {}, buf = "", user_paths = [], debug = False):
#Copy the interface so we don't modify the original
gs = gen_sim_top.GenSimTop()
buf = gs.gen_script(tags, buf, user_paths)
return sutils.generate_tb_module(tags, buf, user_paths)
|
<commit_before><commit_msg>ADD script to generate test bench<commit_after>
|
import sys
import os
import string
import copy
from string import Template
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import sim_utils as sutils
import gen_sim_top
from gen import Gen
class GenSimTop(Gen):
def __init__(self):
return
def get_name (self):
print "generate sim tb!"
def gen_script (self, tags = {}, buf = "", user_paths = [], debug = False):
#Copy the interface so we don't modify the original
gs = gen_sim_top.GenSimTop()
buf = gs.gen_script(tags, buf, user_paths)
return sutils.generate_tb_module(tags, buf, user_paths)
|
ADD script to generate test benchimport sys
import os
import string
import copy
from string import Template
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import sim_utils as sutils
import gen_sim_top
from gen import Gen
class GenSimTop(Gen):
def __init__(self):
return
def get_name (self):
print "generate sim tb!"
def gen_script (self, tags = {}, buf = "", user_paths = [], debug = False):
#Copy the interface so we don't modify the original
gs = gen_sim_top.GenSimTop()
buf = gs.gen_script(tags, buf, user_paths)
return sutils.generate_tb_module(tags, buf, user_paths)
|
<commit_before><commit_msg>ADD script to generate test bench<commit_after>import sys
import os
import string
import copy
from string import Template
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import sim_utils as sutils
import gen_sim_top
from gen import Gen
class GenSimTop(Gen):
def __init__(self):
return
def get_name (self):
print "generate sim tb!"
def gen_script (self, tags = {}, buf = "", user_paths = [], debug = False):
#Copy the interface so we don't modify the original
gs = gen_sim_top.GenSimTop()
buf = gs.gen_script(tags, buf, user_paths)
return sutils.generate_tb_module(tags, buf, user_paths)
|
|
5e1ec7c300126ddec9976c7441cb255d36c36904
|
tools/rename_code.py
|
tools/rename_code.py
|
#!/bin/env python3
import os
import re
import sys
if len(sys.argv) < 2:  # or sys.argv[1] == "--help":
print("rename_code.py <source_code_directory>")
print("Update python code to new style.")
sys.exit(0)
old_names = set()
class_names = set()
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path) as f:
data = f.read()
for i in re.finditer(r"\bdef\s+(\w+)", data):
name = i.group(1)
if re.search(r"[A-Z]", name):
print(":", name)
old_names.add(name)
for i in re.finditer(r"\bclass\s+(\w+)", data):
name = i.group(1)
class_names.add(name)
#print("old_names:", old_names)
#print("class_names:", class_names)
for i in class_names:
if i in old_names:
print(i, "is also a class name")
old_names.remove(i)
#print("old_names:", old_names)
find_old_name = re.compile(r"(\b"+r"\b|\b".join(old_names)+r")\b")
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path, "r+") as f:
data = find_old_name.sub(
lambda x: re.sub(
"([A-Z])",
lambda n: "_"+n.group(1).lower(),
x.group(1)),
f.read())
f.seek(0)
f.write(data)
|
Add tool to update method names.
|
Add tool to update method names.
|
Python
|
apache-2.0
|
google/ci_edit,google/ci_edit,google/ci_edit
|
Add tool to update method names.
|
#!/bin/env python3
import os
import re
import sys
if len(sys.argv) < 2:  # or sys.argv[1] == "--help":
print("rename_code.py <source_code_directory>")
print("Update python code to new style.")
sys.exit(0)
old_names = set()
class_names = set()
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path) as f:
data = f.read()
for i in re.finditer(r"\bdef\s+(\w+)", data):
name = i.group(1)
if re.search(r"[A-Z]", name):
print(":", name)
old_names.add(name)
for i in re.finditer(r"\bclass\s+(\w+)", data):
name = i.group(1)
class_names.add(name)
#print("old_names:", old_names)
#print("class_names:", class_names)
for i in class_names:
if i in old_names:
print(i, "is also a class name")
old_names.remove(i)
#print("old_names:", old_names)
find_old_name = re.compile(r"(\b"+r"\b|\b".join(old_names)+r")\b")
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path, "r+") as f:
data = find_old_name.sub(
lambda x: re.sub(
"([A-Z])",
lambda n: "_"+n.group(1).lower(),
x.group(1)),
f.read())
f.seek(0)
f.write(data)
|
<commit_before><commit_msg>Add tool to update method names.<commit_after>
|
#!/bin/env python3
import os
import re
import sys
if len(sys.argv) < 2:  # or sys.argv[1] == "--help":
print("rename_code.py <source_code_directory>")
print("Update python code to new style.")
sys.exit(0)
old_names = set()
class_names = set()
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path) as f:
data = f.read()
for i in re.finditer(r"\bdef\s+(\w+)", data):
name = i.group(1)
if re.search(r"[A-Z]", name):
print(":", name)
old_names.add(name)
for i in re.finditer(r"\bclass\s+(\w+)", data):
name = i.group(1)
class_names.add(name)
#print("old_names:", old_names)
#print("class_names:", class_names)
for i in class_names:
if i in old_names:
print(i, "is also a class name")
old_names.remove(i)
#print("old_names:", old_names)
find_old_name = re.compile(r"(\b"+r"\b|\b".join(old_names)+r")\b")
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path, "r+") as f:
data = find_old_name.sub(
lambda x: re.sub(
"([A-Z])",
lambda n: "_"+n.group(1).lower(),
x.group(1)),
f.read())
f.seek(0)
f.write(data)
|
Add tool to update method names.#!/bin/env python3
import os
import re
import sys
if len(sys.argv) < 2:  # or sys.argv[1] == "--help":
print("rename_code.py <source_code_directory>")
print("Update python code to new style.")
sys.exit(0)
old_names = set()
class_names = set()
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path) as f:
data = f.read()
for i in re.finditer(r"\bdef\s+(\w+)", data):
name = i.group(1)
if re.search(r"[A-Z]", name):
print(":", name)
old_names.add(name)
for i in re.finditer(r"\bclass\s+(\w+)", data):
name = i.group(1)
class_names.add(name)
#print("old_names:", old_names)
#print("class_names:", class_names)
for i in class_names:
if i in old_names:
print(i, "is also a class name")
old_names.remove(i)
#print("old_names:", old_names)
find_old_name = re.compile(r"(\b"+r"\b|\b".join(old_names)+r")\b")
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path, "r+") as f:
data = find_old_name.sub(
lambda x: re.sub(
"([A-Z])",
lambda n: "_"+n.group(1).lower(),
x.group(1)),
f.read())
f.seek(0)
f.write(data)
|
<commit_before><commit_msg>Add tool to update method names.<commit_after>#!/bin/env python3
import os
import re
import sys
if len(sys.argv) < 2:  # or sys.argv[1] == "--help":
print("rename_code.py <source_code_directory>")
print("Update python code to new style.")
sys.exit(0)
old_names = set()
class_names = set()
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path) as f:
data = f.read()
for i in re.finditer(r"\bdef\s+(\w+)", data):
name = i.group(1)
if re.search(r"[A-Z]", name):
print(":", name)
old_names.add(name)
for i in re.finditer(r"\bclass\s+(\w+)", data):
name = i.group(1)
class_names.add(name)
#print("old_names:", old_names)
#print("class_names:", class_names)
for i in class_names:
if i in old_names:
print(i, "is also a class name")
old_names.remove(i)
#print("old_names:", old_names)
find_old_name = re.compile(r"(\b"+r"\b|\b".join(old_names)+r")\b")
for (dir, dirs, files) in os.walk(sys.argv[1]):
if dir == ".git":
continue
for file in files:
if not file.endswith(".py"):
continue
path = os.path.join(dir, file)
print("-----", path)
with open(path, "r+") as f:
data = find_old_name.sub(
lambda x: re.sub(
"([A-Z])",
lambda n: "_"+n.group(1).lower(),
x.group(1)),
f.read())
f.seek(0)
f.write(data)
|
|
1738872044ce26576b50895bee32a9933a5787cc
|
tools/touch_all_files.py
|
tools/touch_all_files.py
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
Add script to synthesize all uploaded files. Patch by Dan Callahan.
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.
|
Python
|
bsd-3-clause
|
pydotorg/pypi,pydotorg/pypi,pydotorg/pypi,pydotorg/pypi
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
<commit_before><commit_msg>Add script to synthesize all uploaded files.
Patch by Dan Callahan.<commit_after>
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
<commit_before><commit_msg>Add script to synthesize all uploaded files.
Patch by Dan Callahan.<commit_after>#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
|
2263af5f5aad3c787e566a629bf8ab6b1a1a36d6
|
generate-abuse.py
|
generate-abuse.py
|
#!/usr/bin/env python
import sys
import random
from abuse.parse import parse
from abuse.generate import generate
from abuse.generate import RuleSet
class RandomSelector(object):
def select(self, lower, upper):
return random.randint(lower, upper - 1)
rule_set = RuleSet()
parse(open(sys.argv[1]).read(), rule_set)
print generate(rule_set, RandomSelector())
|
Add script to read file and generate abuse
|
Add script to read file and generate abuse
|
Python
|
bsd-2-clause
|
mwilliamson/abuse,mwilliamson/abuse
|
Add script to read file and generate abuse
|
#!/usr/bin/env python
import sys
import random
from abuse.parse import parse
from abuse.generate import generate
from abuse.generate import RuleSet
class RandomSelector(object):
def select(self, lower, upper):
return random.randint(lower, upper - 1)
rule_set = RuleSet()
parse(open(sys.argv[1]).read(), rule_set)
print generate(rule_set, RandomSelector())
|
<commit_before><commit_msg>Add script to read file and generate abuse<commit_after>
|
#!/usr/bin/env python
import sys
import random
from abuse.parse import parse
from abuse.generate import generate
from abuse.generate import RuleSet
class RandomSelector(object):
def select(self, lower, upper):
return random.randint(lower, upper - 1)
rule_set = RuleSet()
parse(open(sys.argv[1]).read(), rule_set)
print generate(rule_set, RandomSelector())
|
Add script to read file and generate abuse#!/usr/bin/env python
import sys
import random
from abuse.parse import parse
from abuse.generate import generate
from abuse.generate import RuleSet
class RandomSelector(object):
def select(self, lower, upper):
return random.randint(lower, upper - 1)
rule_set = RuleSet()
parse(open(sys.argv[1]).read(), rule_set)
print generate(rule_set, RandomSelector())
|
<commit_before><commit_msg>Add script to read file and generate abuse<commit_after>#!/usr/bin/env python
import sys
import random
from abuse.parse import parse
from abuse.generate import generate
from abuse.generate import RuleSet
class RandomSelector(object):
def select(self, lower, upper):
return random.randint(lower, upper - 1)
rule_set = RuleSet()
parse(open(sys.argv[1]).read(), rule_set)
print generate(rule_set, RandomSelector())
|
|
13b5e76f80bf3364e1609e97970117c7b980c359
|
usability/codefolding/codefoldingpreprocessor.py
|
usability/codefolding/codefoldingpreprocessor.py
|
"""This preprocessor removes lines in code cells that have been marked as `folded`
by the codefolding extension
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Juergen Hasch
#
# Distributed under the terms of the Modified BSD License.
#
#-----------------------------------------------------------------------------
from IPython.nbconvert.preprocessors import *
import StringIO
class CodeFoldingPreprocessor(Preprocessor):
def fold_cell(self,cell,folded):
"""
Remove folded lines and add a '<->' at the parent line
"""
f = StringIO.StringIO(cell)
lines = f.readlines()
if folded[0] == 0 and lines[0][0] == '#':
self.log.info("folded: %s, %s" % (folded[0],lines[0][0]))
return lines[0].rstrip('\n') + '<->\n'
fold_indent = 0
fold = False
fcell = ""
for i,l in enumerate(lines):
indent = len(l)-len(l.lstrip(' '))
if indent <= fold_indent:
fold = False
fold_indent = 0
if i in folded:
fold = True
fold_indent = indent
fcell += l.rstrip('\n') + '<->\n'
if fold is False:
fcell += l
return fcell
def preprocess_cell(self, cell, resources, index):
"""
Read out metadata and remove lines if marked as `folded` in cell metadata.
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
if hasattr(cell, "input") and cell.cell_type == "code":
if hasattr(cell['metadata'], 'code_folding'):
folded = cell['metadata']['code_folding']
cell.input = self.fold_cell(cell.input, folded)
return cell, resources
|
Add preprocessor for codefolding extension
|
Add preprocessor for codefolding extension
This preprocessor removes folded lines in code cells as defined by the
cell metadata
|
Python
|
bsd-3-clause
|
andyneff/IPython-notebook-extensions,benvarkey/IPython-notebook-extensions,juhasch/IPython-notebook-extensions,davande/IPython-notebook-extensions,jbn/IPython-notebook-extensions,jcb91/IPython-notebook-extensions,motleytech/IPython-notebook-extensions,Konubinix/IPython-notebook-extensions,andyneff/IPython-notebook-extensions,davande/IPython-notebook-extensions,juhasch/IPython-notebook-extensions,jbn/IPython-notebook-extensions,benvarkey/IPython-notebook-extensions,jcb91/IPython-notebook-extensions,andyneff/IPython-notebook-extensions,jbn/IPython-notebook-extensions,andyneff/IPython-notebook-extensions,ipython-contrib/IPython-notebook-extensions,ipython-contrib/IPython-notebook-extensions,ipython-contrib/IPython-notebook-extensions,Konubinix/IPython-notebook-extensions,jbn/IPython-notebook-extensions,benvarkey/IPython-notebook-extensions,danielballan/IPython-notebook-extensions,ipython-contrib/IPython-notebook-extensions,jcb91/IPython-notebook-extensions,danielballan/IPython-notebook-extensions,motleytech/IPython-notebook-extensions,motleytech/IPython-notebook-extensions,danielballan/IPython-notebook-extensions,benvarkey/IPython-notebook-extensions,davande/IPython-notebook-extensions,motleytech/IPython-notebook-extensions,Konubinix/IPython-notebook-extensions,juhasch/IPython-notebook-extensions
|
Add preprocessor for codefolding extension
This preprocessor removes folded lines in code cells as defined by the
cell metadata
|
"""This preprocessor removes lines in code cells that have been marked as `folded`
by the codefolding extension
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Juergen Hasch
#
# Distributed under the terms of the Modified BSD License.
#
#-----------------------------------------------------------------------------
from IPython.nbconvert.preprocessors import *
import StringIO
class CodeFoldingPreprocessor(Preprocessor):
def fold_cell(self,cell,folded):
"""
Remove folded lines and add a '<->' at the parent line
"""
f = StringIO.StringIO(cell)
lines = f.readlines()
if folded[0] == 0 and lines[0][0] == '#':
self.log.info("folded: %s, %s" % (folded[0],lines[0][0]))
return lines[0].rstrip('\n') + '<->\n'
fold_indent = 0
fold = False
fcell = ""
for i,l in enumerate(lines):
indent = len(l)-len(l.lstrip(' '))
if indent <= fold_indent:
fold = False
fold_indent = 0
if i in folded:
fold = True
fold_indent = indent
fcell += l.rstrip('\n') + '<->\n'
if fold is False:
fcell += l
return fcell
def preprocess_cell(self, cell, resources, index):
"""
Read out metadata and remove lines if marked as `folded` in cell metadata.
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
if hasattr(cell, "input") and cell.cell_type == "code":
if hasattr(cell['metadata'], 'code_folding'):
folded = cell['metadata']['code_folding']
cell.input = self.fold_cell(cell.input, folded)
return cell, resources
|
<commit_before><commit_msg>Add preprocessor for codefolding extension
This preprocessor removes folded lines in code cells as defined by the
cell metadata<commit_after>
|
"""This preprocessor removes lines in code cells that have been marked as `folded`
by the codefolding extension
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Juergen Hasch
#
# Distributed under the terms of the Modified BSD License.
#
#-----------------------------------------------------------------------------
from IPython.nbconvert.preprocessors import *
import StringIO
class CodeFoldingPreprocessor(Preprocessor):
def fold_cell(self,cell,folded):
"""
Remove folded lines and add a '<->' at the parent line
"""
f = StringIO.StringIO(cell)
lines = f.readlines()
if folded[0] == 0 and lines[0][0] == '#':
self.log.info("folded: %s, %s" % (folded[0],lines[0][0]))
return lines[0].rstrip('\n') + '<->\n'
fold_indent = 0
fold = False
fcell = ""
for i,l in enumerate(lines):
indent = len(l)-len(l.lstrip(' '))
if indent <= fold_indent:
fold = False
fold_indent = 0
if i in folded:
fold = True
fold_indent = indent
fcell += l.rstrip('\n') + '<->\n'
if fold is False:
fcell += l
return fcell
def preprocess_cell(self, cell, resources, index):
"""
Read out metadata and remove lines if marked as `folded` in cell metadata.
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
if hasattr(cell, "input") and cell.cell_type == "code":
if hasattr(cell['metadata'], 'code_folding'):
folded = cell['metadata']['code_folding']
cell.input = self.fold_cell(cell.input, folded)
return cell, resources
|
Add preprocessor for codefolding extension
This preprocessor removes folded lines in code cells as defined by the
cell metadata"""This preprocessor removes lines in code cells that have been marked as `folded`
by the codefolding extension
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Juergen Hasch
#
# Distributed under the terms of the Modified BSD License.
#
#-----------------------------------------------------------------------------
from IPython.nbconvert.preprocessors import *
import StringIO
class CodeFoldingPreprocessor(Preprocessor):
def fold_cell(self,cell,folded):
"""
Remove folded lines and add a '<->' at the parent line
"""
f = StringIO.StringIO(cell)
lines = f.readlines()
if folded[0] == 0 and lines[0][0] == '#':
self.log.info("folded: %s, %s" % (folded[0],lines[0][0]))
return lines[0].rstrip('\n') + '<->\n'
fold_indent = 0
fold = False
fcell = ""
for i,l in enumerate(lines):
indent = len(l)-len(l.lstrip(' '))
if indent <= fold_indent:
fold = False
fold_indent = 0
if i in folded:
fold = True
fold_indent = indent
fcell += l.rstrip('\n') + '<->\n'
if fold is False:
fcell += l
return fcell
def preprocess_cell(self, cell, resources, index):
"""
Read out metadata and remove lines if marked as `folded` in cell metadata.
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
if hasattr(cell, "input") and cell.cell_type == "code":
if hasattr(cell['metadata'], 'code_folding'):
folded = cell['metadata']['code_folding']
cell.input = self.fold_cell(cell.input, folded)
return cell, resources
|
<commit_before><commit_msg>Add preprocessor for codefolding extension
This preprocessor removes folded lines in code cells as defined by the
cell metadata<commit_after>"""This preprocessor removes lines in code cells that have been marked as `folded`
by the codefolding extension
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Juergen Hasch
#
# Distributed under the terms of the Modified BSD License.
#
#-----------------------------------------------------------------------------
from IPython.nbconvert.preprocessors import *
import StringIO
class CodeFoldingPreprocessor(Preprocessor):
def fold_cell(self,cell,folded):
"""
Remove folded lines and add a '<->' at the parent line
"""
f = StringIO.StringIO(cell)
lines = f.readlines()
if folded[0] == 0 and lines[0][0] == '#':
self.log.info("folded: %s, %s" % (folded[0],lines[0][0]))
return lines[0].rstrip('\n') + '<->\n'
fold_indent = 0
fold = False
fcell = ""
for i,l in enumerate(lines):
indent = len(l)-len(l.lstrip(' '))
if indent <= fold_indent:
fold = False
fold_indent = 0
if i in folded:
fold = True
fold_indent = indent
fcell += l.rstrip('\n') + '<->\n'
if fold is False:
fcell += l
return fcell
def preprocess_cell(self, cell, resources, index):
"""
Read out metadata and remove lines if marked as `folded` in cell metadata.
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
if hasattr(cell, "input") and cell.cell_type == "code":
if hasattr(cell['metadata'], 'code_folding'):
folded = cell['metadata']['code_folding']
cell.input = self.fold_cell(cell.input, folded)
return cell, resources
|
|
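To make the fold_cell behaviour concrete, here is a hand-traced example (not part of the commit): folding line 0 of a three-line cell keeps only the parent line plus the '<->' marker, because both body lines are indented deeper than the folded line.

source = "def f(x):\n    y = x + 1\n    return y\n"
folded = [0]                    # cell metadata: fold the def line
# Expected fold_cell(source, folded), per the loop logic above:
expected = "def f(x):<->\n"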
306d4a11681ac34e5241059dc165d69725f46472
|
tests/functional/preview_and_dev/test_notify_api_letter.py
|
tests/functional/preview_and_dev/test_notify_api_letter.py
|
from retry.api import retry_call
from config import Config
from tests.postman import (
send_notification_via_api,
get_notification_by_id_via_api,
NotificationStatuses
)
from tests.test_utils import assert_notification_body, recordtime
@recordtime
def test_send_letter_notification_via_api(profile, seeded_client):
notification_id = send_notification_via_api(
seeded_client, profile.jenkins_build_letter_template_id,
profile.notify_research_letter_contact, 'letter'
)
notification = retry_call(
get_notification_by_id_via_api,
fargs=[seeded_client, notification_id, NotificationStatuses.SENT],
tries=Config.NOTIFICATION_RETRY_TIMES,
delay=Config.NOTIFICATION_RETRY_INTERVAL
)
assert_notification_body(notification_id, notification)
|
Test creating letter notification via API
|
Test creating letter notification via API
|
Python
|
mit
|
alphagov/notifications-functional-tests,alphagov/notifications-functional-tests
|
Test creating letter notification via API
|
from retry.api import retry_call
from config import Config
from tests.postman import (
send_notification_via_api,
get_notification_by_id_via_api,
NotificationStatuses
)
from tests.test_utils import assert_notification_body, recordtime
@recordtime
def test_send_letter_notification_via_api(profile, seeded_client):
notification_id = send_notification_via_api(
seeded_client, profile.jenkins_build_letter_template_id,
profile.notify_research_letter_contact, 'letter'
)
notification = retry_call(
get_notification_by_id_via_api,
fargs=[seeded_client, notification_id, NotificationStatuses.SENT],
tries=Config.NOTIFICATION_RETRY_TIMES,
delay=Config.NOTIFICATION_RETRY_INTERVAL
)
assert_notification_body(notification_id, notification)
|
<commit_before><commit_msg>Test creating letter notification via API<commit_after>
|
from retry.api import retry_call
from config import Config
from tests.postman import (
send_notification_via_api,
get_notification_by_id_via_api,
NotificationStatuses
)
from tests.test_utils import assert_notification_body, recordtime
@recordtime
def test_send_letter_notification_via_api(profile, seeded_client):
notification_id = send_notification_via_api(
seeded_client, profile.jenkins_build_letter_template_id,
profile.notify_research_letter_contact, 'letter'
)
notification = retry_call(
get_notification_by_id_via_api,
fargs=[seeded_client, notification_id, NotificationStatuses.SENT],
tries=Config.NOTIFICATION_RETRY_TIMES,
delay=Config.NOTIFICATION_RETRY_INTERVAL
)
assert_notification_body(notification_id, notification)
|
Test creating letter notification via APIfrom retry.api import retry_call
from config import Config
from tests.postman import (
send_notification_via_api,
get_notification_by_id_via_api,
NotificationStatuses
)
from tests.test_utils import assert_notification_body, recordtime
@recordtime
def test_send_letter_notification_via_api(profile, seeded_client):
notification_id = send_notification_via_api(
seeded_client, profile.jenkins_build_letter_template_id,
profile.notify_research_letter_contact, 'letter'
)
notification = retry_call(
get_notification_by_id_via_api,
fargs=[seeded_client, notification_id, NotificationStatuses.SENT],
tries=Config.NOTIFICATION_RETRY_TIMES,
delay=Config.NOTIFICATION_RETRY_INTERVAL
)
assert_notification_body(notification_id, notification)
|
<commit_before><commit_msg>Test creating letter notification via API<commit_after>from retry.api import retry_call
from config import Config
from tests.postman import (
send_notification_via_api,
get_notification_by_id_via_api,
NotificationStatuses
)
from tests.test_utils import assert_notification_body, recordtime
@recordtime
def test_send_letter_notification_via_api(profile, seeded_client):
notification_id = send_notification_via_api(
seeded_client, profile.jenkins_build_letter_template_id,
profile.notify_research_letter_contact, 'letter'
)
notification = retry_call(
get_notification_by_id_via_api,
fargs=[seeded_client, notification_id, NotificationStatuses.SENT],
tries=Config.NOTIFICATION_RETRY_TIMES,
delay=Config.NOTIFICATION_RETRY_INTERVAL
)
assert_notification_body(notification_id, notification)
|
|
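The test above leans entirely on retry_call from the retry package; a self-contained sketch of that polling pattern (the counter and numbers are arbitrary):

from retry.api import retry_call

attempts = {"n": 0}

def poll():
    # Stand-in for get_notification_by_id_via_api: raise until ready.
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise AssertionError("not sent yet")
    return "SENT"

# Re-invokes poll() whenever it raises, up to 5 tries, 0.1s apart,
# re-raising the last exception if every attempt fails.
print(retry_call(poll, tries=5, delay=0.1))  # -> SENT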
c0d2550e4cfb647b21e6fdde9705fe6e4a76a2df
|
Lambda/Functions/TagEC2Dependencies/tag_ec2_dependencies.py
|
Lambda/Functions/TagEC2Dependencies/tag_ec2_dependencies.py
|
'''
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under the License.
'''
from __future__ import print_function
print('Loading function')
import json, boto3, re
def lambda_handler(event, context):
# print("Received event: \n" + json.dumps(event))
# If CreateTags failed nothing to do
if 'errorCode' in event['detail']:
print('CreateTags failed with error code {} and error message "{}", nothing to do.'
.format(event['detail']['errorCode'], event['detail']['errorMessage']))
return
region = event['detail']['awsRegion']
ec2 = boto3.client('ec2', region_name=region)
instance_ids = []
is_instance = re.compile('i-[0-9a-f]+')
# A RunInstances call may create several instances, so the event may
# contain several resource IDs
for item in event['detail']['requestParameters']['resourcesSet']['items']:
if is_instance.match(item['resourceId']):
instance_ids.append(item['resourceId'])
# check if we were tagging any instances
if len(instance_ids) == 0:
return
tags = []
for tag in event['detail']['requestParameters']['tagSet']['items']:
tags.append({
'Key': tag['key'],
'Value': tag['value']
})
# If many instances were created, the describe_instances results may be paginated
paginator = ec2.get_paginator('describe_instances')
instances_iterator = paginator.paginate(
DryRun=False,
InstanceIds=instance_ids
)
for page in instances_iterator:
resources = []
for reservation in page['Reservations']:
for instance in reservation['Instances']:
for eni in instance['NetworkInterfaces']:
resources.append(eni['NetworkInterfaceId'])
for volume in instance['BlockDeviceMappings']:
if 'Ebs' in volume:
resources.append(volume['Ebs']['VolumeId'])
print("Tagging resorces for instance ids:\n[{}]".format(', '.join(instance_ids)))
print("Resources to be tagged:\n[{}]".format(', '.join(resources)))
ec2.create_tags(
DryRun=False,
Resources=resources,
Tags=tags
)
return
|
Add tag EC2 dependencies Lambda Python code.
|
Add tag EC2 dependencies Lambda Python code.
|
Python
|
apache-2.0
|
rsavordelli/aws-support-tools,rsavordelli/aws-support-tools,rsavordelli/aws-support-tools,rsavordelli/aws-support-tools,rsavordelli/aws-support-tools,rsavordelli/aws-support-tools
|
Add tag EC2 dependencies Lambda Python code.
|
'''
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under the License.
'''
from __future__ import print_function
print('Loading function')
import json, boto3, re
def lambda_handler(event, context):
# print("Received event: \n" + json.dumps(event))
# If CreateTags failed nothing to do
if 'errorCode' in event['detail']:
print('CreateTags failed with error code {} and error message "{}", nothing to do.'
.format(event['detail']['errorCode'], event['detail']['errorMessage']))
return
region = event['detail']['awsRegion']
ec2 = boto3.client('ec2', region_name=region)
instance_ids = []
is_instance = re.compile('i-[0-9a-f]+')
# A RunInstances call may create several instances, so the event may
# contain several resource IDs
for item in event['detail']['requestParameters']['resourcesSet']['items']:
if is_instance.match(item['resourceId']):
instance_ids.append(item['resourceId'])
# check if we were tagging any instances
if len(instance_ids) == 0:
return
tags = []
for tag in event['detail']['requestParameters']['tagSet']['items']:
tags.append({
'Key': tag['key'],
'Value': tag['value']
})
# If many instances were created, the describe_instances results may be paginated
paginator = ec2.get_paginator('describe_instances')
instances_iterator = paginator.paginate(
DryRun=False,
InstanceIds=instance_ids
)
for page in instances_iterator:
resources = []
for reservation in page['Reservations']:
for instance in reservation['Instances']:
for eni in instance['NetworkInterfaces']:
resources.append(eni['NetworkInterfaceId'])
for volume in instance['BlockDeviceMappings']:
if 'Ebs' in volume:
resources.append(volume['Ebs']['VolumeId'])
print("Tagging resorces for instance ids:\n[{}]".format(', '.join(instance_ids)))
print("Resources to be tagged:\n[{}]".format(', '.join(resources)))
ec2.create_tags(
DryRun=False,
Resources=resources,
Tags=tags
)
return
|
<commit_before><commit_msg>Add tag EC2 dependencies Lambda Python code.<commit_after>
|
'''
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under the License.
'''
from __future__ import print_function
print('Loading function')
import json, boto3, re
def lambda_handler(event, context):
# print("Received event: \n" + json.dumps(event))
# If CreateTags failed nothing to do
if 'errorCode' in event['detail']:
print('CreateTags failed with error code {} and error message "{}", nothing to do.'
.format(event['detail']['errorCode'], event['detail']['errorMessage']))
return
region = event['detail']['awsRegion']
ec2 = boto3.client('ec2', region_name=region)
instance_ids = []
is_instance = re.compile('i-[0-9a-f]+')
# A RunInstances call may create several instances, so the event may
# contain several resource IDs
for item in event['detail']['requestParameters']['resourcesSet']['items']:
if is_instance.match(item['resourceId']):
instance_ids.append(item['resourceId'])
# check if we were tagging any instances
if len(instance_ids) == 0:
return
tags = []
for tag in event['detail']['requestParameters']['tagSet']['items']:
tags.append({
'Key': tag['key'],
'Value': tag['value']
})
# If many instances were created, the describe_instances results may be paginated
paginator = ec2.get_paginator('describe_instances')
instances_iterator = paginator.paginate(
DryRun=False,
InstanceIds=instance_ids
)
for page in instances_iterator:
resources = []
for reservation in page['Reservations']:
for instance in reservation['Instances']:
for eni in instance['NetworkInterfaces']:
resources.append(eni['NetworkInterfaceId'])
for volume in instance['BlockDeviceMappings']:
if 'Ebs' in volume:
resources.append(volume['Ebs']['VolumeId'])
print("Tagging resorces for instance ids:\n[{}]".format(', '.join(instance_ids)))
print("Resources to be tagged:\n[{}]".format(', '.join(resources)))
ec2.create_tags(
DryRun=False,
Resources=resources,
Tags=tags
)
return
|
Add tag EC2 dependencies Lambda Python code.'''
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under the License.
'''
from __future__ import print_function
print('Loading function')
import json, boto3, re
def lambda_handler(event, context):
# print("Received event: \n" + json.dumps(event))
# If CreateTags failed nothing to do
if 'errorCode' in event['detail']:
print('CreateTags failed with error code {} and error message "{}", nothing to do.'
.format(event['detail']['errorCode'], event['detail']['errorMessage']))
return
region = event['detail']['awsRegion']
ec2 = boto3.client('ec2', region_name=region)
instance_ids = []
is_instance = re.compile('i-[0-9a-f]+')
# A RunInstances call may create several instances, so the event may
# contain several resource IDs
for item in event['detail']['requestParameters']['resourcesSet']['items']:
if is_instance.match(item['resourceId']):
instance_ids.append(item['resourceId'])
# check if we were tagging any instances
if len(instance_ids) == 0:
return
tags = []
for tag in event['detail']['requestParameters']['tagSet']['items']:
tags.append({
'Key': tag['key'],
'Value': tag['value']
})
# If many instances were created, the describe_instances results may be paginated
paginator = ec2.get_paginator('describe_instances')
instances_iterator = paginator.paginate(
DryRun=False,
InstanceIds=instance_ids
)
for page in instances_iterator:
resources = []
for reservation in page['Reservations']:
for instance in reservation['Instances']:
for eni in instance['NetworkInterfaces']:
resources.append(eni['NetworkInterfaceId'])
for volume in instance['BlockDeviceMappings']:
if 'Ebs' in volume:
resources.append(volume['Ebs']['VolumeId'])
print("Tagging resorces for instance ids:\n[{}]".format(', '.join(instance_ids)))
print("Resources to be tagged:\n[{}]".format(', '.join(resources)))
ec2.create_tags(
DryRun=False,
Resources=resources,
Tags=tags
)
return
|
<commit_before><commit_msg>Add tag EC2 dependencies Lambda Python code.<commit_after>'''
Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS"
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under the License.
'''
from __future__ import print_function
print('Loading function')
import json, boto3, re
def lambda_handler(event, context):
# print("Received event: \n" + json.dumps(event))
# If CreateTags failed nothing to do
if 'errorCode' in event['detail']:
print('CreateTags failed with error code {} and error message "{}", nothing to do.'
.format(event['detail']['errorCode'], event['detail']['errorMessage']))
return
region = event['detail']['awsRegion']
ec2 = boto3.client('ec2', region_name=region)
instance_ids = []
is_instance = re.compile('i-[0-9a-f]+')
# A RunInstances call may create several instances, so the event may
# contain several resource IDs
for item in event['detail']['requestParameters']['resourcesSet']['items']:
if is_instance.match(item['resourceId']):
instance_ids.append(item['resourceId'])
# check if we were tagging any instances
if len(instance_ids) == 0:
return
tags = []
for tag in event['detail']['requestParameters']['tagSet']['items']:
tags.append({
'Key': tag['key'],
'Value': tag['value']
})
# If many instances were created, the describe_instances results may be paginated
paginator = ec2.get_paginator('describe_instances')
instances_iterator = paginator.paginate(
DryRun=False,
InstanceIds=instance_ids
)
for page in instances_iterator:
resources = []
for reservation in page['Reservations']:
for instance in reservation['Instances']:
for eni in instance['NetworkInterfaces']:
resources.append(eni['NetworkInterfaceId'])
for volume in instance['BlockDeviceMappings']:
if 'Ebs' in volume:
resources.append(volume['Ebs']['VolumeId'])
print("Tagging resorces for instance ids:\n[{}]".format(', '.join(instance_ids)))
print("Resources to be tagged:\n[{}]".format(', '.join(resources)))
ec2.create_tags(
DryRun=False,
Resources=resources,
Tags=tags
)
return
|
|
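The handler above only touches a handful of keys in the CloudWatch Events payload; a trimmed, hand-written event of that shape (all IDs and tag values are placeholders) is enough to trace the code path locally:

event = {
    "detail": {
        "awsRegion": "us-east-1",
        "requestParameters": {
            "resourcesSet": {"items": [{"resourceId": "i-0123456789abcdef0"}]},
            "tagSet": {"items": [{"key": "team", "value": "platform"}]},
        },
    }
}
# lambda_handler(event, None)  # a real call needs AWS credentials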
944379d74969a17fa85cb05c5541d3d569764097
|
studygroups/management/commands/fix_course_created_by.py
|
studygroups/management/commands/fix_course_created_by.py
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Course
from django.contrib.auth.models import User
class Command(BaseCommand):
help = 'Add a created_by field to all courses'
def handle(self, *args, **options):
user1 = User.objects.get(pk=1324)
courses = Course.objects.filter(created_by__isnull=True, created_at__gt=user1.date_joined)
courses.update(created_by=user1)
courses = Course.objects.filter(created_by__isnull=True)
courses.update(created_by=User.objects.get(pk=3))
|
Add task to fix created_by for courses without it
|
Add task to fix created_by for courses without it
|
Python
|
mit
|
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
|
Add task to fix created_by for courses without it
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Course
from django.contrib.auth.models import User
class Command(BaseCommand):
help = 'Add a created_by field to all courses'
def handle(self, *args, **options):
user1 = User.objects.get(pk=1324)
courses = Course.objects.filter(created_by__isnull=True, created_at__gt=user1.date_joined)
courses.update(created_by=user1)
courses = Course.objects.filter(created_by__isnull=True)
courses.update(created_by=User.objects.get(pk=3))
|
<commit_before><commit_msg>Add task to fix created_by for courses without it<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Course
from django.contrib.auth.models import User
class Command(BaseCommand):
help = 'Add a created_by field to all courses'
def handle(self, *args, **options):
user1 = User.objects.get(pk=1324)
courses = Course.objects.filter(created_by__isnull=True, created_at__gt=user1.date_joined)
courses.update(created_by=user1)
courses = Course.objects.filter(created_by__isnull=True)
courses.update(created_by=User.objects.get(pk=3))
|
Add task to fix created_by for courses without itfrom django.core.management.base import BaseCommand, CommandError
from studygroups.models import Course
from django.contrib.auth.models import User
class Command(BaseCommand):
help = 'Add a created_by field to all courses'
def handle(self, *args, **options):
user1 = User.objects.get(pk=1324)
courses = Course.objects.filter(created_by__isnull=True, created_at__gt=user1.date_joined)
courses.update(created_by=user1)
courses = Course.objects.filter(created_by__isnull=True)
courses.update(created_by=User.objects.get(pk=3))
|
<commit_before><commit_msg>Add task to fix created_by for courses without it<commit_after>from django.core.management.base import BaseCommand, CommandError
from studygroups.models import Course
from django.contrib.auth.models import User
class Command(BaseCommand):
help = 'Add a created_by field to all courses'
def handle(self, *args, **options):
user1 = User.objects.get(pk=1324)
courses = Course.objects.filter(created_by__isnull=True, created_at__gt=user1.date_joined)
courses.update(created_by=user1)
courses = Course.objects.filter(created_by__isnull=True)
courses.update(created_by=User.objects.get(pk=3))
|
|
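Like any Django management command, the one above can also be driven from code; a one-line sketch (assumes the app is in INSTALLED_APPS and the hard-coded user IDs 1324 and 3 exist in the database):

from django.core.management import call_command

call_command("fix_course_created_by")  # same as: python manage.py fix_course_created_by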
bf7f3dfa737c01a441064d4dad0be3978fe1e2c5
|
jetson/networkTable.py
|
jetson/networkTable.py
|
import time
from networktables import NetworkTables
rioIP = '10.58.06.2' #this shouldn't change
tableName = 'JetsonToRio' #should be same in rio's java NT program
updateRateSecs = 1
#initialize Jetson as client to the roborio server
NetworkTables.initialize(server=rioIP)
table = NetworkTables.getTable(tableName)
while True:
#assuming for now that the opencv script returns an array of 4 ints
visionOutputs = [1,2,3,4] #fill this in later
table.putNumberArray("jetsonVals", visionOutputs)
time.sleep(updateRateSecs)
|
Add @njk345's initial work on networktables
|
Add @njk345's initial work on networktables
|
Python
|
mit
|
frc5806/Steamworks,frc5806/Steamworks,frc5806/Steamworks,frc5806/Steamworks
|
Add @njk345's initial work on networktables
|
import time
from networktables import NetworkTables
rioIP = '10.58.06.2' #this shouldn't change
tableName = 'JetsonToRio' #should be same in rio's java NT program
updateRateSecs = 1
#initialize Jetson as client to the roborio server
NetworkTables.initialize(server=rioIP)
table = NetworkTables.getTable(tableName)
while True:
#assuming for now that the opencv script returns an array of 4 ints
visionOutputs = [1,2,3,4] #fill this in later
table.putNumberArray("jetsonVals", visionOutputs)
time.sleep(updateRateSecs)
|
<commit_before><commit_msg>Add @njk345's initial work on networktables<commit_after>
|
import time
from networktables import NetworkTables
rioIP = '10.58.06.2' #this shouldn't change
tableName = 'JetsonToRio' #should be same in rio's java NT program
updateRateSecs = 1
#initialize Jetson as client to the roborio server
NetworkTables.initialize(server=rioIP)
table = NetworkTables.getTable(tableName)
while True:
#assuming for now that the opencv script returns an array of 4 ints
visionOutputs = [1,2,3,4] #fill this in later
table.putNumberArray("jetsonVals", visionOutputs)
time.sleep(updateRateSecs)
|
Add @njk345's initial work on networktablesimport time
from networktables import NetworkTables
rioIP = '10.58.06.2' #this shouldn't change
tableName = 'JetsonToRio' #should be same in rio's java NT program
updateRateSecs = 1
#initialize Jetson as client to the roborio server
NetworkTables.initialize(server=rioIP)
table = NetworkTables.getTable(tableName)
while True:
#assuming for now that the opencv script returns an array of 4 ints
visionOutputs = [1,2,3,4] #fill this in later
table.putNumberArray("jetsonVals", visionOutputs)
time.sleep(updateRateSecs)
|
<commit_before><commit_msg>Add @njk345's initial work on networktables<commit_after>import time
from networktables import NetworkTables
rioIP = '10.58.06.2' #this shouldn't change
tableName = 'JetsonToRio' #should be same in rio's java NT program
updateRateSecs = 1
#initialize Jetson as client to the roborio server
NetworkTables.initialize(server=rioIP)
table = NetworkTables.getTable(tableName)
while True:
#assuming for now that the opencv script returns an array of 4 ints
visionOutputs = [1,2,3,4] #fill this in later
table.putNumberArray("jetsonVals", visionOutputs)
time.sleep(updateRateSecs)
|
|
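For completeness, the values the Jetson publishes can be read back from the same table on the consuming side; a pynetworktables sketch (table and key names mirror the script, everything else is illustrative):

from networktables import NetworkTables

NetworkTables.initialize()                  # no server= -> run as the server
table = NetworkTables.getTable('JetsonToRio')
# Returns the default [] until the Jetson has published at least once.
print(table.getNumberArray('jetsonVals', []))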
16104da5676ea7f62bd85c69df9afe5305125c79
|
kitnirc/contrib/freenode.py
|
kitnirc/contrib/freenode.py
|
import logging
from kitnirc.modular import Module
_log = logging.getLogger(__name__)
class FreenodeModule(Module):
"""A KitnIRC module which provides Freenode-specific functionality.
Freenode is irc.freenode.net and runs ircd-seven, a Freenode-specific
branch of the charybdis ircd. Functionality provided by this module:
1. If the configuration has a 'password' field in the [nickserv]
section, it will send that password automatically to the server.
2. Sets usermode 'Q' (no forwarding), to prevent the bot from being
forwarded to a different channel than the one it was told to join.
Bots almost never want to be in a channel they didn't specify.
"""
@Module.handle("WELCOME")
def set_no_forwarding(self, client, *args):
client.send("MODE", client.user.nick, "+Q")
@Module.handle("PASSWORD")
def nickserv_password(self, client, *args):
# Freenode will pass through the server password to NickServ,
# skipping the need to send a password via PRIVMSG. If the bot
# has an account under a different name than its nick, you can
# use accountnick:password as the value of the password field.
if self.controller.config.has_option("nickserv", "password"):
# Sending directly via the socket, to avoid logging
password = self.controller.config.get("nickserv", "password")
_log.info("Sending NickServ password...")
client.socket.send("PASS %s\r\n" % password)
module = FreenodeModule
# vim: set ts=4 sts=4 sw=4 et:
|
Add the Freenode module to contrib.
|
Add the Freenode module to contrib.
|
Python
|
mit
|
ayust/kitnirc
|
Add the Freenode module to contrib.
|
import logging
from kitnirc.modular import Module
_log = logging.getLogger(__name__)
class FreenodeModule(Module):
"""A KitnIRC module which provides Freenode-specific functionality.
Freenode is irc.freenode.net and runs ircd-seven, a Freenode-specific
branch of the charybdis ircd. Functionality provided by this module:
1. If the configuration has a 'password' field in the [nickserv]
section, it will send that password automatically to the server.
2. Sets usermode 'Q' (no forwarding), to prevent the bot from being
forwarded to a different channel than the one it was told to join.
Bots almost never want to be in a channel they didn't specify.
"""
@Module.handle("WELCOME")
def set_no_forwarding(self, client, *args):
client.send("MODE", client.user.nick, "+Q")
@Module.handle("PASSWORD")
def nickserv_password(self, client, *args):
# Freenode will pass through the server password to NickServ,
# skipping the need to send a password via PRIVMSG. If the bot
# has an account under a different name than its nick, you can
# use accountnick:password as the value of the password field.
if self.controller.config.has_option("nickserv", "password"):
# Sending directly via the socket, to avoid logging
password = self.controller.config.get("nickserv", "password")
_log.info("Sending NickServ password...")
client.socket.send("PASS %s\r\n" % password)
module = FreenodeModule
# vim: set ts=4 sts=4 sw=4 et:
|
<commit_before><commit_msg>Add the Freenode module to contrib.<commit_after>
|
import logging
from kitnirc.modular import Module
_log = logging.getLogger(__name__)
class FreenodeModule(Module):
"""A KitnIRC module which provides Freenode-specific functionality.
Freenode is irc.freenode.net and runs ircd-seven, a Freenode-specific
branch of the charybdis ircd. Functionality provided by this module:
1. If the configuration has a 'password' field in the [nickserv]
section, it will send that password automatically to the server.
2. Sets usermode 'Q' (no forwarding), to prevent the bot from being
forwarded to a different channel than the one it was told to join.
Bots almost never want to be in a channel they didn't specify.
"""
@Module.handle("WELCOME")
def set_no_forwarding(self, client, *args):
client.send("MODE", client.user.nick, "+Q")
@Module.handle("PASSWORD")
def nickserv_password(self, client, *args):
# Freenode will pass through the server password to NickServ,
# skipping the need to send a password via PRIVMSG. If the bot
# has an account under a different name than its nick, you can
# use accountnick:password as the value of the password field.
if self.controller.config.has_option("nickserv", "password"):
# Sending directly via the socket, to avoid logging
password = self.controller.config.get("nickserv", "password")
_log.info("Sending NickServ password...")
client.socket.send("PASS %s\r\n" % password)
module = FreenodeModule
# vim: set ts=4 sts=4 sw=4 et:
|
Add the Freenode module to contrib.import logging
from kitnirc.modular import Module
_log = logging.getLogger(__name__)
class FreenodeModule(Module):
"""A KitnIRC module which provides Freenode-specific functionality.
Freenode is irc.freenode.net and runs ircd-seven, a Freenode-specific
branch of the charybdis ircd. Functionality provided by this module:
1. If the configuration has a 'password' field in the [nickserv]
section, it will send that password automatically to the server.
2. Sets usermode 'Q' (no forwarding), to prevent the bot from being
forwarded to a different channel than the one it was told to join.
Bots almost never want to be in a channel they didn't specify.
"""
@Module.handle("WELCOME")
def set_no_forwarding(self, client, *args):
client.send("MODE", client.user.nick, "+Q")
@Module.handle("PASSWORD")
def nickserv_password(self, client, *args):
# Freenode will pass through the server password to NickServ,
# skipping the need to send a password via PRIVMSG. If the bot
# has an account under a different name than its nick, you can
# use accountnick:password as the value of the password field.
if self.controller.config.has_option("nickserv", "password"):
# Sending directly via the socket, to avoid logging
password = self.controller.config.get("nickserv", "password")
_log.info("Sending NickServ password...")
client.socket.send("PASS %s\r\n" % password)
module = FreenodeModule
# vim: set ts=4 sts=4 sw=4 et:
|
<commit_before><commit_msg>Add the Freenode module to contrib.<commit_after>import logging
from kitnirc.modular import Module
_log = logging.getLogger(__name__)
class FreenodeModule(Module):
"""A KitnIRC module which provides Freenode-specific functionality.
Freenode is irc.freenode.net and runs ircd-seven, a Freenode-specific
branch of the charybdis ircd. Functionality provided by this module:
1. If the configuration has a 'password' field in the [nickserv]
section, it will send that password automatically to the server.
2. Sets usermode 'Q' (no forwarding), to prevent the bot from being
forwarded to a different channel than the one it was told to join.
Bots almost never want to be in a channel they didn't specify.
"""
@Module.handle("WELCOME")
def set_no_forwarding(self, client, *args):
client.send("MODE", client.user.nick, "+Q")
@Module.handle("PASSWORD")
def nickserv_password(self, client, *args):
# Freenode will pass through the server password to NickServ,
# skipping the need to send a password via PRIVMSG. If the bot
# has an account under a different name than its nick, you can
# use accountnick:password as the value of the password field.
if self.controller.config.has_option("nickserv", "password"):
# Sending directly via the socket, to avoid logging
password = self.controller.config.get("nickserv", "password")
_log.info("Sending NickServ password...")
client.socket.send("PASS %s\r\n" % password)
module = FreenodeModule
# vim: set ts=4 sts=4 sw=4 et:
|
|
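The module above expects the password in the bot's ConfigParser-style config; an illustrative read (file name and values are placeholders):

from ConfigParser import SafeConfigParser  # Python 2, matching the module's era

config = SafeConfigParser()
config.read("bot.cfg")                     # placeholder path; contents:
#   [nickserv]
#   password = myaccount:s3cret
if config.has_option("nickserv", "password"):
    password = config.get("nickserv", "password")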
81467f6cfd238dcfea5b4c60954bb8584f5d659d
|
files/scripts/replacefilesinalfresco.py
|
files/scripts/replacefilesinalfresco.py
|
import os, shutil
def copyFile(src, dest):
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
print('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
print('Error: %s' % e.strerror)
sourcefavicon = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/favicon.ico"
replacefavicon = ["/opt/alfresco/tomcat/webapps/ROOT/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/images/logo/AlfrescoLogo16.ico",
"/opt/alfresco/tomcat/webapps/share/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/img/favicon.ico"]
for favicon in replacefavicon:
copyFile( sourcefavicon, favicon )
fd = os.open(favicon,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
sourcelogo = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/alfresco-share-logo.png"
replacelogo = ["/opt/alfresco/tomcat/webapps/share/components/images/alfresco-share-logo.png"]
for logo in replacelogo:
copyFile( sourcelogo, logo )
fd = os.open(logo,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
|
Add script to sort graphics
|
Add script to sort graphics
|
Python
|
mit
|
marsbard/puppet-alfresco,marsbard/puppet-alfresco,digcat/puppet-alfresco,digcat/puppet-alfresco,marsbard/puppet-alfresco,marsbard/puppet-alfresco,digcat/puppet-alfresco,digcat/puppet-alfresco,marsbard/puppet-alfresco,digcat/puppet-alfresco
|
Add script to sort graphics
|
import os, shutil
def copyFile(src, dest):
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
print('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
print('Error: %s' % e.strerror)
sourcefavicon = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/favicon.ico"
replacefavicon = ["/opt/alfresco/tomcat/webapps/ROOT/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/images/logo/AlfrescoLogo16.ico",
"/opt/alfresco/tomcat/webapps/share/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/img/favicon.ico"]
for favicon in replacefavicon:
copyFile( sourcefavicon, favicon )
fd = os.open(favicon,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
sourcelogo = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/alfresco-share-logo.png"
replacelogo = ["/opt/alfresco/tomcat/webapps/share/components/images/alfresco-share-logo.png"]
for logo in replacelogo:
copyFile( sourcelogo, logo )
fd = os.open(logo,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
|
<commit_before><commit_msg>Add script to sort graphics<commit_after>
|
import os, shutil
def copyFile(src, dest):
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
print('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
print('Error: %s' % e.strerror)
sourcefavicon = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/favicon.ico"
replacefavicon = ["/opt/alfresco/tomcat/webapps/ROOT/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/images/logo/AlfrescoLogo16.ico",
"/opt/alfresco/tomcat/webapps/share/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/img/favicon.ico"]
for favicon in replacefavicon:
copyFile( sourcefavicon, favicon )
fd = os.open(favicon,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
sourcelogo = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/alfresco-share-logo.png"
replacelogo = ["/opt/alfresco/tomcat/webapps/share/components/images/alfresco-share-logo.png"]
for logo in replacelogo:
copyFile( sourcelogo, logo )
fd = os.open(logo,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
|
Add script to sort graphicsimport os, shutil
def copyFile(src, dest):
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
print('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
print('Error: %s' % e.strerror)
sourcefavicon = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/favicon.ico"
replacefavicon = ["/opt/alfresco/tomcat/webapps/ROOT/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/images/logo/AlfrescoLogo16.ico",
"/opt/alfresco/tomcat/webapps/share/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/img/favicon.ico"]
for favicon in replacefavicon:
copyFile( sourcefavicon, favicon )
fd = os.open(favicon,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
sourcelogo = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/alfresco-share-logo.png"
replacelogo = ["/opt/alfresco/tomcat/webapps/share/components/images/alfresco-share-logo.png"]
for logo in replacelogo:
copyFile( sourcelogo, logo )
fd = os.open(logo,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
|
<commit_before><commit_msg>Add script to sort graphics<commit_after>import os, shutil
def copyFile(src, dest):
try:
shutil.copy(src, dest)
# eg. src and dest are the same file
except shutil.Error as e:
print('Error: %s' % e)
# eg. source or destination doesn't exist
except IOError as e:
print('Error: %s' % e.strerror)
sourcefavicon = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/favicon.ico"
replacefavicon = ["/opt/alfresco/tomcat/webapps/ROOT/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/favicon.ico",
"/opt/alfresco/tomcat/webapps/alfresco/images/logo/AlfrescoLogo16.ico",
"/opt/alfresco/tomcat/webapps/share/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/favicon.ico",
"/opt/alfresco/tomcat/webapps/solr4/img/favicon.ico"]
for favicon in replacefavicon:
copyFile( sourcefavicon, favicon )
fd = os.open(favicon,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
sourcelogo = "/opt/alfresco/tomcat/webapps/share/themes/beeTheme/images/alfresco-share-logo.png"
replacelogo = ["/opt/alfresco/tomcat/webapps/share/components/images/alfresco-share-logo.png"]
for logo in replacelogo:
copyFile( sourcelogo, logo )
fd = os.open(logo,os.O_RDONLY)
os.fchown( fd, 501, 501)
os.close(fd)
|
|
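A note on the fchown dance in the script above: opening a read-only fd just to change ownership is unnecessary, since os.chown accepts a path. An equivalent helper (uid/gid 501 as in the script):

import os, shutil

def copy_and_chown(src, dest, uid=501, gid=501):
    shutil.copy(src, dest)
    os.chown(dest, uid, gid)   # same effect as open + fchown + close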
9f087d3a25839719b080ed1170084196a8e476bd
|
main.py
|
main.py
|
from ctypes import *
SOFT_DOG_DATA_SIZE = 256
SOFT_DOG_LIBRARY = "win32dll.dll"
def LOGE(message):
print message
def LOGI(message):
print message
def checkSoftDogLibraryIsExist():
LOGI(SOFT_DOG_LIBRARY + " is exist.")
return True
def _readSoftDogData(outBuffer):
# TODO
# Check SOFT_DOG_LIBRARY if is exits.
if not checkSoftDogLibraryIsExist():
return False
windll = WinDLL(SOFT_DOG_LIBRARY)
if not windll:
LOGI("Load soft dog library: " + SOFT_DOG_LIBRARY + " failed!!!")
return False
LOGE("Load soft dog library: " + SOFT_DOG_LIBRARY + " Success.")
if not windll.DogRead:
LOGE("Can not find function entry: DogRead")
return False
dataBytes = c_ulong(90)
readAddr = c_ulong(0)
windll.DogRead.argtypes = [c_ulong, c_ulong, c_char_p]
windll.DogRead(dataBytes, readAddr, outBuffer)
return True
def readSoftDogData():
softDogData = ""
outBuffer = (c_char * SOFT_DOG_DATA_SIZE)('\0')
if _readSoftDogData(outBuffer):
for item in outBuffer:
if item == '\0':
break
softDogData = softDogData + item
return softDogData
print readSoftDogData()
|
Add read soft dog data function.
|
Add read soft dog data function.
Signed-off-by: Yunchao Chen <c67c666c8087d4aedc12b7d32092a3c04fc5fd4b@xiaoyezi.com>
|
Python
|
mit
|
iiiCode/network-diagnostic-tools
|
Add read soft dog data function.
Signed-off-by: Yunchao Chen <c67c666c8087d4aedc12b7d32092a3c04fc5fd4b@xiaoyezi.com>
|
from ctypes import *
SOFT_DOG_DATA_SIZE = 256
SOFT_DOG_LIBRARY = "win32dll.dll"
def LOGE(message):
print message
def LOGI(message):
print message
def checkSoftDogLibraryIsExist():
LOGI(SOFT_DOG_LIBRARY + " is exist.")
return True
def _readSoftDogData(outBuffer):
# TODO
# Check SOFT_DOG_LIBRARY if is exits.
if not checkSoftDogLibraryIsExist():
return False
windll = WinDLL(SOFT_DOG_LIBRARY)
if not windll:
LOGI("Load soft dog library: " + SOFT_DOG_LIBRARY + " failed!!!")
return False
LOGE("Load soft dog library: " + SOFT_DOG_LIBRARY + " Success.")
if not windll.DogRead:
LOGE("Can not find function entry: DogRead")
return False
dataBytes = c_ulong(90)
readAddr = c_ulong(0)
windll.DogRead.argtypes = [c_ulong, c_ulong, c_char_p]
windll.DogRead(dataBytes, readAddr, outBuffer)
return True
def readSoftDogData():
softDogData = ""
outBuffer = (c_char * SOFT_DOG_DATA_SIZE)('\0')
if _readSoftDogData(outBuffer):
for item in outBuffer:
if item == '\0':
break
softDogData = softDogData + item
return softDogData
print readSoftDogData()
|
<commit_before><commit_msg>Add read soft dog data function.
Signed-off-by: Yunchao Chen <c67c666c8087d4aedc12b7d32092a3c04fc5fd4b@xiaoyezi.com><commit_after>
|
from ctypes import *
SOFT_DOG_DATA_SIZE = 256
SOFT_DOG_LIBRARY = "win32dll.dll"
def LOGE(message):
print message
def LOGI(message):
print message
def checkSoftDogLibraryIsExist():
LOGI(SOFT_DOG_LIBRARY + " is exist.")
return True
def _readSoftDogData(outBuffer):
# TODO
# Check SOFT_DOG_LIBRARY if is exits.
if not checkSoftDogLibraryIsExist():
return False
windll = WinDLL(SOFT_DOG_LIBRARY)
if not windll:
LOGI("Load soft dog library: " + SOFT_DOG_LIBRARY + " failed!!!")
return False
LOGE("Load soft dog library: " + SOFT_DOG_LIBRARY + " Success.")
if not windll.DogRead:
LOGE("Can not find function entry: DogRead")
return False
dataBytes = c_ulong(90)
readAddr = c_ulong(0)
windll.DogRead.argtypes = [c_ulong, c_ulong, c_char_p]
windll.DogRead(dataBytes, readAddr, outBuffer)
return True
def readSoftDogData():
softDogData = ""
outBuffer = (c_char * SOFT_DOG_DATA_SIZE)('\0')
if _readSoftDogData(outBuffer):
for item in outBuffer:
if item == '\0':
break
softDogData = softDogData + item
return softDogData
print readSoftDogData()
|
Add read soft dog data function.
Signed-off-by: Yunchao Chen <c67c666c8087d4aedc12b7d32092a3c04fc5fd4b@xiaoyezi.com>from ctypes import *
SOFT_DOG_DATA_SIZE = 256
SOFT_DOG_LIBRARY = "win32dll.dll"
def LOGE(message):
print message
def LOGI(message):
print message
def checkSoftDogLibraryIsExist():
LOGI(SOFT_DOG_LIBRARY + " is exist.")
return True
def _readSoftDogData(outBuffer):
# TODO
# Check SOFT_DOG_LIBRARY if is exits.
if not checkSoftDogLibraryIsExist():
return False
windll = WinDLL(SOFT_DOG_LIBRARY)
if not windll:
LOGI("Load soft dog library: " + SOFT_DOG_LIBRARY + " failed!!!")
return False
LOGE("Load soft dog library: " + SOFT_DOG_LIBRARY + " Success.")
if not windll.DogRead:
LOGE("Can not find function entry: DogRead")
return False
dataBytes = c_ulong(90)
readAddr = c_ulong(0)
windll.DogRead.argtypes = [c_ulong, c_ulong, c_char_p]
windll.DogRead(dataBytes, readAddr, outBuffer)
return True
def readSoftDogData():
softDogData = ""
outBuffer = (c_char * SOFT_DOG_DATA_SIZE)('\0')
if _readSoftDogData(outBuffer):
for item in outBuffer:
if item == '\0':
break
softDogData = softDogData + item
return softDogData
print readSoftDogData()
|
<commit_before><commit_msg>Add read soft dog data function.
Signed-off-by: Yunchao Chen <c67c666c8087d4aedc12b7d32092a3c04fc5fd4b@xiaoyezi.com><commit_after>from ctypes import *
SOFT_DOG_DATA_SIZE = 256
SOFT_DOG_LIBRARY = "win32dll.dll"
def LOGE(message):
print message
def LOGI(message):
print message
def checkSoftDogLibraryIsExist():
LOGI(SOFT_DOG_LIBRARY + " is exist.")
return True
def _readSoftDogData(outBuffer):
# TODO
# Check SOFT_DOG_LIBRARY if is exits.
if not checkSoftDogLibraryIsExist():
return False
windll = WinDLL(SOFT_DOG_LIBRARY)
if not windll:
LOGI("Load soft dog library: " + SOFT_DOG_LIBRARY + " failed!!!")
return False
LOGE("Load soft dog library: " + SOFT_DOG_LIBRARY + " Success.")
if not windll.DogRead:
LOGE("Can not find function entry: DogRead")
return False
dataBytes = c_ulong(90)
readAddr = c_ulong(0)
windll.DogRead.argtypes = [c_ulong, c_ulong, c_char_p]
windll.DogRead(dataBytes, readAddr, outBuffer)
return True
def readSoftDogData():
softDogData = ""
outBuffer = (c_char * SOFT_DOG_DATA_SIZE)('\0')
if _readSoftDogData(outBuffer):
for item in outBuffer:
if item == '\0':
break
softDogData = softDogData + item
return softDogData
print readSoftDogData()
|
|
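The only ctypes-specific piece above is the output buffer; a Python 3 sketch of the same round-trip, with the DogRead call stubbed out since it needs the vendor's win32dll.dll:

from ctypes import create_string_buffer

buf = create_string_buffer(256)        # zero-filled, like the '\0' init above
# windll.DogRead(c_ulong(90), c_ulong(0), buf) would fill it in place
data = buf.value.decode("ascii", "ignore")  # bytes up to the first NUL
print(repr(data))                      # '' while the buffer is untouched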
3512a5cf05a1063375ad394d79ce6824fe4c66ef
|
src/ggrc/migrations/versions/20150611124244_1d1e9807c46c_drop_is_enabled_column.py
|
src/ggrc/migrations/versions/20150611124244_1d1e9807c46c_drop_is_enabled_column.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Drop is_enabled column
Revision ID: 1d1e9807c46c
Revises: 3261848aaa2b
Create Date: 2015-06-11 12:42:44.520839
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '1d1e9807c46c'
down_revision = '3261848aaa2b'
def upgrade():
op.drop_column('people', 'is_enabled')
def downgrade():
op.add_column(
'people',
sa.Column('is_enabled',
mysql.TINYINT(display_width=1),
autoincrement=False, nullable=False))
|
Drop is_enabled column from people table
|
Drop is_enabled column from people table
|
Python
|
apache-2.0
|
andrei-karalionak/ggrc-core,hyperNURb/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core
|
Drop is_enabled column from people table
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Drop is_enabled column
Revision ID: 1d1e9807c46c
Revises: 3261848aaa2b
Create Date: 2015-06-11 12:42:44.520839
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '1d1e9807c46c'
down_revision = '3261848aaa2b'
def upgrade():
op.drop_column('people', 'is_enabled')
def downgrade():
op.add_column(
'people',
sa.Column('is_enabled',
mysql.TINYINT(display_width=1),
autoincrement=False, nullable=False))
|
<commit_before><commit_msg>Drop is_enabled column from people table<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Drop is_enabled column
Revision ID: 1d1e9807c46c
Revises: 3261848aaa2b
Create Date: 2015-06-11 12:42:44.520839
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '1d1e9807c46c'
down_revision = '3261848aaa2b'
def upgrade():
op.drop_column('people', 'is_enabled')
def downgrade():
op.add_column(
'people',
sa.Column('is_enabled',
mysql.TINYINT(display_width=1),
autoincrement=False, nullable=False))
|
Drop is_enabled column from people table# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Drop is_enabled column
Revision ID: 1d1e9807c46c
Revises: 3261848aaa2b
Create Date: 2015-06-11 12:42:44.520839
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '1d1e9807c46c'
down_revision = '3261848aaa2b'
def upgrade():
op.drop_column('people', 'is_enabled')
def downgrade():
op.add_column(
'people',
sa.Column('is_enabled',
mysql.TINYINT(display_width=1),
autoincrement=False, nullable=False))
|
<commit_before><commit_msg>Drop is_enabled column from people table<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Drop is_enabled column
Revision ID: 1d1e9807c46c
Revises: 3261848aaa2b
Create Date: 2015-06-11 12:42:44.520839
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '1d1e9807c46c'
down_revision = '3261848aaa2b'
def upgrade():
op.drop_column('people', 'is_enabled')
def downgrade():
op.add_column(
'people',
sa.Column('is_enabled',
mysql.TINYINT(display_width=1),
autoincrement=False, nullable=False))
|
|
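The revision above is applied through the normal Alembic entry points; driving it programmatically looks like this (the config path is a placeholder):

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")
command.upgrade(cfg, "1d1e9807c46c")      # drop the column
# command.downgrade(cfg, "3261848aaa2b")  # restore it, NOT NULL as defined above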
bb1ce3c92de3ad1390034db2c916d1b82e65c70c
|
project/user/tests/test_login_manager.py
|
project/user/tests/test_login_manager.py
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
from project.tests.base import UtilsTestCase
import base64
import project.home.login_manager as lm
class LoginManagerUtilsTestCase(UtilsTestCase):
def setUp(self):
super(LoginManagerUtilsTestCase, self).setUp()
self.user = self.create_user(is_admin=True)
def test_load_user(self):
"""
Ensure the load_user method works properly
"""
# test: invalid user id arguments
self.assertFalse(lm.load_user(None))
self.assertFalse(lm.load_user("None"))
self.assertFalse(lm.load_user(""))
self.assertFalse(lm.load_user("?"))
# test: valid user id
self.assertEqual(self.user, lm.load_user(self.user.id))
def test_load_user_from_request(self):
"""
Ensure load_user_from_request handles authentication via api_key and via Basic auth
"""
# test: invalid arguments
reqctx = self.app.test_request_context("?", headers=[])
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: invalid api key
reqctx = self.app.test_request_context("?api_key=1234")
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid api key
reqctx = self.app.test_request_context("?api_key={}".format(self.user.api_key))
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: invalid auth Basic String
reqctx = self.app.test_request_context(headers={'Authorization': b"lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic but invalid base64 user:password
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic and valid base64 api_key
api_key = base64.b64encode(bytes(self.user.api_key, 'utf8'))
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + api_key})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: valid auth Basic and valid base64 userpassword
userpass = base64.b64encode(b"username:password")
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + userpass})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
|
Refactor location of tests for login_manager.py
|
Refactor location of tests for login_manager.py
|
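project/home/login_manager.py itself is not included in this record; a minimal sketch consistent with the tests below might look like the following. This is not the project's actual code, and lookup_user is a hypothetical helper:

import base64

def load_user_from_request(request):
    # Sketch only: resolve a user from ?api_key=... or an Authorization: Basic header.
    candidate = request.args.get('api_key')
    if candidate is None:
        auth = request.headers.get('Authorization', '')
        if not auth.startswith('Basic '):
            return None  # covers malformed headers such as b"lol?"
        try:
            candidate = base64.b64decode(auth[6:], validate=True).decode('utf8')
        except Exception:
            return None  # covers b"Basic lol?" (invalid base64 payload)
    # lookup_user is hypothetical: try the value as an api_key first, then as a
    # "username:password" pair; return None when neither matches a user.
    return lookup_user(candidate)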
Python
|
mit
|
andreffs18/flask-template-project,andreffs18/flask-template-project,andreffs18/flask-template-project
|
Refactor location of tests for login_manager.py
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
from project.tests.base import UtilsTestCase
import base64
import project.home.login_manager as lm
class LoginManagerUtilsTestCase(UtilsTestCase):
def setUp(self):
super(LoginManagerUtilsTestCase, self).setUp()
self.user = self.create_user(is_admin=True)
def test_load_user(self):
"""
        Ensure the load_user method works properly
"""
# test: invalid user id arguments
self.assertFalse(lm.load_user(None))
self.assertFalse(lm.load_user("None"))
self.assertFalse(lm.load_user(""))
self.assertFalse(lm.load_user("?"))
# test: valid user id
self.assertEqual(self.user, lm.load_user(self.user.id))
def test_load_user_from_request(self):
"""
        Ensure load_user_from_request works for both api_key and Basic authentication
"""
# test: invalid arguments
reqctx = self.app.test_request_context("?", headers=[])
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: invalid api key
reqctx = self.app.test_request_context("?api_key=1234")
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid api key
reqctx = self.app.test_request_context("?api_key={}".format(self.user.api_key))
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: invalid auth Basic String
reqctx = self.app.test_request_context(headers={'Authorization': b"lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic but invalid base64 user:password
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic and valid base64 api_key
api_key = base64.b64encode(bytes(self.user.api_key, 'utf8'))
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + api_key})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: valid auth Basic and valid base64 userpassword
userpass = base64.b64encode(b"username:password")
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + userpass})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
|
<commit_before><commit_msg>Refactor location of tests for login_manager.py<commit_after>
|
# !/usr/bin/python
# -*- coding: utf-8 -*-
from project.tests.base import UtilsTestCase
import base64
import project.home.login_manager as lm
class LoginManagerUtilsTestCase(UtilsTestCase):
def setUp(self):
super(LoginManagerUtilsTestCase, self).setUp()
self.user = self.create_user(is_admin=True)
def test_load_user(self):
"""
        Ensure the load_user method works properly
"""
# test: invalid user id arguments
self.assertFalse(lm.load_user(None))
self.assertFalse(lm.load_user("None"))
self.assertFalse(lm.load_user(""))
self.assertFalse(lm.load_user("?"))
# test: valid user id
self.assertEqual(self.user, lm.load_user(self.user.id))
def test_load_user_from_request(self):
"""
        Ensure load_user_from_request works for both api_key and Basic authentication
"""
# test: invalid arguments
reqctx = self.app.test_request_context("?", headers=[])
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: invalid api key
reqctx = self.app.test_request_context("?api_key=1234")
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid api key
reqctx = self.app.test_request_context("?api_key={}".format(self.user.api_key))
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: invalid auth Basic String
reqctx = self.app.test_request_context(headers={'Authorization': b"lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic but invalid base64 user:password
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic and valid base64 api_key
api_key = base64.b64encode(bytes(self.user.api_key, 'utf8'))
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + api_key})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: valid auth Basic and valid base64 userpassword
userpass = base64.b64encode(b"username:password")
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + userpass})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
|
Refactor location of tests for login_manager.py# !/usr/bin/python
# -*- coding: utf-8 -*-
from project.tests.base import UtilsTestCase
import base64
import project.home.login_manager as lm
class LoginManagerUtilsTestCase(UtilsTestCase):
def setUp(self):
super(LoginManagerUtilsTestCase, self).setUp()
self.user = self.create_user(is_admin=True)
def test_load_user(self):
"""
        Ensure the load_user method works properly
"""
# test: invalid user id arguments
self.assertFalse(lm.load_user(None))
self.assertFalse(lm.load_user("None"))
self.assertFalse(lm.load_user(""))
self.assertFalse(lm.load_user("?"))
# test: valid user id
self.assertEqual(self.user, lm.load_user(self.user.id))
def test_load_user_from_request(self):
"""
        Ensure load_user_from_request works for both api_key and Basic authentication
"""
# test: invalid arguments
reqctx = self.app.test_request_context("?", headers=[])
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: invalid api key
reqctx = self.app.test_request_context("?api_key=1234")
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid api key
reqctx = self.app.test_request_context("?api_key={}".format(self.user.api_key))
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: invalid auth Basic String
reqctx = self.app.test_request_context(headers={'Authorization': b"lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic but invalid base64 user:password
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic and valid base64 api_key
api_key = base64.b64encode(bytes(self.user.api_key, 'utf8'))
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + api_key})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: valid auth Basic and valid base64 userpassword
userpass = base64.b64encode(b"username:password")
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + userpass})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
|
<commit_before><commit_msg>Refactor location of tests for login_manager.py<commit_after># !/usr/bin/python
# -*- coding: utf-8 -*-
from project.tests.base import UtilsTestCase
import base64
import project.home.login_manager as lm
class LoginManagerUtilsTestCase(UtilsTestCase):
def setUp(self):
super(LoginManagerUtilsTestCase, self).setUp()
self.user = self.create_user(is_admin=True)
def test_load_user(self):
"""
        Ensure the load_user method works properly
"""
# test: invalid user id arguments
self.assertFalse(lm.load_user(None))
self.assertFalse(lm.load_user("None"))
self.assertFalse(lm.load_user(""))
self.assertFalse(lm.load_user("?"))
# test: valid user id
self.assertEqual(self.user, lm.load_user(self.user.id))
def test_load_user_from_request(self):
"""
        Ensure load_user_from_request works for both api_key and Basic authentication
"""
# test: invalid arguments
reqctx = self.app.test_request_context("?", headers=[])
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: invalid api key
reqctx = self.app.test_request_context("?api_key=1234")
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid api key
reqctx = self.app.test_request_context("?api_key={}".format(self.user.api_key))
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: invalid auth Basic String
reqctx = self.app.test_request_context(headers={'Authorization': b"lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic but invalid base64 user:password
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic lol?"})
self.assertIsNone(lm.load_user_from_request(reqctx.request))
# test: valid auth Basic and valid base64 api_key
api_key = base64.b64encode(bytes(self.user.api_key, 'utf8'))
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + api_key})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
# test: valid auth Basic and valid base64 userpassword
userpass = base64.b64encode(b"username:password")
reqctx = self.app.test_request_context(headers={'Authorization': b"Basic " + userpass})
response = lm.load_user_from_request(reqctx.request)
self.assertIsNotNone(response)
self.assertEqual(response, self.user)
|
|
39b771089507b6c510837808018125f0e6adf611
|
opps/images/widgets.py
|
opps/images/widgets.py
|
from django import forms
from django.conf import settings
from django.template.loader import render_to_string
class MultipleUpload(forms.FileInput):
def render(self, name, value, attrs=None):
_value = ""
if value:
_value = "{0}{1}".format(settings.MEDIA_URL, value)
return render_to_string("admin/opps/images/multiupload.html",
{"name": name, "value": _value,
"STATIC_URL": settings.STATIC_URL})
|
Create form widget multiple upload
|
Create form widget multiple upload
|
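A usage sketch for the widget (the form and field names are illustrative, not from the commit):

from django import forms
from opps.images.widgets import MultipleUpload

class ImageUploadForm(forms.Form):
    # Renders the file input through admin/opps/images/multiupload.html
    archive = forms.FileField(widget=MultipleUpload())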
Python
|
mit
|
jeanmask/opps,opps/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,opps/opps,opps/opps,YACOWS/opps,williamroot/opps,williamroot/opps,opps/opps,jeanmask/opps
|
Create form widget multiple upload
|
from django import forms
from django.conf import settings
from django.template.loader import render_to_string
class MultipleUpload(forms.FileInput):
def render(self, name, value, attrs=None):
_value = ""
if value:
_value = "{0}{1}".format(settings.MEDIA_URL, value)
return render_to_string("admin/opps/images/multiupload.html",
{"name": name, "value": _value,
"STATIC_URL": settings.STATIC_URL})
|
<commit_before><commit_msg>Create form widget multiple upload<commit_after>
|
from django import forms
from django.conf import settings
from django.template.loader import render_to_string
class MultipleUpload(forms.FileInput):
def render(self, name, value, attrs=None):
_value = ""
if value:
_value = "{0}{1}".format(settings.MEDIA_URL, value)
return render_to_string("admin/opps/images/multiupload.html",
{"name": name, "value": _value,
"STATIC_URL": settings.STATIC_URL})
|
Create form widget multiple uploadfrom django import forms
from django.conf import settings
from django.template.loader import render_to_string
class MultipleUpload(forms.FileInput):
def render(self, name, value, attrs=None):
_value = ""
if value:
_value = "{0}{1}".format(settings.MEDIA_URL, value)
return render_to_string("admin/opps/images/multiupload.html",
{"name": name, "value": _value,
"STATIC_URL": settings.STATIC_URL})
|
<commit_before><commit_msg>Create form widget multiple upload<commit_after>from django import forms
from django.conf import settings
from django.template.loader import render_to_string
class MultipleUpload(forms.FileInput):
def render(self, name, value, attrs=None):
_value = ""
if value:
_value = "{0}{1}".format(settings.MEDIA_URL, value)
return render_to_string("admin/opps/images/multiupload.html",
{"name": name, "value": _value,
"STATIC_URL": settings.STATIC_URL})
|
|
47ea6eab0d224f80caad1cf2bc0cec7081562b52
|
stdnum/us/atin.py
|
stdnum/us/atin.py
|
# atin.py - functions for handling ATINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ATIN (U.S. Adoption Taxpayer Identification Number).
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom the
adopting parents cannot obtain a Social Security Number.
>>> validate('123-45-6789')
'123456789'
>>> validate('1234-56789') # dash in the wrong place
Traceback (most recent call last):
...
InvalidFormat: ...
>>> format('123456789')
'123-45-6789'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching ATINs
_atin_re = re.compile('^[0-9]{3}-?[0-9]{2}-?[0-9]{4}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
match = _atin_re.search(clean(number, '').strip())
if not match:
raise InvalidFormat()
# sadly, no more information on ATIN number validation was found
return compact(number)
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
if len(number) == 9:
number = number[:3] + '-' + number[3:5] + '-' + number[5:]
return number
|
Add a United States ATIN module
|
Add a United States ATIN module
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom
the adopting parents cannot obtain a Social Security Number.
|
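Assuming the module is importable as stdnum.us.atin once merged, typical usage mirrors its doctests:

from stdnum.us import atin

atin.validate('123-45-6789')   # -> '123456789'
atin.is_valid('1234-56789')    # -> False (dash in the wrong place)
atin.format('123456789')       # -> '123-45-6789'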
Python
|
lgpl-2.1
|
arthurdejong/python-stdnum,dchoruzy/python-stdnum,t0mk/python-stdnum,tonyseek/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum,holvi/python-stdnum,holvi/python-stdnum,arthurdejong/python-stdnum
|
Add a United States ATIN module
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom
the adopting parents cannot obtain a Social Security Number.
|
# atin.py - functions for handling ATINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ATIN (U.S. Adoption Taxpayer Identification Number).
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom the
adopting parents cannot obtain a Social Security Number.
>>> validate('123-45-6789')
'123456789'
>>> validate('1234-56789') # dash in the wrong place
Traceback (most recent call last):
...
InvalidFormat: ...
>>> format('123456789')
'123-45-6789'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching ATINs
_atin_re = re.compile('^[0-9]{3}-?[0-9]{2}-?[0-9]{4}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
match = _atin_re.search(clean(number, '').strip())
if not match:
raise InvalidFormat()
# sadly, no more information on ATIN number validation was found
return compact(number)
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
if len(number) == 9:
number = number[:3] + '-' + number[3:5] + '-' + number[5:]
return number
|
<commit_before><commit_msg>Add a United States ATIN module
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom
the adopting parents cannot obtain a Social Security Number.<commit_after>
|
# atin.py - functions for handling ATINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ATIN (U.S. Adoption Taxpayer Identification Number).
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom the
adopting parents cannot obtain a Social Security Number.
>>> validate('123-45-6789')
'123456789'
>>> validate('1234-56789') # dash in the wrong place
Traceback (most recent call last):
...
InvalidFormat: ...
>>> format('123456789')
'123-45-6789'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching ATINs
_atin_re = re.compile('^[0-9]{3}-?[0-9]{2}-?[0-9]{4}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
match = _atin_re.search(clean(number, '').strip())
if not match:
raise InvalidFormat()
# sadly, no more information on ATIN number validation was found
return compact(number)
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
if len(number) == 9:
number = number[:3] + '-' + number[3:5] + '-' + number[5:]
return number
|
Add a United States ATIN module
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom
the adopting parents cannot obtain a Social Security Number.# atin.py - functions for handling ATINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ATIN (U.S. Adoption Taxpayer Identification Number).
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom the
adopting parents cannot obtain a Social Security Number.
>>> validate('123-45-6789')
'123456789'
>>> validate('1234-56789') # dash in the wrong place
Traceback (most recent call last):
...
InvalidFormat: ...
>>> format('123456789')
'123-45-6789'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching ATINs
_atin_re = re.compile('^[0-9]{3}-?[0-9]{2}-?[0-9]{4}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
match = _atin_re.search(clean(number, '').strip())
if not match:
raise InvalidFormat()
# sadly, no more information on ATIN number validation was found
return compact(number)
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
if len(number) == 9:
number = number[:3] + '-' + number[3:5] + '-' + number[5:]
return number
|
<commit_before><commit_msg>Add a United States ATIN module
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom
the adopting parents cannot obtain a Social Security Number.<commit_after># atin.py - functions for handling ATINs
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""ATIN (U.S. Adoption Taxpayer Identification Number).
An Adoption Taxpayer Identification Number (ATIN) is a temporary
nine-digit number issued by the United States IRS for a child for whom the
adopting parents cannot obtain a Social Security Number.
>>> validate('123-45-6789')
'123456789'
>>> validate('1234-56789') # dash in the wrong place
Traceback (most recent call last):
...
InvalidFormat: ...
>>> format('123456789')
'123-45-6789'
"""
import re
from stdnum.exceptions import *
from stdnum.util import clean
# regular expression for matching ATINs
_atin_re = re.compile('^[0-9]{3}-?[0-9]{2}-?[0-9]{4}$')
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, '-').strip()
def validate(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
match = _atin_re.search(clean(number, '').strip())
if not match:
raise InvalidFormat()
# sadly, no more information on ATIN number validation was found
return compact(number)
def is_valid(number):
"""Checks to see if the number provided is a valid ATIN. This checks
the length and formatting if it is present."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
if len(number) == 9:
number = number[:3] + '-' + number[3:5] + '-' + number[5:]
return number
|
|
dee765818c054959a663f31c82ac34bcc908739f
|
cla_public/apps/checker/tests/test_review_page.py
|
cla_public/apps/checker/tests/test_review_page.py
|
from collections import defaultdict
import logging
import unittest
from bs4 import BeautifulSoup
from cla_public.app import create_app
from cla_public.apps.checker.constants import YES, NO
logging.getLogger('MARKDOWN').setLevel(logging.WARNING)
class TestReviewPage(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
self.client = app.test_client()
def assertReviewSection(self, url, html):
soup = BeautifulSoup(html)
sections = soup.select('.main-content h2 a[href="{0}"]'.format(url))
self.assertEqual(
1, len(sections),
'Section not present: {0}'.format(url))
def setProblem(self, problem):
with self.client.session_transaction() as session:
session['ProblemForm'] = {
'categories': problem}
def setAboutYouAnswers(self, **kwargs):
answers = defaultdict(lambda: NO)
answers.update(kwargs)
with self.client.session_transaction() as session:
session['AboutYouForm'] = answers
def setBenefits(self, passported=None, *benefits):
if passported is True:
benefits = ['income_support']
elif passported is False:
benefits = ['other-benefit']
with self.client.session_transaction() as session:
session['YourBenefitsForm'] = {
'benefits': benefits}
def test_review_page_about_you(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=False)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
def test_review_page_bug_passported_about_you_missing(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=True)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
|
Add failing test for missing About You section on review page when on passported benefits
|
Add failing test for missing About You section on review page when on passported benefits
|
Python
|
mit
|
ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public
|
Add failing test for missing About You section on review page when on passported benefits
|
from collections import defaultdict
import logging
import unittest
from bs4 import BeautifulSoup
from cla_public.app import create_app
from cla_public.apps.checker.constants import YES, NO
logging.getLogger('MARKDOWN').setLevel(logging.WARNING)
class TestReviewPage(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
self.client = app.test_client()
def assertReviewSection(self, url, html):
soup = BeautifulSoup(html)
sections = soup.select('.main-content h2 a[href="{0}"]'.format(url))
self.assertEqual(
1, len(sections),
'Section not present: {0}'.format(url))
def setProblem(self, problem):
with self.client.session_transaction() as session:
session['ProblemForm'] = {
'categories': problem}
def setAboutYouAnswers(self, **kwargs):
answers = defaultdict(lambda: NO)
answers.update(kwargs)
with self.client.session_transaction() as session:
session['AboutYouForm'] = answers
def setBenefits(self, passported=None, *benefits):
if passported is True:
benefits = ['income_support']
elif passported is False:
benefits = ['other-benefit']
with self.client.session_transaction() as session:
session['YourBenefitsForm'] = {
'benefits': benefits}
def test_review_page_about_you(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=False)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
def test_review_page_bug_passported_about_you_missing(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=True)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
|
<commit_before><commit_msg>Add failing test for missing About You section on review page when on passported benefits<commit_after>
|
from collections import defaultdict
import logging
import unittest
from bs4 import BeautifulSoup
from cla_public.app import create_app
from cla_public.apps.checker.constants import YES, NO
logging.getLogger('MARKDOWN').setLevel(logging.WARNING)
class TestReviewPage(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
self.client = app.test_client()
def assertReviewSection(self, url, html):
soup = BeautifulSoup(html)
sections = soup.select('.main-content h2 a[href="{0}"]'.format(url))
self.assertEqual(
1, len(sections),
'Section not present: {0}'.format(url))
def setProblem(self, problem):
with self.client.session_transaction() as session:
session['ProblemForm'] = {
'categories': problem}
def setAboutYouAnswers(self, **kwargs):
answers = defaultdict(lambda: NO)
answers.update(kwargs)
with self.client.session_transaction() as session:
session['AboutYouForm'] = answers
def setBenefits(self, passported=None, *benefits):
if passported is True:
benefits = ['income_support']
elif passported is False:
benefits = ['other-benefit']
with self.client.session_transaction() as session:
session['YourBenefitsForm'] = {
'benefits': benefits}
def test_review_page_about_you(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=False)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
def test_review_page_bug_passported_about_you_missing(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=True)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
|
Add failing test for missing About You section on review page when on passported benefitsfrom collections import defaultdict
import logging
import unittest
from bs4 import BeautifulSoup
from cla_public.app import create_app
from cla_public.apps.checker.constants import YES, NO
logging.getLogger('MARKDOWN').setLevel(logging.WARNING)
class TestReviewPage(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
self.client = app.test_client()
def assertReviewSection(self, url, html):
soup = BeautifulSoup(html)
sections = soup.select('.main-content h2 a[href="{0}"]'.format(url))
self.assertEqual(
1, len(sections),
'Section not present: {0}'.format(url))
def setProblem(self, problem):
with self.client.session_transaction() as session:
session['ProblemForm'] = {
'categories': problem}
def setAboutYouAnswers(self, **kwargs):
answers = defaultdict(lambda: NO)
answers.update(kwargs)
with self.client.session_transaction() as session:
session['AboutYouForm'] = answers
def setBenefits(self, passported=None, *benefits):
if passported is True:
benefits = ['income_support']
elif passported is False:
benefits = ['other-benefit']
with self.client.session_transaction() as session:
session['YourBenefitsForm'] = {
'benefits': benefits}
def test_review_page_about_you(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=False)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
def test_review_page_bug_passported_about_you_missing(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=True)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
|
<commit_before><commit_msg>Add failing test for missing About You section on review page when on passported benefits<commit_after>from collections import defaultdict
import logging
import unittest
from bs4 import BeautifulSoup
from cla_public.app import create_app
from cla_public.apps.checker.constants import YES, NO
logging.getLogger('MARKDOWN').setLevel(logging.WARNING)
class TestReviewPage(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
self.client = app.test_client()
def assertReviewSection(self, url, html):
soup = BeautifulSoup(html)
sections = soup.select('.main-content h2 a[href="{0}"]'.format(url))
self.assertEqual(
1, len(sections),
'Section not present: {0}'.format(url))
def setProblem(self, problem):
with self.client.session_transaction() as session:
session['ProblemForm'] = {
'categories': problem}
def setAboutYouAnswers(self, **kwargs):
answers = defaultdict(lambda: NO)
answers.update(kwargs)
with self.client.session_transaction() as session:
session['AboutYouForm'] = answers
def setBenefits(self, passported=None, *benefits):
if passported is True:
benefits = ['income_support']
elif passported is False:
benefits = ['other-benefit']
with self.client.session_transaction() as session:
session['YourBenefitsForm'] = {
'benefits': benefits}
def test_review_page_about_you(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=False)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
def test_review_page_bug_passported_about_you_missing(self):
self.setProblem('debt')
self.setAboutYouAnswers(on_benefits=YES)
self.setBenefits(passported=True)
response = self.client.get('/review')
self.assertReviewSection('/about', response.data)
|
|
1a8e623e13323187dc6b40a7358d6293d4f39706
|
github2jenkins.py
|
github2jenkins.py
|
#!/usr/bin/env python
"""
Create Jenkins job corresponding to each Github repository.
"""
import os
import getpass
import github3
import jenkinsapi
# Github user(s) whose repositories are to be created in Jenkins
GITHUB_USERS = ["taverna"]
# Branches whose existence means a corresponding Jenkins job is
# created. The job will be called $repository-$branch, except for
# the master branch, which is simply $repository
BRANCHES = ["master", "maintenance"]
# Jenkins instance where jobs will be created, e.g.
# http://localhost:8080/jenkins/
JENKINS = "http://build.mygrid.org.uk/ci/"
# Pre-existing Jenkins job which config is
# to be used as a template for any new jobs
#
# Note: The template must be both a valid
# Jenkins name and a Github repository
# as naive search-replace is used on the
# Jenkins Job Config XML
# The string "master" will be search-replaced for
# other branches
JENKINS_JOB_TEMPLATE = "taverna-wsdl-activity"
# The pre-configured user/repo substring of the github URLs in the
# Jenkins job-template - this will be search-replaced to
# $user/$repo
JENKINS_JOB_TEMPLATE_REPO = "taverna/taverna-wsdl-activity"
# Jenkins user with write-access in Jenkins
# The library will prompt on the console at runtime for
# the jenkins password.
#
# Set the user to None for readonly mode, in which case
# new Jenkins jobs will not be created, but their name
# printed on the console.
#
JENKINS_USER = os.environ.get("JENKINS_USER") or getpass.getuser()
class Github2JenkinsException(Exception):
pass
gh = github3.GitHub()
_jenkins = None
def jenkins():
global _jenkins
if _jenkins is not None:
return _jenkins
password = os.environ.get("JENKINS_PASSWORD")
if JENKINS_USER and not password:
# Need to ask for password
print "Jenkins:", JENKINS
password = getpass.getpass("Password for user " + JENKINS_USER +
" [empty for read-only]: ")
if not password:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS)
else:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS, JENKINS_USER, password)
return _jenkins
def repos(username, must_have_branch):
for repo in gh.iter_user_repos(username):
if repo.branch(must_have_branch):
yield repo
_jenkins_template = None
def create_template(job_name, repository):
global _jenkins_template
if _jenkins_template is not None:
return _jenkins_template
_jenkins_template = jenkins()[JENKINS_JOB_TEMPLATE]
if not _jenkins_template:
raise Github2JenkinsException("Can't find template " + JENKINS_JOB_TEMPLATE)
return _jenkins_template
|
Create Jenkins jobs per Github repository
|
Create Jenkins jobs per Github repository
|
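The file ends after fetching the template; the step it implies, cloning the template config per repository and branch, could look roughly like this sketch. Job.get_config() and Jenkins.create_job() are real jenkinsapi calls, while the github3 owner.login attribute and the driver function itself are assumptions:

def create_job_for(repo, branch):
    # Naming convention from the comments: $repository for master,
    # $repository-$branch for other branches.
    name = repo.name if branch == "master" else "%s-%s" % (repo.name, branch)
    xml = create_template(name, repo).get_config()
    # Naive search-replace, as the config section warns.
    xml = xml.replace(JENKINS_JOB_TEMPLATE_REPO, "%s/%s" % (repo.owner.login, repo.name))
    if branch != "master":
        xml = xml.replace("master", branch)
    if JENKINS_USER is None:
        print name  # read-only mode: report the job instead of creating it
    else:
        jenkins().create_job(name, xml)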
Python
|
mit
|
stain/github2jenkins
|
Create Jenkins jobs per Github repository
|
#!/usr/bin/env python
"""
Create Jenkins job corresponding to each Github repository.
"""
import os
import getpass
import github3
import jenkinsapi
# Github user(s) whose repositories are to be created in Jenkins
GITHUB_USERS = ["taverna"]
# Branches whose existence means a corresponding Jenkins job is
# created. The job will be called $repository-$branch, except for
# the master branch, which is simply $repository
BRANCHES = ["master", "maintenance"]
# Jenkins instance where jobs will be created, e.g.
# http://localhost:8080/jenkins/
JENKINS = "http://build.mygrid.org.uk/ci/"
# Pre-existing Jenkins job which config is
# to be used as a template for any new jobs
#
# Note: The template must be both a valid
# Jenkins name and a Github repository
# as naive search-replace is used on the
# Jenkins Job Config XML
# The string "master" will be search-replaced for
# other branches
JENKINS_JOB_TEMPLATE = "taverna-wsdl-activity"
# The pre-configured user/repo substring of the github URLs in the
# Jenkins job-template - this will be search-replaced to
# $user/$repo
JENKINS_JOB_TEMPLATE_REPO = "taverna/taverna-wsdl-activity"
# Jenkins user with write-access in Jenkins
# The library will prompt on the console at runtime for
# the jenkins password.
#
# Set the user to None for readonly mode, in which case
# new Jenkins jobs will not be created, but their name
# printed on the console.
#
JENKINS_USER = os.environ.get("JENKINS_USER") or getpass.getuser()
class Github2JenkinsException(Exception):
pass
gh = github3.GitHub()
_jenkins = None
def jenkins():
global _jenkins
if _jenkins is not None:
return _jenkins
password = os.environ.get("JENKINS_PASSWORD")
if JENKINS_USER and not password:
# Need to ask for password
print "Jenkins:", JENKINS
password = getpass.getpass("Password for user " + JENKINS_USER +
" [empty for read-only]: ")
if not password:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS)
else:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS, JENKINS_USER, password)
return _jenkins
def repos(username, must_have_branch):
for repo in gh.iter_user_repos(username):
if repo.branch(must_have_branch):
yield repo
_jenkins_template = None
def create_template(job_name, repository):
global _jenkins_template
if _jenkins_template is not None:
return _jenkins_template
_jenkins_template = jenkins()[JENKINS_JOB_TEMPLATE]
if not _jenkins_template:
raise Github2JenkinsException("Can't find template " + JENKINS_JOB_TEMPLATE)
return _jenkins_template
|
<commit_before><commit_msg>Create Jenkins jobs per Github repository<commit_after>
|
#!/usr/bin/env python
"""
Create Jenkins job corresponding to each Github repository.
"""
import os
import getpass
import github3
import jenkinsapi
# Github user(s) whose repositories are to be created in Jenkins
GITHUB_USERS = ["taverna"]
# Branches whose existence means a corresponding Jenkins job is
# created. The job will be called $repository-$branch, except for
# the master branch, which is simply $repository
BRANCHES = ["master", "maintenance"]
# Jenkins instance where jobs will be created, e.g.
# http://localhost:8080/jenkins/
JENKINS = "http://build.mygrid.org.uk/ci/"
# Pre-existing Jenkins job which config is
# to be used as a template for any new jobs
#
# Note: The template must be both a valid
# Jenkins name and a Github repository
# as naive search-replace is used on the
# Jenkins Job Config XML
# The string "master" will be search-replaced for
# other branches
JENKINS_JOB_TEMPLATE = "taverna-wsdl-activity"
# The pre-configured user/repo substring of the github URLs in the
# Jenkins job-template - this will be search-replaced to
# $user/$repo
JENKINS_JOB_TEMPLATE_REPO = "taverna/taverna-wsdl-activity"
# Jenkins user with write-access in Jenkins
# The library will prompt on the console at runtime for
# the jenkins password.
#
# Set the user to None for readonly mode, in which case
# new Jenkins jobs will not be created, but their name
# printed on the console.
#
JENKINS_USER = os.environ.get("JENKINS_USER") or getpass.getuser()
class Github2JenkinsException(Exception):
pass
gh = github3.GitHub()
_jenkins = None
def jenkins():
global _jenkins
if _jenkins is not None:
return _jenkins
password = os.environ.get("JENKINS_PASSWORD")
if JENKINS_USER and not password:
# Need to ask for password
print "Jenkins:", JENKINS
password = getpass.getpass("Password for user " + JENKINS_USER +
" [empty for read-only]: ")
if not password:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS)
else:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS, JENKINS_USER, password)
return _jenkins
def repos(username, must_have_branch):
for repo in gh.iter_user_repos(username):
if repo.branch(must_have_branch):
yield repo
_jenkins_template = None
def create_template(job_name, repository):
global _jenkins_template
if _jenkins_template is not None:
return _jenkins_template
_jenkins_template = jenkins()[JENKINS_JOB_TEMPLATE]
if not _jenkins_template:
raise Github2JenkinsException("Can't find template " + JENKINS_JOB_TEMPLATE)
return _jenkins_template
|
Create Jenkins jobs per Github repository#!/usr/bin/env python
"""
Create Jenkins job corresponding to each Github repository.
"""
import os
import getpass
import github3
import jenkinsapi
# Github user(s) whose repositories are to be created in Jenkins
GITHUB_USERS = ["taverna"]
# Branches whose existence means a corresponding Jenkins job is
# created. The job will be called $repository-$branch, except for
# the master branch, which is simply $repository
BRANCHES = ["master", "maintenance"]
# Jenkins instance where jobs will be created, e.g.
# http://localhost:8080/jenkins/
JENKINS = "http://build.mygrid.org.uk/ci/"
# Pre-existing Jenkins job which config is
# to be used as a template for any new jobs
#
# Note: The template must be both a valid
# Jenkins name and a Github repository
# as naive search-replace is used on the
# Jenkins Job Config XML
# The string "master" will be search-replaced for
# other branches
JENKINS_JOB_TEMPLATE = "taverna-wsdl-activity"
# The pre-configured user/repo substring of the github URLs in the
# Jenkins job-template - this will be search-replaced to
# $user/$repo
JENKINS_JOB_TEMPLATE_REPO = "taverna/taverna-wsdl-activity"
# Jenkins user with write-access in Jenkins
# The library will prompt on the console at runtime for
# the jenkins password.
#
# Set the user to None for readonly mode, in which case
# new Jenkins jobs will not be created, but their name
# printed on the console.
#
JENKINS_USER = os.environ.get("JENKINS_USER") or getpass.getuser()
class Github2JenkinsException(Exception):
pass
gh = github3.GitHub()
_jenkins = None
def jenkins():
global _jenkins
if _jenkins is not None:
return _jenkins
password = os.environ.get("JENKINS_PASSWORD")
if JENKINS_USER and not password:
# Need to ask for password
print "Jenkins:", JENKINS
password = getpass.getpass("Password for user " + JENKINS_USER +
" [empty for read-only]: ")
if not password:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS)
else:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS, JENKINS_USER, password)
return _jenkins
def repos(username, must_have_branch):
for repo in gh.iter_user_repos(username):
if repo.branch(must_have_branch):
yield repo
_jenkins_template = None
def create_template(job_name, repository):
global _jenkins_template
if _jenkins_template is not None:
return _jenkins_template
_jenkins_template = jenkins()[JENKINS_JOB_TEMPLATE]
if not _jenkins_template:
raise Github2JenkinsException("Can't find template " + JENKINS_JOB_TEMPLATE)
return _jenkins_template
|
<commit_before><commit_msg>Create Jenkins jobs per Github repository<commit_after>#!/usr/bin/env python
"""
Create Jenkins job corresponding to each Github repository.
"""
import os
import getpass
import github3
import jenkinsapi
# Github user(s) whose repositories are to be created in Jenkins
GITHUB_USERS = ["taverna"]
# Branches whose existence means a corresponding Jenkins job is
# created. The job will be called $repository-$branch, except for
# the master branch, which is simply $repository
BRANCHES = ["master", "maintenance"]
# Jenkins instance where jobs will be created, e.g.
# http://localhost:8080/jenkins/
JENKINS = "http://build.mygrid.org.uk/ci/"
# Pre-existing Jenkins job which config is
# to be used as a template for any new jobs
#
# Note: The template must be both a valid
# Jenkins name and a Github repository
# as naive search-replace is used on the
# Jenkins Job Config XML
# The string "master" will be search-replaced for
# other branches
JENKINS_JOB_TEMPLATE = "taverna-wsdl-activity"
# The pre-configured user/repo substring of the github URLs in the
# Jenkins job-template - this will be search-replaced to
# $user/$repo
JENKINS_JOB_TEMPLATE_REPO = "taverna/taverna-wsdl-activity"
# Jenkins user with write-access in Jenkins
# The library will prompt on the console at runtime for
# the jenkins password.
#
# Set the user to None for readonly mode, in which case
# new Jenkins jobs will not be created, but their name
# printed on the console.
#
JENKINS_USER = os.environ.get("JENKINS_USER") or getpass.getuser()
class Github2JenkinsException(Exception):
pass
gh = github3.GitHub()
_jenkins = None
def jenkins():
global _jenkins
if _jenkins is not None:
return _jenkins
password = os.environ.get("JENKINS_PASSWORD")
if JENKINS_USER and not password:
# Need to ask for password
print "Jenkins:", JENKINS
password = getpass.getpass("Password for user " + JENKINS_USER +
" [empty for read-only]: ")
if not password:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS)
else:
_jenkins = jenkinsapi.jenkins.Jenkins(JENKINS, JENKINS_USER, password)
return _jenkins
def repos(username, must_have_branch):
for repo in gh.iter_user_repos(username):
if repo.branch(must_have_branch):
yield repo
_jenkins_template = None
def create_template(job_name, repository):
global _jenkins_template
if _jenkins_template is not None:
return _jenkins_template
_jenkins_template = jenkins()[JENKINS_JOB_TEMPLATE]
if not _jenkins_template:
raise Github2JenkinsException("Can't find template " + JENKINS_JOB_TEMPLATE)
return _jenkins_template
|
|
a91523943d6fb6790e91c3b348ab665c9b63c608
|
apps/network/tests/test_routes/test_setup.py
|
apps/network/tests/test_routes/test_setup.py
|
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
ADD Network setup unit tests
|
ADD Network setup unit tests
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
ADD Network setup unit tests
|
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
<commit_before><commit_msg>ADD Network setup unit tests<commit_after>
|
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
ADD Network setup unit tests
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
<commit_before><commit_msg>ADD Network setup unit tests<commit_after>
def test_initial_setup(client):
result = client.post("/setup/", data={"setup": "setup_configs_sample"})
assert result.status_code == 200
assert result.get_json() == {"msg": "Running initial setup!"}
def test_get_setup(client):
result = client.get("/setup/")
assert result.status_code == 200
assert result.get_json() == {"setup": {}}
|
|
971958a489f61920911b5d44f9491fa163017069
|
py/find-k-closest-elements.py
|
py/find-k-closest-elements.py
|
class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
larr = len(arr)
L, U = -1, larr
while L + 1 < U:
mid = L + (U - L) / 2
if arr[mid] > x:
U = mid
else:
L = mid
Lary, Uary = [], []
while len(Lary) + len(Uary) < k:
Lcand, Ucand = None, None
if L >= 0:
Lcand = arr[L]
if U < larr:
Ucand = arr[U]
if Lcand is None:
Uary.append(Ucand)
U += 1
elif Ucand is None:
Lary.append(Lcand)
L -= 1
else:
if abs(Lcand - x) <= abs(Ucand - x):
Lary.append(Lcand)
L -= 1
else:
Uary.append(Ucand)
U += 1
return Lary[::-1] + Uary
|
Add py solution for 658. Find K Closest Elements
|
Add py solution for 658. Find K Closest Elements
658. Find K Closest Elements: https://leetcode.com/problems/find-k-closest-elements/
|
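The approach: binary-search the insertion point around x, then grow two pointers outward, taking whichever neighbour is closer to x (ties prefer the smaller element). Expected behaviour on the standard examples:

s = Solution()
s.findClosestElements([1, 2, 3, 4, 5], 4, 3)   # -> [1, 2, 3, 4]
s.findClosestElements([1, 2, 3, 4, 5], 4, -1)  # -> [1, 2, 3, 4]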
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 658. Find K Closest Elements
658. Find K Closest Elements: https://leetcode.com/problems/find-k-closest-elements/
|
class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
larr = len(arr)
L, U = -1, larr
while L + 1 < U:
mid = L + (U - L) / 2
if arr[mid] > x:
U = mid
else:
L = mid
Lary, Uary = [], []
while len(Lary) + len(Uary) < k:
Lcand, Ucand = None, None
if L >= 0:
Lcand = arr[L]
if U < larr:
Ucand = arr[U]
if Lcand is None:
Uary.append(Ucand)
U += 1
elif Ucand is None:
Lary.append(Lcand)
L -= 1
else:
if abs(Lcand - x) <= abs(Ucand - x):
Lary.append(Lcand)
L -= 1
else:
Uary.append(Ucand)
U += 1
return Lary[::-1] + Uary
|
<commit_before><commit_msg>Add py solution for 658. Find K Closest Elements
658. Find K Closest Elements: https://leetcode.com/problems/find-k-closest-elements/<commit_after>
|
class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
larr = len(arr)
L, U = -1, larr
while L + 1 < U:
mid = L + (U - L) / 2
if arr[mid] > x:
U = mid
else:
L = mid
Lary, Uary = [], []
while len(Lary) + len(Uary) < k:
Lcand, Ucand = None, None
if L >= 0:
Lcand = arr[L]
if U < larr:
Ucand = arr[U]
if Lcand is None:
Uary.append(Ucand)
U += 1
elif Ucand is None:
Lary.append(Lcand)
L -= 1
else:
if abs(Lcand - x) <= abs(Ucand - x):
Lary.append(Lcand)
L -= 1
else:
Uary.append(Ucand)
U += 1
return Lary[::-1] + Uary
|
Add py solution for 658. Find K Closest Elements
658. Find K Closest Elements: https://leetcode.com/problems/find-k-closest-elements/class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
larr = len(arr)
L, U = -1, larr
while L + 1 < U:
mid = L + (U - L) / 2
if arr[mid] > x:
U = mid
else:
L = mid
Lary, Uary = [], []
while len(Lary) + len(Uary) < k:
Lcand, Ucand = None, None
if L >= 0:
Lcand = arr[L]
if U < larr:
Ucand = arr[U]
if Lcand is None:
Uary.append(Ucand)
U += 1
elif Ucand is None:
Lary.append(Lcand)
L -= 1
else:
if abs(Lcand - x) <= abs(Ucand - x):
Lary.append(Lcand)
L -= 1
else:
Uary.append(Ucand)
U += 1
return Lary[::-1] + Uary
|
<commit_before><commit_msg>Add py solution for 658. Find K Closest Elements
658. Find K Closest Elements: https://leetcode.com/problems/find-k-closest-elements/<commit_after>class Solution(object):
def findClosestElements(self, arr, k, x):
"""
:type arr: List[int]
:type k: int
:type x: int
:rtype: List[int]
"""
larr = len(arr)
L, U = -1, larr
while L + 1 < U:
mid = L + (U - L) / 2
if arr[mid] > x:
U = mid
else:
L = mid
Lary, Uary = [], []
while len(Lary) + len(Uary) < k:
Lcand, Ucand = None, None
if L >= 0:
Lcand = arr[L]
if U < larr:
Ucand = arr[U]
if Lcand is None:
Uary.append(Ucand)
U += 1
elif Ucand is None:
Lary.append(Lcand)
L -= 1
else:
if abs(Lcand - x) <= abs(Ucand - x):
Lary.append(Lcand)
L -= 1
else:
Uary.append(Ucand)
U += 1
return Lary[::-1] + Uary
|
|
311a952851e9d2f859607060c387df8079b8ecce
|
txircd/modules/core/channellevel.py
|
txircd/modules/core/channellevel.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ChannelLevel(ModuleData):
implements(IModuleData)
name = "ChannelLevel"
core = True
def actions(self):
return [ ("checkchannellevel", 1, self.levelCheck),
("checkexemptchanops", 1, self.exemptCheck) ]
def minLevelFromConfig(self, configKey, checkType, defaultLevel):
configLevel = self.ircd.config.get(configKey, {}).get(checkType, defaultLevel)
try:
minLevel = int(configLevel)
except ValueError:
if configLevel not in self.ircd.channelStatuses:
return False # If the status doesn't exist, then, to be safe, we must assume NOBODY is above the line.
minLevel = self.ircd.channelStatuses[configLevel][1]
return minLevel
def levelCheck(self, levelType, channel, user):
minLevel = self.minLevelFromConfig("channel_minimum_level", levelType, 100)
return channel.userRank(user) >= minLevel
def exemptCheck(self, exemptType, channel, user):
minLevel = self.minLevelFromConfig("channel_exempt_level", exemptType, 0)
if not minLevel:
return False # No minimum level == no exemptions
return channel.userRank(user) >= minLevel
chanLevel = ChannelLevel()
|
Implement a consistent way to check channel rank requirements
|
Implement a consistent way to check channel rank requirements
|
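For illustration, the config shape these helpers read: each value is either a bare rank or a status character resolved through ircd.channelStatuses. The key names under each section here are illustrative, not taken from the commit:

config = {
    "channel_minimum_level": {
        "topic": "o",    # status char: int("o") fails, so the rank comes from ircd.channelStatuses["o"][1]
        "invite": 100,   # bare number: used as the minimum rank directly
    },
    "channel_exempt_level": {
        "stripcolor": "v",  # users at or above voice are exempt from this check
    },
}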
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Implement a consistent way to check channel rank requirements
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ChannelLevel(ModuleData):
implements(IModuleData)
name = "ChannelLevel"
core = True
def actions(self):
return [ ("checkchannellevel", 1, self.levelCheck),
("checkexemptchanops", 1, self.exemptCheck) ]
def minLevelFromConfig(self, configKey, checkType, defaultLevel):
configLevel = self.ircd.config.get(configKey, {}).get(checkType, defaultLevel)
try:
minLevel = int(configLevel)
except ValueError:
if configLevel not in self.ircd.channelStatuses:
return False # If the status doesn't exist, then, to be safe, we must assume NOBODY is above the line.
minLevel = self.ircd.channelStatuses[configLevel][1]
return minLevel
    def levelCheck(self, levelType, channel, user):
        minLevel = self.minLevelFromConfig("channel_minimum_level", levelType, 100)
        if minLevel is False:
            return False # Fail closed: an unknown status name in the config means nobody qualifies
        return channel.userRank(user) >= minLevel
def exemptCheck(self, exemptType, channel, user):
minLevel = self.minLevelFromConfig("channel_exempt_level", exemptType, 0)
if not minLevel:
return False # No minimum level == no exemptions
return channel.userRank(user) >= minLevel
chanLevel = ChannelLevel()
|
<commit_before><commit_msg>Implement a consistent way to check channel rank requirements<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ChannelLevel(ModuleData):
implements(IModuleData)
name = "ChannelLevel"
core = True
def actions(self):
return [ ("checkchannellevel", 1, self.levelCheck),
("checkexemptchanops", 1, self.exemptCheck) ]
def minLevelFromConfig(self, configKey, checkType, defaultLevel):
configLevel = self.ircd.config.get(configKey, {}).get(checkType, defaultLevel)
try:
minLevel = int(configLevel)
except ValueError:
if configLevel not in self.ircd.channelStatuses:
return False # If the status doesn't exist, then, to be safe, we must assume NOBODY is above the line.
minLevel = self.ircd.channelStatuses[configLevel][1]
return minLevel
    def levelCheck(self, levelType, channel, user):
        minLevel = self.minLevelFromConfig("channel_minimum_level", levelType, 100)
        if minLevel is False:
            return False # Fail closed: an unknown status name in the config means nobody qualifies
        return channel.userRank(user) >= minLevel
def exemptCheck(self, exemptType, channel, user):
minLevel = self.minLevelFromConfig("channel_exempt_level", exemptType, 0)
if not minLevel:
return False # No minimum level == no exemptions
return channel.userRank(user) >= minLevel
chanLevel = ChannelLevel()
|
Implement a consistent way to check channel rank requirementsfrom twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ChannelLevel(ModuleData):
implements(IModuleData)
name = "ChannelLevel"
core = True
def actions(self):
return [ ("checkchannellevel", 1, self.levelCheck),
("checkexemptchanops", 1, self.exemptCheck) ]
def minLevelFromConfig(self, configKey, checkType, defaultLevel):
configLevel = self.ircd.config.get(configKey, {}).get(checkType, defaultLevel)
try:
minLevel = int(configLevel)
except ValueError:
if configLevel not in self.ircd.channelStatuses:
return False # If the status doesn't exist, then, to be safe, we must assume NOBODY is above the line.
minLevel = self.ircd.channelStatuses[configLevel][1]
return minLevel
    def levelCheck(self, levelType, channel, user):
        minLevel = self.minLevelFromConfig("channel_minimum_level", levelType, 100)
        if minLevel is False:
            return False # Fail closed: an unknown status name in the config means nobody qualifies
        return channel.userRank(user) >= minLevel
def exemptCheck(self, exemptType, channel, user):
minLevel = self.minLevelFromConfig("channel_exempt_level", exemptType, 0)
if not minLevel:
return False # No minimum level == no exemptions
return channel.userRank(user) >= minLevel
chanLevel = ChannelLevel()
|
<commit_before><commit_msg>Implement a consistent way to check channel rank requirements<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ChannelLevel(ModuleData):
implements(IModuleData)
name = "ChannelLevel"
core = True
def actions(self):
return [ ("checkchannellevel", 1, self.levelCheck),
("checkexemptchanops", 1, self.exemptCheck) ]
def minLevelFromConfig(self, configKey, checkType, defaultLevel):
configLevel = self.ircd.config.get(configKey, {}).get(checkType, defaultLevel)
try:
minLevel = int(configLevel)
except ValueError:
if configLevel not in self.ircd.channelStatuses:
return False # If the status doesn't exist, then, to be safe, we must assume NOBODY is above the line.
minLevel = self.ircd.channelStatuses[configLevel][1]
return minLevel
    def levelCheck(self, levelType, channel, user):
        minLevel = self.minLevelFromConfig("channel_minimum_level", levelType, 100)
        if minLevel is False:
            return False # Fail closed: an unknown status name in the config means nobody qualifies
        return channel.userRank(user) >= minLevel
def exemptCheck(self, exemptType, channel, user):
minLevel = self.minLevelFromConfig("channel_exempt_level", exemptType, 0)
if not minLevel:
return False # No minimum level == no exemptions
return channel.userRank(user) >= minLevel
chanLevel = ChannelLevel()
|
|
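The lookup above accepts either a numeric rank or a status name from the config. A self-contained sketch of that resolution logic follows; the status table, config values, and check-type names ("settopic", "invite", "kick") are made up for illustration:

# Standalone sketch of the minLevelFromConfig resolution rules.
channel_statuses = {"op": ("o", 100), "voice": ("v", 10)}  # name -> (mode, rank)
config = {"channel_minimum_level": {"settopic": "op", "invite": 50}}

def min_level(check_type, default=100):
    level = config.get("channel_minimum_level", {}).get(check_type, default)
    try:
        return int(level)  # numeric threshold is used directly
    except ValueError:
        if level not in channel_statuses:
            return False  # unknown status name: fail closed
        return channel_statuses[level][1]  # rank attached to the named status

print(min_level("settopic"))  # -> 100, resolved through the "op" status
print(min_level("invite"))    # -> 50, numeric value taken as-is
print(min_level("kick"))      # -> 100, falls back to the default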
07b2fa2df9fb23dc1e3bdd56609b596555644d29
|
murano/tests/unit/packages/test_exceptions.py
|
murano/tests/unit/packages/test_exceptions.py
|
# Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from murano.packages import exceptions
import murano.tests.unit.base as test_base
class TestExceptions(test_base.MuranoTestCase):
def test_package_class_load_error(self):
class_name = 'test class name'
message = 'test message'
error = exceptions.PackageClassLoadError(class_name=class_name,
message=message)
expected = 'Unable to load class "{0}" from package: {1}'\
.format(class_name, message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_ui_load_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageUILoadError(message=message)
expected = 'Unable to load ui definition from package'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_format_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageFormatError(message=message)
expected = 'Incorrect package format'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
|
Increase unit test coverage for exceptions in Packages API.
|
Increase unit test coverage for exceptions in Packages API.
Implements bp: murano-unit-test-coverage
Co-Authored-By: David Purcell <david.purcell@att.com>
Co-Authored-By: Samantha Blanco <samantha.blanco@att.com>
Co-Authored-By: Julian Sy <julian.sy@att.com>
Change-Id: Idabe7f63135766c57ed30e5988b71a756671999e
|
Python
|
apache-2.0
|
openstack/murano,openstack/murano
|
Increase unit test coverage for exceptions in Packages API.
Implements bp: murano-unit-test-coverage
Co-Authored-By: David Purcell <david.purcell@att.com>
Co-Authored-By: Samantha Blanco <samantha.blanco@att.com>
Co-Authored-By: Julian Sy <julian.sy@att.com>
Change-Id: Idabe7f63135766c57ed30e5988b71a756671999e
|
# Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from murano.packages import exceptions
import murano.tests.unit.base as test_base
class TestExceptions(test_base.MuranoTestCase):
def test_package_class_load_error(self):
class_name = 'test class name'
message = 'test message'
error = exceptions.PackageClassLoadError(class_name=class_name,
message=message)
expected = 'Unable to load class "{0}" from package: {1}'\
.format(class_name, message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_ui_load_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageUILoadError(message=message)
expected = 'Unable to load ui definition from package'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_format_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageFormatError(message=message)
expected = 'Incorrect package format'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
|
<commit_before><commit_msg>Increase unit test coverage for exceptions in Packages API.
Implements bp: murano-unit-test-coverage
Co-Authored-By: David Purcell <david.purcell@att.com>
Co-Authored-By: Samantha Blanco <samantha.blanco@att.com>
Co-Authored-By: Julian Sy <julian.sy@att.com>
Change-Id: Idabe7f63135766c57ed30e5988b71a756671999e<commit_after>
|
# Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from murano.packages import exceptions
import murano.tests.unit.base as test_base
class TestExceptions(test_base.MuranoTestCase):
def test_package_class_load_error(self):
class_name = 'test class name'
message = 'test message'
error = exceptions.PackageClassLoadError(class_name=class_name,
message=message)
expected = 'Unable to load class "{0}" from package: {1}'\
.format(class_name, message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_ui_load_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageUILoadError(message=message)
expected = 'Unable to load ui definition from package'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_format_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageFormatError(message=message)
expected = 'Incorrect package format'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
|
Increase unit test coverage for exceptions in Packages API.
Implements bp: murano-unit-test-coverage
Co-Authored-By: David Purcell <david.purcell@att.com>
Co-Authored-By: Samantha Blanco <samantha.blanco@att.com>
Co-Authored-By: Julian Sy <julian.sy@att.com>
Change-Id: Idabe7f63135766c57ed30e5988b71a756671999e# Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from murano.packages import exceptions
import murano.tests.unit.base as test_base
class TestExceptions(test_base.MuranoTestCase):
def test_package_class_load_error(self):
class_name = 'test class name'
message = 'test message'
error = exceptions.PackageClassLoadError(class_name=class_name,
message=message)
expected = 'Unable to load class "{0}" from package: {1}'\
.format(class_name, message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_ui_load_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageUILoadError(message=message)
expected = 'Unable to load ui definition from package'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_format_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageFormatError(message=message)
expected = 'Incorrect package format'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
|
<commit_before><commit_msg>Increase unit test coverage for exceptions in Packages API.
Implements bp: murano-unit-test-coverage
Co-Authored-By: David Purcell <david.purcell@att.com>
Co-Authored-By: Samantha Blanco <samantha.blanco@att.com>
Co-Authored-By: Julian Sy <julian.sy@att.com>
Change-Id: Idabe7f63135766c57ed30e5988b71a756671999e<commit_after># Copyright (c) 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from murano.packages import exceptions
import murano.tests.unit.base as test_base
class TestExceptions(test_base.MuranoTestCase):
def test_package_class_load_error(self):
class_name = 'test class name'
message = 'test message'
error = exceptions.PackageClassLoadError(class_name=class_name,
message=message)
expected = 'Unable to load class "{0}" from package: {1}'\
.format(class_name, message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_ui_load_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageUILoadError(message=message)
expected = 'Unable to load ui definition from package'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
def test_package_format_error(self):
messages = ['', 'test_message']
for message in messages:
error = exceptions.PackageFormatError(message=message)
expected = 'Incorrect package format'
if message:
expected += ': {0}'.format(message)
if six.PY2:
self.assertEqual(expected, error.message)
elif six.PY34:
self.assertEqual(expected, error.args[0])
|
|
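The PY2/PY34 branches above exist because Exception.message was removed in Python 3, while args[0] works on both lines; a two-line sketch of the difference:

err = ValueError("boom")
print(err.args[0])  # 'boom' on Python 2 and Python 3
# err.message also yields 'boom' on Python 2 but raises AttributeError on Python 3.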
87afc656e837d1d48fe050d69e793caa5f4a674d
|
sts/util/socket_mux/pox_monkeypatcher.py
|
sts/util/socket_mux/pox_monkeypatcher.py
|
from server_socket_multiplexer import ServerMultiplexedSelect,ServerMockSocket
import select
import socket
# Note: Make sure that this module is loaded after all other modules except
# of_01
def launch():
# Server side:
# - Instantiate ServerMultipexedSelect (this will create a true
# socket for the pinger)
# - override select.select with ServerMultiplexedSelect
# - override socket.socket
# - takes two params: protocol, socket type
# - if not SOCK_STREAM type, return a normal socket
# - we don't know bind address until bind() is called
# - after bind(), create true socket, create SocketDemultiplexer
# All subsequent sockets will be instantiated through accept()
mux_select = ServerMultiplexedSelect()
# Monkey patch select.select
select._old_select = select.select
select.select = mux_select.select
# Monkey patch socket.socket
socket._old_socket = socket.socket
def socket_patch(protocol, sock_type):
if sock_type == socket.SOCK_STREAM:
return ServerMockSocket(protocol, sock_type,
set_true_listen_socket=mux_select.set_true_listen_socket)
else:
            return socket._old_socket(protocol, sock_type)
socket.socket = socket_patch
# TODO(cs): will mux_select be GC'ed?
|
Implement monkey patching. It works!
|
Implement monkey patching. It works!
Needed to fix a few bugs along the way:
- make sure bind() isn't called twice -- need to be careful about the order
we load POX modules
- need to actually send data in send() [even if non-blocking]
- don't assume that only MockSockets are passed into select. The client can
also pass in wrappers of MockSockets (e.g. IOWorkers, or Connections).
Therefore, need to track which sockets are ready to send according to
MockSocket.fileno()
|
Python
|
apache-2.0
|
ucb-sts/sts,jmiserez/sts,ucb-sts/sts,jmiserez/sts
|
Implement monkey patching. It works!
Needed to fix a few bugs along the way:
- make sure bind() isn't called twice -- need to be careful about the order
we load POX modules
- need to actually send data in send() [even if non-blocking]
- don't assume that only MockSockets are passed into select. The client can
also pass in wrappers of MockSockets (e.g. IOWorkers, or Connections).
Therefore, need to track which sockets are ready to send according to
MockSocket.fileno()
|
from server_socket_multiplexer import ServerMultiplexedSelect,ServerMockSocket
import select
import socket
# Note: Make sure that this module is loaded after all other modules except
# of_01
def launch():
# Server side:
# - Instantiate ServerMultipexedSelect (this will create a true
# socket for the pinger)
# - override select.select with ServerMultiplexedSelect
# - override socket.socket
# - takes two params: protocol, socket type
# - if not SOCK_STREAM type, return a normal socket
# - we don't know bind address until bind() is called
# - after bind(), create true socket, create SocketDemultiplexer
# All subsequent sockets will be instantiated through accept()
mux_select = ServerMultiplexedSelect()
# Monkey patch select.select
select._old_select = select.select
select.select = mux_select.select
# Monkey patch socket.socket
socket._old_socket = socket.socket
def socket_patch(protocol, sock_type):
if sock_type == socket.SOCK_STREAM:
return ServerMockSocket(protocol, sock_type,
set_true_listen_socket=mux_select.set_true_listen_socket)
else:
            return socket._old_socket(protocol, sock_type)
socket.socket = socket_patch
# TODO(cs): will mux_select be GC'ed?
|
<commit_before><commit_msg>Implement monkey patching. It works!
Needed to fix a few bugs along the way:
- make sure bind() isn't called twice -- need to be careful about the order
we load POX modules
- need to actually send data in send() [even if non-blocking]
- don't assume that only MockSockets are passed into select. The client can
also pass in wrappers of MockSockets (e.g. IOWorkers, or Connections).
Therefore, need to track which sockets are ready to send according to
MockSocket.fileno()<commit_after>
|
from server_socket_multiplexer import ServerMultiplexedSelect,ServerMockSocket
import select
import socket
# Note: Make sure that this module is loaded after all other modules except
# of_01
def launch():
# Server side:
# - Instantiate ServerMultipexedSelect (this will create a true
# socket for the pinger)
# - override select.select with ServerMultiplexedSelect
# - override socket.socket
# - takes two params: protocol, socket type
# - if not SOCK_STREAM type, return a normal socket
# - we don't know bind address until bind() is called
# - after bind(), create true socket, create SocketDemultiplexer
# All subsequent sockets will be instantiated through accept()
mux_select = ServerMultiplexedSelect()
# Monkey patch select.select
select._old_select = select.select
select.select = mux_select.select
# Monkey patch socket.socket
socket._old_socket = socket.socket
def socket_patch(protocol, sock_type):
if sock_type == socket.SOCK_STREAM:
return ServerMockSocket(protocol, sock_type,
set_true_listen_socket=mux_select.set_true_listen_socket)
else:
            return socket._old_socket(protocol, sock_type)
socket.socket = socket_patch
# TODO(cs): will mux_select be GC'ed?
|
Implement monkey patching. It works!
Needed to fix a few bugs along the way:
- make sure bind() isn't called twice -- need to be careful about the order
we load POX modules
- need to actually send data in send() [even if non-blocking]
- don't assume that only MockSockets are passed into select. The client can
also pass in wrappers of MockSockets (e.g. IOWorkers, or Connections).
Therefore, need to track which sockets are ready to send according to
MockSocket.fileno()
from server_socket_multiplexer import ServerMultiplexedSelect,ServerMockSocket
import select
import socket
# Note: Make sure that this module is loaded after all other modules except
# of_01
def launch():
# Server side:
# - Instantiate ServerMultipexedSelect (this will create a true
# socket for the pinger)
# - override select.select with ServerMultiplexedSelect
# - override socket.socket
# - takes two params: protocol, socket type
# - if not SOCK_STREAM type, return a normal socket
# - we don't know bind address until bind() is called
# - after bind(), create true socket, create SocketDemultiplexer
# All subsequent sockets will be instantiated through accept()
mux_select = ServerMultiplexedSelect()
# Monkey patch select.select
select._old_select = select.select
select.select = mux_select.select
# Monkey patch socket.socket
socket._old_socket = socket.socket
def socket_patch(protocol, sock_type):
if sock_type == socket.SOCK_STREAM:
return ServerMockSocket(protocol, sock_type,
set_true_listen_socket=mux_select.set_true_listen_socket)
else:
            return socket._old_socket(protocol, sock_type)
socket.socket = socket_patch
# TODO(cs): will mux_select be GC'ed?
|
<commit_before><commit_msg>Implement monkey patching. It works!
Needed to fix a few bugs along the way:
- make sure bind() isn't called twice -- need to be careful about the order
we load POX modules
- need to actually send data in send() [even if non-blocking]
- don't assume that only MockSockets are passed into select. The client can
also pass in wrappers of MockSockets (e.g. IOWorkers, or Connections).
Therefore, need to track which sockets are ready to send according to
MockSocket.fileno()<commit_after>
from server_socket_multiplexer import ServerMultiplexedSelect,ServerMockSocket
import select
import socket
# Note: Make sure that this module is loaded after all other modules except
# of_01
def launch():
# Server side:
# - Instantiate ServerMultipexedSelect (this will create a true
# socket for the pinger)
# - override select.select with ServerMultiplexedSelect
# - override socket.socket
# - takes two params: protocol, socket type
# - if not SOCK_STREAM type, return a normal socket
# - we don't know bind address until bind() is called
# - after bind(), create true socket, create SocketDemultiplexer
# All subsequent sockets will be instantiated through accept()
mux_select = ServerMultiplexedSelect()
# Monkey patch select.select
select._old_select = select.select
select.select = mux_select.select
# Monkey patch socket.socket
socket._old_socket = socket.socket
def socket_patch(protocol, sock_type):
if sock_type == socket.SOCK_STREAM:
return ServerMockSocket(protocol, sock_type,
set_true_listen_socket=mux_select.set_true_listen_socket)
else:
            return socket._old_socket(protocol, sock_type)
socket.socket = socket_patch
# TODO(cs): will mux_select be GC'ed?
|
|
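Since launch() stashes the originals on select._old_select and socket._old_socket, the patches can be undone later; a sketch, assuming launch() has already run:

import select
import socket

def unpatch():
    # Restore the originals saved by launch(); a no-op if nothing was patched.
    if hasattr(select, "_old_select"):
        select.select = select._old_select
        del select._old_select
    if hasattr(socket, "_old_socket"):
        socket.socket = socket._old_socket
        del socket._old_socket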
da95ece4472c1988ec8fe9056c8d5b91c79da322
|
server/user/changes.py
|
server/user/changes.py
|
from collections import defaultdict
from proposal.models import Change, Proposal
from proposal.views import proposal_json
from proposal.query import build_proposal_query
def find_updates(subscriptions, since):
change_summary = defaultdict(dict)
for subscription in subscriptions:
query_dict = subscription.query_dict
if not query_dict:
return None
query = build_proposal_query(query_dict)
proposals = Proposal.objects.filter(query, updated__gt=since)
if not proposals:
continue
ids = [proposal.pk for proposal in proposals]
changes = Change.objects.filter(created_at__gt=since,
proposal__in=ids)\
.order_by("created_at")
for change in changes:
if change.description:
desc = change.description
else:
desc = ""
change_summary[change.proposal_id][change.prop_path] = {
"description": desc
}
summary = []
for proposal in proposals:
if proposal.pk in change_summary:
summary.append({"proposal": proposal_json(proposal,
include_images=1,
include_documents=False),
"changes": change_summary[proposal.pk]})
return summary
|
Change summary map for generating notification emails
|
Change summary map for generating notification emails
|
Python
|
mit
|
cityofsomerville/citydash,cityofsomerville/cornerwise,cityofsomerville/citydash,codeforboston/cornerwise,cityofsomerville/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,codeforboston/cornerwise
|
Change summary map for generating notification emails
|
from collections import defaultdict
from proposal.models import Change, Proposal
from proposal.views import proposal_json
from proposal.query import build_proposal_query
def find_updates(subscriptions, since):
change_summary = defaultdict(dict)
for subscription in subscriptions:
query_dict = subscription.query_dict
if not query_dict:
return None
query = build_proposal_query(query_dict)
proposals = Proposal.objects.filter(query, updated__gt=since)
if not proposals:
continue
ids = [proposal.pk for proposal in proposals]
changes = Change.objects.filter(created_at__gt=since,
proposal__in=ids)\
.order_by("created_at")
for change in changes:
if change.description:
desc = change.description
else:
desc = ""
change_summary[change.proposal_id][change.prop_path] = {
"description": desc
}
summary = []
for proposal in proposals:
if proposal.pk in change_summary:
summary.append({"proposal": proposal_json(proposal,
include_images=1,
include_documents=False),
"changes": change_summary[proposal.pk]})
return summary
|
<commit_before><commit_msg>Change summary map for generating notification emails<commit_after>
|
from collections import defaultdict
from proposal.models import Change, Proposal
from proposal.views import proposal_json
from proposal.query import build_proposal_query
def find_updates(subscriptions, since):
change_summary = defaultdict(dict)
for subscription in subscriptions:
query_dict = subscription.query_dict
if not query_dict:
return None
query = build_proposal_query(query_dict)
proposals = Proposal.objects.filter(query, updated__gt=since)
if not proposals:
continue
ids = [proposal.pk for proposal in proposals]
changes = Change.objects.filter(created_at__gt=since,
proposal__in=ids)\
.order_by("created_at")
for change in changes:
if change.description:
desc = change.description
else:
desc = ""
change_summary[change.proposal_id][change.prop_path] = {
"description": desc
}
summary = []
for proposal in proposals:
if proposal.pk in change_summary:
summary.append({"proposal": proposal_json(proposal,
include_images=1,
include_documents=False),
"changes": change_summary[proposal.pk]})
return summary
|
Change summary map for generating notification emailsfrom collections import defaultdict
from proposal.models import Change, Proposal
from proposal.views import proposal_json
from proposal.query import build_proposal_query
def find_updates(subscriptions, since):
change_summary = defaultdict(dict)
for subscription in subscriptions:
query_dict = subscription.query_dict
if not query_dict:
return None
query = build_proposal_query(query_dict)
proposals = Proposal.objects.filter(query, updated__gt=since)
if not proposals:
continue
ids = [proposal.pk for proposal in proposals]
changes = Change.objects.filter(created_at__gt=since,
proposal__in=ids)\
.order_by("created_at")
for change in changes:
if change.description:
desc = change.description
else:
desc = ""
change_summary[change.proposal_id][change.prop_path] = {
"description": desc
}
summary = []
for proposal in proposals:
if proposal.pk in change_summary:
summary.append({"proposal": proposal_json(proposal,
include_images=1,
include_documents=False),
"changes": change_summary[proposal.pk]})
return summary
|
<commit_before><commit_msg>Change summary map for generating notification emails<commit_after>from collections import defaultdict
from proposal.models import Change, Proposal
from proposal.views import proposal_json
from proposal.query import build_proposal_query
def find_updates(subscriptions, since):
change_summary = defaultdict(dict)
for subscription in subscriptions:
query_dict = subscription.query_dict
if not query_dict:
return None
query = build_proposal_query(query_dict)
proposals = Proposal.objects.filter(query, updated__gt=since)
if not proposals:
continue
ids = [proposal.pk for proposal in proposals]
changes = Change.objects.filter(created_at__gt=since,
proposal__in=ids)\
.order_by("created_at")
for change in changes:
if change.description:
desc = change.description
else:
desc = ""
change_summary[change.proposal_id][change.prop_path] = {
"description": desc
}
summary = []
for proposal in proposals:
if proposal.pk in change_summary:
summary.append({"proposal": proposal_json(proposal,
include_images=1,
include_documents=False),
"changes": change_summary[proposal.pk]})
return summary
|
|
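For reference, find_updates returns a list with one entry per updated proposal; a sketch of the shape with invented values (the exact keys inside the proposal payload depend on proposal_json):

summary = [
    {
        "proposal": {"id": 17, "address": "240 Elm St"},  # proposal_json output
        "changes": {
            "status": {"description": "Status changed to Approved"},
            "documents/0": {"description": ""},  # a change recorded without a description
        },
    },
]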
e793c88f68429718c8293225c7099dcb31635d3d
|
accelerator/migrations/0022_add_meeting_info_on_office_hour_model.py
|
accelerator/migrations/0022_add_meeting_info_on_office_hour_model.py
|
# Generated by Django 2.2.10 on 2020-05-21 18:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0021_add_nullable_startup_and_program_fields'),
]
operations = [
migrations.AddField(
model_name='mentorprogramofficehour',
name='meeting_info',
field=models.CharField(blank=True, default='', max_length=100),
),
]
|
Add a migration to add meeting_info
|
[AC-7818] Add a migration to add meeting_info
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-7818] Add a migration to add meeting_info
|
# Generated by Django 2.2.10 on 2020-05-21 18:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0021_add_nullable_startup_and_program_fields'),
]
operations = [
migrations.AddField(
model_name='mentorprogramofficehour',
name='meeting_info',
field=models.CharField(blank=True, default='', max_length=100),
),
]
|
<commit_before><commit_msg>[AC-7818] Add a migration to add meeting_info<commit_after>
|
# Generated by Django 2.2.10 on 2020-05-21 18:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0021_add_nullable_startup_and_program_fields'),
]
operations = [
migrations.AddField(
model_name='mentorprogramofficehour',
name='meeting_info',
field=models.CharField(blank=True, default='', max_length=100),
),
]
|
[AC-7818] Add a migration to add meeting_info# Generated by Django 2.2.10 on 2020-05-21 18:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0021_add_nullable_startup_and_program_fields'),
]
operations = [
migrations.AddField(
model_name='mentorprogramofficehour',
name='meeting_info',
field=models.CharField(blank=True, default='', max_length=100),
),
]
|
<commit_before><commit_msg>[AC-7818] Add a migration to add meeting_info<commit_after># Generated by Django 2.2.10 on 2020-05-21 18:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0021_add_nullable_startup_and_program_fields'),
]
operations = [
migrations.AddField(
model_name='mentorprogramofficehour',
name='meeting_info',
field=models.CharField(blank=True, default='', max_length=100),
),
]
|
|
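Django reverses AddField automatically on rollback, so the migration above needs no explicit downgrade; written out by hand, the reverse would be the following sketch:

from django.db import migrations

reverse_operations = [
    migrations.RemoveField(
        model_name='mentorprogramofficehour',
        name='meeting_info',
    ),
]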
8c8dd6930a7e9ae983dbc28a095767157ced70bd
|
src/conditionals/exercise6.py
|
src/conditionals/exercise6.py
|
# Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
# score is between 0.0 and 1.0, print a grade using the following table
score = raw_input('Type the score: ')
try:
    score = float(score)
except ValueError:
    print 'Bad Score'
    exit()
if score < 0.0 or score > 1.0:
    print 'Bad Score'
elif score >= 0.90:
    print 'A'
elif score >= 0.80:
    print 'B'
elif score >= 0.70:
    print 'C'
elif score >= 0.60:
    print 'D'
else:
    print 'F'
exit()
|
Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the score is between 0.0 and 1.0, print a grade using the following table
|
Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
score is between 0.0 and 1.0, print a grade using the following table
|
Python
|
mit
|
let42/python-course
|
Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
score is between 0.0 and 1.0, print a grade using the following table
|
# Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
# score is between 0.0 and 1.0, print a grade using the following table
score = raw_input('Type the score: ')
try:
    score = float(score)
except ValueError:
    print 'Bad Score'
    exit()
if score < 0.0 or score > 1.0:
    print 'Bad Score'
elif score >= 0.90:
    print 'A'
elif score >= 0.80:
    print 'B'
elif score >= 0.70:
    print 'C'
elif score >= 0.60:
    print 'D'
else:
    print 'F'
exit()
|
<commit_before><commit_msg> Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
score is between 0.0 and 1.0, print a grade using the following table<commit_after>
|
# Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
# score is between 0.0 and 1.0, print a grade using the following table
score = raw_input('Type the score: ')
try:
    score = float(score)
except ValueError:
    print 'Bad Score'
    exit()
if score < 0.0 or score > 1.0:
    print 'Bad Score'
elif score >= 0.90:
    print 'A'
elif score >= 0.80:
    print 'B'
elif score >= 0.70:
    print 'C'
elif score >= 0.60:
    print 'D'
else:
    print 'F'
exit()
|
Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
score is between 0.0 and 1.0, print a grade using the following table# Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
# score is between 0.0 and 1.0, print a grade using the following table
score = raw_input('Type the score: ')
try:
    score = float(score)
except ValueError:
    print 'Bad Score'
    exit()
if score < 0.0 or score > 1.0:
    print 'Bad Score'
elif score >= 0.90:
    print 'A'
elif score >= 0.80:
    print 'B'
elif score >= 0.70:
    print 'C'
elif score >= 0.60:
    print 'D'
else:
    print 'F'
exit()
|
<commit_before><commit_msg> Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
score is between 0.0 and 1.0, print a grade using the following table<commit_after># Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message. If the
# score is between 0.0 and 1.0, print a grade using the following table
score = raw_input('Type the score: ')
try:
    score = float(score)
except ValueError:
    print 'Bad Score'
    exit()
if score < 0.0 or score > 1.0:
    print 'Bad Score'
elif score >= 0.90:
    print 'A'
elif score >= 0.80:
    print 'B'
elif score >= 0.70:
    print 'C'
elif score >= 0.60:
    print 'D'
else:
    print 'F'
exit()
|
|
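The grade table the exercise refers to, reconstructed from the thresholds in the code, together with a Python 3 sketch of the same program:

# score >= 0.9 -> A
# score >= 0.8 -> B
# score >= 0.7 -> C
# score >= 0.6 -> D
# score <  0.6 -> F
try:
    score = float(input('Type the score: '))
except ValueError:
    raise SystemExit('Bad Score')
if not 0.0 <= score <= 1.0:
    print('Bad Score')
elif score >= 0.90:
    print('A')
elif score >= 0.80:
    print('B')
elif score >= 0.70:
    print('C')
elif score >= 0.60:
    print('D')
else:
    print('F')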
19ff1e1999a5132ba7dd84baf446023870620ee9
|
python/download_fungi_genomes_refseq.py
|
python/download_fungi_genomes_refseq.py
|
#!/usr/bin/env python
from __future__ import print_function
import ftplib
import sys
genome_list_fh = open("downloaded_genomes.txt","w")
ftp = ftplib.FTP("ftp.ncbi.nlm.nih.gov")
ftp.connect()
ftp.login()
base_path="/genomes/refseq/fungi"
ftp.cwd(base_path)
#Get species
species_list = ftp.nlst()
#for each species
for species in species_list:
print("Species: {}".format(species))
species_path = base_path + "/{}/latest_assembly_versions".format(species)
#Go into species assembly directory
try:
ftp.cwd(species_path)
except Exception as e:
print("+ Cannot go into dir {}".format(species_path),file=sys.stderr)
print(e,file=sys.stderr)
continue
#Get assemblies
assemblies = ftp.nlst()
for asm in assemblies:
print("Entering assembly {}".format(asm))
try:
ftp.cwd(species_path + "/" + asm)
except Exception as e:
print("+ Cannot enter dir {}".format(species_path+"/"+asm),file=sys.stderr)
print(e,file=sys.stderr)
continue
asm_file_list = ftp.nlst()
files_to_dl = [f for f in asm_file_list if (f.endswith("genomic.fna.gz")) and ("cds_from_genomic" not in f) and ("rna_from_genomic" not in f)]
if not files_to_dl:
#Try downloading the cds
files_to_dl = [f for f in asm_file_list if (f.endswith("cds_from_genomic.fna.gz")) ]
for filename in files_to_dl:
print("Downloading file {}".format(filename))
print('{}\t{}\t{}'.format(species,asm,filename),file=genome_list_fh)
try:
with open(filename,"wb") as fh:
ftp.retrbinary("RETR {}".format(filename), fh.write, 8*1024)
except Exception as e:
print(">Error downloading {}".format(asm_filename),file=sys.stderr)
print(e,file=sys.stderr)
ftp.close()
|
Add script to download refseq fungal genomes via FTP
|
Add script to download refseq fungal genomes via FTP
|
Python
|
apache-2.0
|
maubarsom/biotico-tools,maubarsom/biotico-tools,maubarsom/biotico-tools,maubarsom/biotico-tools,maubarsom/biotico-tools
|
Add script to download refseq fungal genomes via FTP
|
#!/usr/bin/env python
from __future__ import print_function
import ftplib
import sys
genome_list_fh = open("downloaded_genomes.txt","w")
ftp = ftplib.FTP("ftp.ncbi.nlm.nih.gov")
ftp.connect()
ftp.login()
base_path="/genomes/refseq/fungi"
ftp.cwd(base_path)
#Get species
species_list = ftp.nlst()
#for each species
for species in species_list:
print("Species: {}".format(species))
species_path = base_path + "/{}/latest_assembly_versions".format(species)
#Go into species assembly directory
try:
ftp.cwd(species_path)
except Exception as e:
print("+ Cannot go into dir {}".format(species_path),file=sys.stderr)
print(e,file=sys.stderr)
continue
#Get assemblies
assemblies = ftp.nlst()
for asm in assemblies:
print("Entering assembly {}".format(asm))
try:
ftp.cwd(species_path + "/" + asm)
except Exception as e:
print("+ Cannot enter dir {}".format(species_path+"/"+asm),file=sys.stderr)
print(e,file=sys.stderr)
continue
asm_file_list = ftp.nlst()
files_to_dl = [f for f in asm_file_list if (f.endswith("genomic.fna.gz")) and ("cds_from_genomic" not in f) and ("rna_from_genomic" not in f)]
if not files_to_dl:
#Try downloading the cds
files_to_dl = [f for f in asm_file_list if (f.endswith("cds_from_genomic.fna.gz")) ]
for filename in files_to_dl:
print("Downloading file {}".format(filename))
print('{}\t{}\t{}'.format(species,asm,filename),file=genome_list_fh)
try:
with open(filename,"wb") as fh:
ftp.retrbinary("RETR {}".format(filename), fh.write, 8*1024)
except Exception as e:
print(">Error downloading {}".format(asm_filename),file=sys.stderr)
print(e,file=sys.stderr)
ftp.close()
|
<commit_before><commit_msg>Add script to download refseq fungal genomes via FTP<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import ftplib
import sys
genome_list_fh = open("downloaded_genomes.txt","w")
ftp = ftplib.FTP("ftp.ncbi.nlm.nih.gov")
ftp.connect()
ftp.login()
base_path="/genomes/refseq/fungi"
ftp.cwd(base_path)
#Get species
species_list = ftp.nlst()
#for each species
for species in species_list:
print("Species: {}".format(species))
species_path = base_path + "/{}/latest_assembly_versions".format(species)
#Go into species assembly directory
try:
ftp.cwd(species_path)
except Exception as e:
print("+ Cannot go into dir {}".format(species_path),file=sys.stderr)
print(e,file=sys.stderr)
continue
#Get assemblies
assemblies = ftp.nlst()
for asm in assemblies:
print("Entering assembly {}".format(asm))
try:
ftp.cwd(species_path + "/" + asm)
except Exception as e:
print("+ Cannot enter dir {}".format(species_path+"/"+asm),file=sys.stderr)
print(e,file=sys.stderr)
continue
asm_file_list = ftp.nlst()
files_to_dl = [f for f in asm_file_list if (f.endswith("genomic.fna.gz")) and ("cds_from_genomic" not in f) and ("rna_from_genomic" not in f)]
if not files_to_dl:
#Try downloading the cds
files_to_dl = [f for f in asm_file_list if (f.endswith("cds_from_genomic.fna.gz")) ]
for filename in files_to_dl:
print("Downloading file {}".format(filename))
print('{}\t{}\t{}'.format(species,asm,filename),file=genome_list_fh)
try:
with open(filename,"wb") as fh:
ftp.retrbinary("RETR {}".format(filename), fh.write, 8*1024)
except Exception as e:
print(">Error downloading {}".format(asm_filename),file=sys.stderr)
print(e,file=sys.stderr)
ftp.close()
|
Add script to download refseq fungal genomes via FTP#!/usr/bin/env python
from __future__ import print_function
import ftplib
import sys
genome_list_fh = open("downloaded_genomes.txt","w")
ftp = ftplib.FTP("ftp.ncbi.nlm.nih.gov")
ftp.connect()
ftp.login()
base_path="/genomes/refseq/fungi"
ftp.cwd(base_path)
#Get species
species_list = ftp.nlst()
#for each species
for species in species_list:
print("Species: {}".format(species))
species_path = base_path + "/{}/latest_assembly_versions".format(species)
#Go into species assembly directory
try:
ftp.cwd(species_path)
except Exception as e:
print("+ Cannot go into dir {}".format(species_path),file=sys.stderr)
print(e,file=sys.stderr)
continue
#Get assemblies
assemblies = ftp.nlst()
for asm in assemblies:
print("Entering assembly {}".format(asm))
try:
ftp.cwd(species_path + "/" + asm)
except Exception as e:
print("+ Cannot enter dir {}".format(species_path+"/"+asm),file=sys.stderr)
print(e,file=sys.stderr)
continue
asm_file_list = ftp.nlst()
files_to_dl = [f for f in asm_file_list if (f.endswith("genomic.fna.gz")) and ("cds_from_genomic" not in f) and ("rna_from_genomic" not in f)]
if not files_to_dl:
#Try downloading the cds
files_to_dl = [f for f in asm_file_list if (f.endswith("cds_from_genomic.fna.gz")) ]
for filename in files_to_dl:
print("Downloading file {}".format(filename))
print('{}\t{}\t{}'.format(species,asm,filename),file=genome_list_fh)
try:
with open(filename,"wb") as fh:
ftp.retrbinary("RETR {}".format(filename), fh.write, 8*1024)
except Exception as e:
print(">Error downloading {}".format(asm_filename),file=sys.stderr)
print(e,file=sys.stderr)
ftp.close()
|
<commit_before><commit_msg>Add script to download refseq fungal genomes via FTP<commit_after>#!/usr/bin/env python
from __future__ import print_function
import ftplib
import sys
genome_list_fh = open("downloaded_genomes.txt","w")
ftp = ftplib.FTP("ftp.ncbi.nlm.nih.gov")
ftp.connect()
ftp.login()
base_path="/genomes/refseq/fungi"
ftp.cwd(base_path)
#Get species
species_list = ftp.nlst()
#for each species
for species in species_list:
print("Species: {}".format(species))
species_path = base_path + "/{}/latest_assembly_versions".format(species)
#Go into species assembly directory
try:
ftp.cwd(species_path)
except Exception as e:
print("+ Cannot go into dir {}".format(species_path),file=sys.stderr)
print(e,file=sys.stderr)
continue
#Get assemblies
assemblies = ftp.nlst()
for asm in assemblies:
print("Entering assembly {}".format(asm))
try:
ftp.cwd(species_path + "/" + asm)
except Exception as e:
print("+ Cannot enter dir {}".format(species_path+"/"+asm),file=sys.stderr)
print(e,file=sys.stderr)
continue
asm_file_list = ftp.nlst()
files_to_dl = [f for f in asm_file_list if (f.endswith("genomic.fna.gz")) and ("cds_from_genomic" not in f) and ("rna_from_genomic" not in f)]
if not files_to_dl:
#Try downloading the cds
files_to_dl = [f for f in asm_file_list if (f.endswith("cds_from_genomic.fna.gz")) ]
for filename in files_to_dl:
print("Downloading file {}".format(filename))
print('{}\t{}\t{}'.format(species,asm,filename),file=genome_list_fh)
try:
with open(filename,"wb") as fh:
ftp.retrbinary("RETR {}".format(filename), fh.write, 8*1024)
except Exception as e:
print(">Error downloading {}".format(asm_filename),file=sys.stderr)
print(e,file=sys.stderr)
ftp.close()
|
|
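The download filter above keeps whole-genome FASTA files and falls back to CDS files when none exist; an offline check of that filter with invented listing entries:

listing = [
    "GCF_000001_genomic.fna.gz",
    "GCF_000001_cds_from_genomic.fna.gz",
    "GCF_000001_rna_from_genomic.fna.gz",
    "GCF_000001_assembly_report.txt",
]
keep = [f for f in listing
        if f.endswith("genomic.fna.gz")
        and "cds_from_genomic" not in f
        and "rna_from_genomic" not in f]
print(keep)  # -> ['GCF_000001_genomic.fna.gz']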
23bc365763b0d07d80015b72d43e12a4d0d91bc7
|
keylime/migrations/versions/8da20383f6e1_extend_ip_field.py
|
keylime/migrations/versions/8da20383f6e1_extend_ip_field.py
|
"""extend_ip_field
Revision ID: 8da20383f6e1
Revises: eeb702f77d7d
Create Date: 2021-01-14 10:50:56.275257
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8da20383f6e1'
down_revision = 'eeb702f77d7d'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table('verifiermain') as batch_op:
batch_op.alter_column('ip', existing_type=sa.String(length=15),
type_=sa.String(length=255), existing_nullable=True)
def downgrade_cloud_verifier():
pass
|
Extend capacity of verifiermain.ip field
|
Extend capacity of verifiermain.ip field
Add migration script to extend the ip field capacity to hold up
to 255 characters.
Resolves: #498
Signed-off-by: Kaifeng Wang <5bfbeb450d06b1aa9e4417d3c7676f1231c80181@gmail.com>
|
Python
|
bsd-2-clause
|
mit-ll/python-keylime,mit-ll/python-keylime,mit-ll/python-keylime,mit-ll/python-keylime
|
Extend capacity of verifiermain.ip field
Add migration script to extend the ip field capacity to hold up
to 255 characters.
Resolves: #498
Signed-off-by: Kaifeng Wang <5bfbeb450d06b1aa9e4417d3c7676f1231c80181@gmail.com>
|
"""extend_ip_field
Revision ID: 8da20383f6e1
Revises: eeb702f77d7d
Create Date: 2021-01-14 10:50:56.275257
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8da20383f6e1'
down_revision = 'eeb702f77d7d'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table('verifiermain') as batch_op:
batch_op.alter_column('ip', existing_type=sa.String(length=15),
type_=sa.String(length=255), existing_nullable=True)
def downgrade_cloud_verifier():
pass
|
<commit_before><commit_msg>Extend capacity of verifiermain.ip field
Add migration script to extend the ip field capacity to hold up
to 255 characters.
Resolves: #498
Signed-off-by: Kaifeng Wang <5bfbeb450d06b1aa9e4417d3c7676f1231c80181@gmail.com><commit_after>
|
"""extend_ip_field
Revision ID: 8da20383f6e1
Revises: eeb702f77d7d
Create Date: 2021-01-14 10:50:56.275257
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8da20383f6e1'
down_revision = 'eeb702f77d7d'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table('verifiermain') as batch_op:
batch_op.alter_column('ip', existing_type=sa.String(length=15),
type_=sa.String(length=255), existing_nullable=True)
def downgrade_cloud_verifier():
pass
|
Extend capacity of verifiermain.ip field
Add migration script to extend the ip field capacity to hold up
to 255 characters.
Resolves: #498
Signed-off-by: Kaifeng Wang <5bfbeb450d06b1aa9e4417d3c7676f1231c80181@gmail.com>"""extend_ip_field
Revision ID: 8da20383f6e1
Revises: eeb702f77d7d
Create Date: 2021-01-14 10:50:56.275257
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8da20383f6e1'
down_revision = 'eeb702f77d7d'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table('verifiermain') as batch_op:
batch_op.alter_column('ip', existing_type=sa.String(length=15),
type_=sa.String(length=255), existing_nullable=True)
def downgrade_cloud_verifier():
pass
|
<commit_before><commit_msg>Extend capacity of verifiermain.ip field
Add migration script to extend the ip field capacity to hold up
to 255 characters.
Resolves: #498
Signed-off-by: Kaifeng Wang <5bfbeb450d06b1aa9e4417d3c7676f1231c80181@gmail.com><commit_after>"""extend_ip_field
Revision ID: 8da20383f6e1
Revises: eeb702f77d7d
Create Date: 2021-01-14 10:50:56.275257
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8da20383f6e1'
down_revision = 'eeb702f77d7d'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table('verifiermain') as batch_op:
batch_op.alter_column('ip', existing_type=sa.String(length=15),
type_=sa.String(length=255), existing_nullable=True)
def downgrade_cloud_verifier():
pass
|
|
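downgrade_cloud_verifier above is left as a no-op, likely because shrinking the column could truncate stored values; a symmetric reverse, if one were wanted, would be this sketch:

def downgrade_cloud_verifier():
    with op.batch_alter_table('verifiermain') as batch_op:
        batch_op.alter_column('ip', existing_type=sa.String(length=255),
                              type_=sa.String(length=15),
                              existing_nullable=True)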
229dd6c82e38237e0948b9e5091647990fc11af4
|
src/ggrc/migrations/versions/20170130060409_57940269e30_migrate_requests_change_logs.py
|
src/ggrc/migrations/versions/20170130060409_57940269e30_migrate_requests_change_logs.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate requests change logs
Create Date: 2017-01-30 06:04:09.538516
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '57940269e30'
down_revision = '562ec606ff7c'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("""
update revisions
join requests on revisions.resource_type = 'Request' and
revisions.resource_id = requests.id
join assessments on requests.slug = assessments.slug
set resource_type = 'Assessment',
resource_id = assessments.id
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
Migrate Request revisions to Assessment revisions
|
Migrate Request revisions to Assessment revisions
|
Python
|
apache-2.0
|
plamut/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,plamut/ggrc-core
|
Migrate Request revisions to Assessment revisions
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate requests change logs
Create Date: 2017-01-30 06:04:09.538516
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '57940269e30'
down_revision = '562ec606ff7c'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("""
update revisions
join requests on revisions.resource_type = 'Request' and
revisions.resource_id = requests.id
join assessments on requests.slug = assessments.slug
set resource_type = 'Assessment',
resource_id = assessments.id
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
<commit_before><commit_msg>Migrate Request revisions to Assessment revisions<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate requests change logs
Create Date: 2017-01-30 06:04:09.538516
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '57940269e30'
down_revision = '562ec606ff7c'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("""
update revisions
join requests on revisions.resource_type = 'Request' and
revisions.resource_id = requests.id
join assessments on requests.slug = assessments.slug
set resource_type = 'Assessment',
resource_id = assessments.id
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
Migrate Request revisions to Assessment revisions# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate requests change logs
Create Date: 2017-01-30 06:04:09.538516
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '57940269e30'
down_revision = '562ec606ff7c'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("""
update revisions
join requests on revisions.resource_type = 'Request' and
revisions.resource_id = requests.id
join assessments on requests.slug = assessments.slug
set resource_type = 'Assessment',
resource_id = assessments.id
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
<commit_before><commit_msg>Migrate Request revisions to Assessment revisions<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Migrate requests change logs
Create Date: 2017-01-30 06:04:09.538516
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '57940269e30'
down_revision = '562ec606ff7c'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("""
update revisions
join requests on revisions.resource_type = 'Request' and
revisions.resource_id = requests.id
join assessments on requests.slug = assessments.slug
set resource_type = 'Assessment',
resource_id = assessments.id
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
|
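A pre-flight count of the rows the UPDATE above will rewrite can be taken through Alembic's bound connection; a SQLAlchemy 1.x-style sketch, meant to be called from inside upgrade():

def count_request_revisions():
    bind = op.get_bind()
    result = bind.execute("""
        select count(*) from revisions
        join requests on revisions.resource_type = 'Request' and
                         revisions.resource_id = requests.id
        join assessments on requests.slug = assessments.slug
    """)
    return result.scalar()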
da0a626ca6e05d918381653d7c219300d2e12ece
|
mosqito/tests/loudness/test_loudness_ecma.py
|
mosqito/tests/loudness/test_loudness_ecma.py
|
# -*- coding: utf-8 -*-
# Third party imports
import pytest
from numpy import mean
# Local application imports
from mosqito.functions.hearing_model.sine_wave_generator import sine_wave_generator
from mosqito.functions.hearing_model.comp_loudness import comp_loudness
from mosqito.functions.hearing_model.sone2phone import sone2phone
@pytest.mark.loudness_ecma # to skip or run only loudness ecma tests
def test_loudness_ecma():
"""Test function for the Loudness_ecma calculation
Parameters
----------
None
Outputs
-------
None
"""
signal, samples = sine_wave_generator(
fs=48000,
t=1,
spl_value=60,
freq=1000,
)
t_array = comp_loudness(signal, validation=False)[1]
mean_loudness_value = mean(t_array[:, 0])
phon_loudness_value = sone2phone(mean_loudness_value)
    assert 59.9 < phon_loudness_value < 60.1
# run the test function
if __name__ == "__main__":
test_loudness_ecma()
|
Test for ECMA loudness computation
|
[NF] Test for ECMA loudness computation
|
Python
|
apache-2.0
|
Eomys/MoSQITo
|
[NF] Test for ECMA loudness computation
|
# -*- coding: utf-8 -*-
# Third party imports
import pytest
from numpy import mean
# Local application imports
from mosqito.functions.hearing_model.sine_wave_generator import sine_wave_generator
from mosqito.functions.hearing_model.comp_loudness import comp_loudness
from mosqito.functions.hearing_model.sone2phone import sone2phone
@pytest.mark.loudness_ecma # to skip or run only loudness ecma tests
def test_loudness_ecma():
"""Test function for the Loudness_ecma calculation
Parameters
----------
None
Outputs
-------
None
"""
signal, samples = sine_wave_generator(
fs=48000,
t=1,
spl_value=60,
freq=1000,
)
t_array = comp_loudness(signal, validation=False)[1]
mean_loudness_value = mean(t_array[:, 0])
phon_loudness_value = sone2phone(mean_loudness_value)
assert phon_loudness_value < 60.1 and phon_loudness_value > 59.9
# test the function
if __name__ == "__main__":
test_loudness_ecma()
|
<commit_before><commit_msg>[NF] Test for ECMA loudness computation<commit_after>
|
# -*- coding: utf-8 -*-
# Third party imports
import pytest
from numpy import mean
# Local application imports
from mosqito.functions.hearing_model.sine_wave_generator import sine_wave_generator
from mosqito.functions.hearing_model.comp_loudness import comp_loudness
from mosqito.functions.hearing_model.sone2phone import sone2phone
@pytest.mark.loudness_ecma # to skip or run only loudness ecma tests
def test_loudness_ecma():
"""Test function for the Loudness_ecma calculation
Parameters
----------
None
Outputs
-------
None
"""
signal, samples = sine_wave_generator(
fs=48000,
t=1,
spl_value=60,
freq=1000,
)
t_array = comp_loudness(signal, validation=False)[1]
mean_loudness_value = mean(t_array[:, 0])
phon_loudness_value = sone2phone(mean_loudness_value)
assert phon_loudness_value < 60.1 and phon_loudness_value > 59.9
# test the function
if __name__ == "__main__":
test_loudness_ecma()
|
[NF] Test for ECMA loudness computation# -*- coding: utf-8 -*-
# Third party imports
import pytest
from numpy import mean
# Local application imports
from mosqito.functions.hearing_model.sine_wave_generator import sine_wave_generator
from mosqito.functions.hearing_model.comp_loudness import comp_loudness
from mosqito.functions.hearing_model.sone2phone import sone2phone
@pytest.mark.loudness_ecma # to skip or run only loudness ecma tests
def test_loudness_ecma():
"""Test function for the Loudness_ecma calculation
Parameters
----------
None
Outputs
-------
None
"""
signal, samples = sine_wave_generator(
fs=48000,
t=1,
spl_value=60,
freq=1000,
)
t_array = comp_loudness(signal, validation=False)[1]
mean_loudness_value = mean(t_array[:, 0])
phon_loudness_value = sone2phone(mean_loudness_value)
assert phon_loudness_value < 60.1 and phon_loudness_value > 59.9
# test the function
if __name__ == "__main__":
test_loudness_ecma()
|
<commit_before><commit_msg>[NF] Test for ECMA loudness computation<commit_after># -*- coding: utf-8 -*-
# Third party imports
import pytest
from numpy import mean
# Local application imports
from mosqito.functions.hearing_model.sine_wave_generator import sine_wave_generator
from mosqito.functions.hearing_model.comp_loudness import comp_loudness
from mosqito.functions.hearing_model.sone2phone import sone2phone
@pytest.mark.loudness_ecma # to skip or run only loudness ecma tests
def test_loudness_ecma():
"""Test function for the Loudness_ecma calculation
Parameters
----------
None
Outputs
-------
None
"""
signal, samples = sine_wave_generator(
fs=48000,
t=1,
spl_value=60,
freq=1000,
)
t_array = comp_loudness(signal, validation=False)[1]
mean_loudness_value = mean(t_array[:, 0])
phon_loudness_value = sone2phone(mean_loudness_value)
assert phon_loudness_value < 60.1 and phon_loudness_value > 59.9
# test the function
if __name__ == "__main__":
test_loudness_ecma()
|
|
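The tolerance band around 60 phon in the test above follows from two facts: at 1 kHz, loudness level in phon equals SPL in dB by definition, and the classical sone-to-phon relation puts 4 sones at 60 phon. A minimal sketch of that relation, assuming mosqito's sone2phone follows the standard formula above 1 sone:

from math import log2

def sone_to_phon_sketch(n_sones):
    # Stevens' relation: 1 sone is 40 phon, and each doubling in sones
    # adds 10 phon; only meaningful for roughly 1 sone and above.
    return 40.0 + 10.0 * log2(n_sones)

assert sone_to_phon_sketch(4.0) == 60.0  # the 60 dB SPL, 1 kHz tone above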
9564848e61df038396fbab95995a40f2e5a5970a
|
l10n_br_zip/__openerp__.py
|
l10n_br_zip/__openerp__.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '8.0.1.0.1',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': ['test/zip_demo.yml'],
'category': 'Localization',
'installable': False,
}
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '9.0.1.0.0',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': [
'test/zip_demo.yml'
],
'category': 'Localization',
'installable': True,
}
|
Change the version of module.
|
[MIG] Change the version of module.
|
Python
|
agpl-3.0
|
kmee/l10n-brazil,kmee/l10n-brazil
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '8.0.1.0.1',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': ['test/zip_demo.yml'],
'category': 'Localization',
'installable': False,
}
[MIG] Change the version of module.
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '9.0.1.0.0',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': [
'test/zip_demo.yml'
],
'category': 'Localization',
'installable': True,
}
|
<commit_before># -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '8.0.1.0.1',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': ['test/zip_demo.yml'],
'category': 'Localization',
'installable': False,
}
<commit_msg>[MIG] Change the version of module.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '9.0.1.0.0',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': [
'test/zip_demo.yml'
],
'category': 'Localization',
'installable': True,
}
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '8.0.1.0.1',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': ['test/zip_demo.yml'],
'category': 'Localization',
'installable': False,
}
[MIG] Change the version of module.# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '9.0.1.0.0',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': [
'test/zip_demo.yml'
],
'category': 'Localization',
'installable': True,
}
|
<commit_before># -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '8.0.1.0.1',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': ['test/zip_demo.yml'],
'category': 'Localization',
'installable': False,
}
<commit_msg>[MIG] Change the version of module.<commit_after># -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localisation ZIP Codes',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'version': '9.0.1.0.0',
'depends': [
'l10n_br_base',
],
'data': [
'views/l10n_br_zip_view.xml',
'views/res_partner_view.xml',
'views/res_company_view.xml',
'views/res_bank_view.xml',
'wizard/l10n_br_zip_search_view.xml',
'security/ir.model.access.csv',
],
'test': [
'test/zip_demo.yml'
],
'category': 'Localization',
'installable': True,
}
|
5190338818e6eac093525bc2b188070105ce7891
|
django_project/localities/tests/test_managementcommands.py
|
django_project/localities/tests/test_managementcommands.py
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.management import call_command
from django.core.management.base import CommandError
from .model_factories import AttributeF, DomainSpecification3AF
from ..models import Locality, Value
class TestManagementCommands(TestCase):
def test_import_csv(self):
attr1 = AttributeF.create(key='name')
attr2 = AttributeF.create(key='url')
attr3 = AttributeF.create(key='services')
DomainSpecification3AF.create(
name='Test', spec1__attribute=attr1, spec2__attribute=attr2,
spec3__attribute=attr3
)
call_command(
'import_csv', 'Test', 'test_imp',
'./localities/tests/test_data/test_csv_import_ok.tsv',
'./localities/tests/test_data/test_csv_import_map.json',
use_tabs=True
)
self.assertEqual(Locality.objects.count(), 3)
self.assertEqual(Value.objects.count(), 8)
def test_import_csv_bad_arguments(self):
self.assertRaises(
CommandError, call_command, 'import_csv', 'Test', 'test_imp'
)
|
Add tests for Locality management commands
|
Add tests for Locality management commands
|
Python
|
bsd-2-clause
|
ismailsunni/healthsites,ismailsunni/healthsites,ismailsunni/healthsites,ismailsunni/healthsites
|
Add tests for Locality management commands
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.management import call_command
from django.core.management.base import CommandError
from .model_factories import AttributeF, DomainSpecification3AF
from ..models import Locality, Value
class TestManagementCommands(TestCase):
def test_import_csv(self):
attr1 = AttributeF.create(key='name')
attr2 = AttributeF.create(key='url')
attr3 = AttributeF.create(key='services')
DomainSpecification3AF.create(
name='Test', spec1__attribute=attr1, spec2__attribute=attr2,
spec3__attribute=attr3
)
call_command(
'import_csv', 'Test', 'test_imp',
'./localities/tests/test_data/test_csv_import_ok.tsv',
'./localities/tests/test_data/test_csv_import_map.json',
use_tabs=True
)
self.assertEqual(Locality.objects.count(), 3)
self.assertEqual(Value.objects.count(), 8)
def test_import_csv_bad_arguments(self):
self.assertRaises(
CommandError, call_command, 'import_csv', 'Test', 'test_imp'
)
|
<commit_before><commit_msg>Add tests for Locality management commands<commit_after>
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.management import call_command
from django.core.management.base import CommandError
from .model_factories import AttributeF, DomainSpecification3AF
from ..models import Locality, Value
class TestManagementCommands(TestCase):
def test_import_csv(self):
attr1 = AttributeF.create(key='name')
attr2 = AttributeF.create(key='url')
attr3 = AttributeF.create(key='services')
DomainSpecification3AF.create(
name='Test', spec1__attribute=attr1, spec2__attribute=attr2,
spec3__attribute=attr3
)
call_command(
'import_csv', 'Test', 'test_imp',
'./localities/tests/test_data/test_csv_import_ok.tsv',
'./localities/tests/test_data/test_csv_import_map.json',
use_tabs=True
)
self.assertEqual(Locality.objects.count(), 3)
self.assertEqual(Value.objects.count(), 8)
def test_import_csv_bad_arguments(self):
self.assertRaises(
CommandError, call_command, 'import_csv', 'Test', 'test_imp'
)
|
Add tests for Locality management commands# -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.management import call_command
from django.core.management.base import CommandError
from .model_factories import AttributeF, DomainSpecification3AF
from ..models import Locality, Value
class TestManagementCommands(TestCase):
def test_import_csv(self):
attr1 = AttributeF.create(key='name')
attr2 = AttributeF.create(key='url')
attr3 = AttributeF.create(key='services')
DomainSpecification3AF.create(
name='Test', spec1__attribute=attr1, spec2__attribute=attr2,
spec3__attribute=attr3
)
call_command(
'import_csv', 'Test', 'test_imp',
'./localities/tests/test_data/test_csv_import_ok.tsv',
'./localities/tests/test_data/test_csv_import_map.json',
use_tabs=True
)
self.assertEqual(Locality.objects.count(), 3)
self.assertEqual(Value.objects.count(), 8)
def test_import_csv_bad_arguments(self):
self.assertRaises(
CommandError, call_command, 'import_csv', 'Test', 'test_imp'
)
|
<commit_before><commit_msg>Add tests for Locality management commands<commit_after># -*- coding: utf-8 -*-
from django.test import TestCase
from django.core.management import call_command
from django.core.management.base import CommandError
from .model_factories import AttributeF, DomainSpecification3AF
from ..models import Locality, Value
class TestManagementCommands(TestCase):
def test_import_csv(self):
attr1 = AttributeF.create(key='name')
attr2 = AttributeF.create(key='url')
attr3 = AttributeF.create(key='services')
DomainSpecification3AF.create(
name='Test', spec1__attribute=attr1, spec2__attribute=attr2,
spec3__attribute=attr3
)
call_command(
'import_csv', 'Test', 'test_imp',
'./localities/tests/test_data/test_csv_import_ok.tsv',
'./localities/tests/test_data/test_csv_import_map.json',
use_tabs=True
)
self.assertEqual(Locality.objects.count(), 3)
self.assertEqual(Value.objects.count(), 8)
def test_import_csv_bad_arguments(self):
self.assertRaises(
CommandError, call_command, 'import_csv', 'Test', 'test_imp'
)
|
|
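The CommandError case shows the command validates its positional arguments. The repository's actual import_csv implementation is not part of this record, so the following is a hypothetical reconstruction in the optparse style Django used at the time; the argument names only mirror the test invocation.

from django.core.management.base import BaseCommand, CommandError

class Command(BaseCommand):
    args = '<domain_name> <source_name> <csv_file> <attribute_map_file>'

    def handle(self, *args, **options):
        if len(args) != 4:
            raise CommandError('import_csv requires exactly four arguments')
        domain_name, source_name, csv_path, map_path = args
        # ... read the (tab-separated, when use_tabs is set) file and
        # create Locality and Value rows from the attribute map ...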
128d16d236a6e02218b9e3d9212d9f7d37fc99e3
|
scripts/registration_trends.py
|
scripts/registration_trends.py
|
from competition.models import Competition
import collections
import csv
import datetime
import itertools
import json
import StringIO
def daterange(start_date, end_date):
num_days = int((end_date - start_date).days) + 1
for n in range(num_days + 1):
yield start_date + datetime.timedelta(days=n)
def accumulate(registrations):
dates = [x.signup_date for x in registrations]
result = collections.OrderedDict()
for day in daterange(min(dates), max(dates)):
key = day.strftime("%m-%d-%Y")
result[key] = registrations.filter(signup_date__lt=day).count()
return result
def run():
for competition in Competition.objects.all():
registrations = competition.registration_set.filter(active=True)
csv_content = StringIO.StringIO()
writer = csv.writer(csv_content)
print "\n", competition.name
for date, count in accumulate(registrations).iteritems():
writer.writerow([date, count])
print csv_content.getvalue()
|
Add script to calculate registration trends
|
Add script to calculate registration trends
|
Python
|
bsd-3-clause
|
siggame/webserver,siggame/webserver,siggame/webserver
|
Add script to calculate registration trends
|
from competition.models import Competition
import collections
import csv
import datetime
import itertools
import json
import StringIO
def daterange(start_date, end_date):
num_days = int((end_date - start_date).days) + 1
for n in range(num_days + 1):
yield start_date + datetime.timedelta(days=n)
def accumulate(registrations):
dates = [x.signup_date for x in registrations]
result = collections.OrderedDict()
for day in daterange(min(dates), max(dates)):
key = day.strftime("%m-%d-%Y")
result[key] = registrations.filter(signup_date__lt=day).count()
return result
def run():
for competition in Competition.objects.all():
registrations = competition.registration_set.filter(active=True)
csv_content = StringIO.StringIO()
writer = csv.writer(csv_content)
print "\n", competition.name
for date, count in accumulate(registrations).iteritems():
writer.writerow([date, count])
print csv_content.getvalue()
|
<commit_before><commit_msg>Add script to calculate registration trends<commit_after>
|
from competition.models import Competition
import collections
import csv
import datetime
import itertools
import json
import StringIO
def daterange(start_date, end_date):
num_days = int((end_date - start_date).days) + 1
for n in range(num_days + 1):
yield start_date + datetime.timedelta(days=n)
def accumulate(registrations):
dates = [x.signup_date for x in registrations]
result = collections.OrderedDict()
for day in daterange(min(dates), max(dates)):
key = day.strftime("%m-%d-%Y")
result[key] = registrations.filter(signup_date__lt=day).count()
return result
def run():
for competition in Competition.objects.all():
registrations = competition.registration_set.filter(active=True)
csv_content = StringIO.StringIO()
writer = csv.writer(csv_content)
print "\n", competition.name
for date, count in accumulate(registrations).iteritems():
writer.writerow([date, count])
print csv_content.getvalue()
|
Add script to calculate registration trendsfrom competition.models import Competition
import collections
import csv
import datetime
import itertools
import json
import StringIO
def daterange(start_date, end_date):
num_days = int((end_date - start_date).days) + 1
for n in range(num_days + 1):
yield start_date + datetime.timedelta(days=n)
def accumulate(registrations):
dates = [x.signup_date for x in registrations]
result = collections.OrderedDict()
for day in daterange(min(dates), max(dates)):
key = day.strftime("%m-%d-%Y")
result[key] = registrations.filter(signup_date__lt=day).count()
return result
def run():
for competition in Competition.objects.all():
registrations = competition.registration_set.filter(active=True)
csv_content = StringIO.StringIO()
writer = csv.writer(csv_content)
print "\n", competition.name
for date, count in accumulate(registrations).iteritems():
writer.writerow([date, count])
print csv_content.getvalue()
|
<commit_before><commit_msg>Add script to calculate registration trends<commit_after>from competition.models import Competition
import collections
import csv
import datetime
import itertools
import json
import StringIO
def daterange(start_date, end_date):
num_days = int((end_date - start_date).days) + 1
for n in range(num_days + 1):
yield start_date + datetime.timedelta(days=n)
def accumulate(registrations):
dates = [x.signup_date for x in registrations]
result = collections.OrderedDict()
for day in daterange(min(dates), max(dates)):
key = day.strftime("%m-%d-%Y")
result[key] = registrations.filter(signup_date__lt=day).count()
return result
def run():
for competition in Competition.objects.all():
registrations = competition.registration_set.filter(active=True)
csv_content = StringIO.StringIO()
writer = csv.writer(csv_content)
print "\n", competition.name
for date, count in accumulate(registrations).iteritems():
writer.writerow([date, count])
print csv_content.getvalue()
|
|
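Two details of the script are easy to miss: daterange deliberately yields one day past end_date, which lets the strict signup_date__lt filter still count signups on the final day, and the loop issues one COUNT query per day. A sketch of an equivalent single-query variant, assuming signup_date values compare cleanly against the generated days:

import bisect
import collections

def accumulate_single_query(registrations):
    # Fetch the dates once, then count client-side: bisect_left(dates, day)
    # is the number of signup dates strictly before `day`, matching the
    # signup_date__lt=day filter in the original.
    dates = sorted(x.signup_date for x in registrations)
    result = collections.OrderedDict()
    for day in daterange(min(dates), max(dates)):  # daterange as defined above
        result[day.strftime("%m-%d-%Y")] = bisect.bisect_left(dates, day)
    return result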
14d9f4c6cdea8b2bac1ea199509a5da49ae3d85a
|
osf/migrations/0037_remove_emails_for_unconfirmed_users.py
|
osf/migrations/0037_remove_emails_for_unconfirmed_users.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-16 19:59
"""Removes Email objects that associated with unconfirmed users. These
were incorrectly created in 0033_user_emails_to_fk.
"""
from __future__ import unicode_literals
from django.db import migrations
def remove_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
Email.objects.filter(user__date_confirmed__isnull=True).delete()
# copied from 0033_user_emails_to_fk
def restore_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
OSFUser = state.get_model('osf', 'osfuser')
for user in OSFUser.objects.filter(date_confirmed__isnull=True).values('id', 'username', 'is_active'):
uid = user['id']
primary_email = user['username'].lower().strip()
active = user['is_active']
if active or not Email.objects.filter(address=primary_email).exists():
_, created = Email.objects.get_or_create(address=primary_email, user_id=uid)
assert created, 'Email object for username {} already exists'.format(primary_email)
class Migration(migrations.Migration):
dependencies = [
('osf', '0036_auto_20170605_1520'),
]
operations = [
migrations.RunPython(
remove_emails, restore_emails
),
]
|
Add migration to remove Emails associated with unconfirmed users
|
Add migration to remove Emails associated with unconfirmed users
There should not be any Email records for unconfirmed users.
These users would not be able to log in nor confirm their email
|
Python
|
apache-2.0
|
felliott/osf.io,chrisseto/osf.io,leb2dg/osf.io,cslzchen/osf.io,pattisdr/osf.io,erinspace/osf.io,caneruguz/osf.io,sloria/osf.io,chrisseto/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,mattclark/osf.io,adlius/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,cslzchen/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,binoculars/osf.io,caneruguz/osf.io,laurenrevere/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,chrisseto/osf.io,cslzchen/osf.io,crcresearch/osf.io,felliott/osf.io,adlius/osf.io,aaxelb/osf.io,leb2dg/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,caneruguz/osf.io,aaxelb/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,binoculars/osf.io,chennan47/osf.io,erinspace/osf.io,crcresearch/osf.io,icereval/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,felliott/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,mattclark/osf.io,laurenrevere/osf.io,chennan47/osf.io,icereval/osf.io,adlius/osf.io,baylee-d/osf.io,aaxelb/osf.io,adlius/osf.io,icereval/osf.io,sloria/osf.io,binoculars/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,mfraezz/osf.io,erinspace/osf.io,mfraezz/osf.io,mattclark/osf.io,leb2dg/osf.io,chennan47/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,felliott/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,caseyrollins/osf.io,mfraezz/osf.io
|
Add migration to remove Emails associated with unconfirmed users
There should not be any Email records for unconfirmed users.
These users would not be able to log in nor confirm their email
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-16 19:59
"""Removes Email objects that associated with unconfirmed users. These
were incorrectly created in 0033_user_emails_to_fk.
"""
from __future__ import unicode_literals
from django.db import migrations
def remove_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
Email.objects.filter(user__date_confirmed__isnull=True).delete()
# copied from 0033_user_emails_to_fk
def restore_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
OSFUser = state.get_model('osf', 'osfuser')
for user in OSFUser.objects.filter(date_confirmed__isnull=True).values('id', 'username', 'is_active'):
uid = user['id']
primary_email = user['username'].lower().strip()
active = user['is_active']
if active or not Email.objects.filter(address=primary_email).exists():
_, created = Email.objects.get_or_create(address=primary_email, user_id=uid)
assert created, 'Email object for username {} already exists'.format(primary_email)
class Migration(migrations.Migration):
dependencies = [
('osf', '0036_auto_20170605_1520'),
]
operations = [
migrations.RunPython(
remove_emails, restore_emails
),
]
|
<commit_before><commit_msg>Add migration to remove Emails associated with unconfirmed users
There should not be any Email records for unconfirmed users.
These users would not be able to log in nor confirm their email<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-16 19:59
"""Removes Email objects that associated with unconfirmed users. These
were incorrectly created in 0033_user_emails_to_fk.
"""
from __future__ import unicode_literals
from django.db import migrations
def remove_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
Email.objects.filter(user__date_confirmed__isnull=True).delete()
# copied from 0033_user_emails_to_fk
def restore_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
OSFUser = state.get_model('osf', 'osfuser')
for user in OSFUser.objects.filter(date_confirmed__isnull=True).values('id', 'username', 'is_active'):
uid = user['id']
primary_email = user['username'].lower().strip()
active = user['is_active']
if active or not Email.objects.filter(address=primary_email).exists():
_, created = Email.objects.get_or_create(address=primary_email, user_id=uid)
assert created, 'Email object for username {} already exists'.format(primary_email)
class Migration(migrations.Migration):
dependencies = [
('osf', '0036_auto_20170605_1520'),
]
operations = [
migrations.RunPython(
remove_emails, restore_emails
),
]
|
Add migration to remove Emails associated with unconfirmed users
There should not be any Email records for unconfirmed users.
These users would not be able to log in nor confirm their email# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-16 19:59
"""Removes Email objects that associated with unconfirmed users. These
were incorrectly created in 0033_user_emails_to_fk.
"""
from __future__ import unicode_literals
from django.db import migrations
def remove_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
Email.objects.filter(user__date_confirmed__isnull=True).delete()
# copied from 0033_user_emails_to_fk
def restore_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
OSFUser = state.get_model('osf', 'osfuser')
for user in OSFUser.objects.filter(date_confirmed__isnull=True).values('id', 'username', 'is_active'):
uid = user['id']
primary_email = user['username'].lower().strip()
active = user['is_active']
if active or not Email.objects.filter(address=primary_email).exists():
_, created = Email.objects.get_or_create(address=primary_email, user_id=uid)
assert created, 'Email object for username {} already exists'.format(primary_email)
class Migration(migrations.Migration):
dependencies = [
('osf', '0036_auto_20170605_1520'),
]
operations = [
migrations.RunPython(
remove_emails, restore_emails
),
]
|
<commit_before><commit_msg>Add migration to remove Emails associated with unconfirmed users
There should not be any Email records for unconfirmed users.
These users would not be able to log in nor confirm their email<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-16 19:59
"""Removes Email objects that associated with unconfirmed users. These
were incorrectly created in 0033_user_emails_to_fk.
"""
from __future__ import unicode_literals
from django.db import migrations
def remove_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
Email.objects.filter(user__date_confirmed__isnull=True).delete()
# copied from 0033_user_emails_to_fk
def restore_emails(state, *args, **kwargs):
Email = state.get_model('osf', 'email')
OSFUser = state.get_model('osf', 'osfuser')
for user in OSFUser.objects.filter(date_confirmed__isnull=True).values('id', 'username', 'is_active'):
uid = user['id']
primary_email = user['username'].lower().strip()
active = user['is_active']
if active or not Email.objects.filter(address=primary_email).exists():
_, created = Email.objects.get_or_create(address=primary_email, user_id=uid)
assert created, 'Email object for username {} already exists'.format(primary_email)
class Migration(migrations.Migration):
dependencies = [
('osf', '0036_auto_20170605_1520'),
]
operations = [
migrations.RunPython(
remove_emails, restore_emails
),
]
|
|
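The migration runs against historical state models, so a quick spot check from a Django shell would use the live models instead. A sketch, assuming Email is importable from osf.models in this codebase:

# After migrating, no Email row should point at an unconfirmed user.
from osf.models import Email

leftover = Email.objects.filter(user__date_confirmed__isnull=True).count()
assert leftover == 0, '%d emails still attached to unconfirmed users' % leftover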
7a3f15c011447374dcd782a7eb5953534c25f09f
|
test/acceptance/test_cli_vital.py
|
test/acceptance/test_cli_vital.py
|
import unittest
from pathlib import Path
import subprocess
class TestVintDoNotDiedWhenLintingVital(unittest.TestCase):
def assertVintStillAlive(self, cmd):
try:
got_output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as err:
got_output = err.output
unexpected_keyword = r'^Traceback'
self.assertNotRegex(got_output, unexpected_keyword)
def assertNotRegex(self, string, pattern):
assertNotRegexpMatches = getattr(self, 'assertNotRegexpMatches', None)
if assertNotRegexpMatches:
assertNotRegexpMatches(string, pattern)
return
super(TestVintDoNotDiedWhenLintingVital, self).assertNotRegex(string, pattern)
def test_not_died_when_linting_vital(self):
vital_dir = str(Path('test', 'fixture', 'cli', 'vital.vim'))
cmd = ['vint', vital_dir]
self.assertVintStillAlive(cmd)
if __name__ == '__main__':
unittest.main()
|
Add acceptance test for vital
|
Add acceptance test for vital
|
Python
|
mit
|
Kuniwak/vint,RianFuro/vint,Kuniwak/vint,RianFuro/vint
|
Add acceptance test for vital
|
import unittest
from pathlib import Path
import subprocess
class TestVintDoNotDiedWhenLintingVital(unittest.TestCase):
def assertVintStillAlive(self, cmd):
try:
got_output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as err:
got_output = err.output
unexpected_keyword = r'^Traceback'
self.assertNotRegex(got_output, unexpected_keyword)
def assertNotRegex(self, string, pattern):
assertNotRegexpMatches = getattr(self, 'assertNotRegexpMatches', None)
if assertNotRegexpMatches:
assertNotRegexpMatches(string, pattern)
return
super(TestVintDoNotDiedWhenLintingVital, self).assertNotRegex(string, pattern)
def test_not_died_when_linting_vital(self):
vital_dir = str(Path('test', 'fixture', 'cli', 'vital.vim'))
cmd = ['vint', vital_dir]
self.assertVintStillAlive(cmd)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add acceptance test for vital<commit_after>
|
import unittest
from pathlib import Path
import subprocess
class TestVintDoNotDiedWhenLintingVital(unittest.TestCase):
def assertVintStillAlive(self, cmd):
try:
got_output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as err:
got_output = err.output
unexpected_keyword = r'^Traceback'
self.assertNotRegex(got_output, unexpected_keyword)
def assertNotRegex(self, string, pattern):
assertNotRegexpMatches = getattr(self, 'assertNotRegexpMatches', None)
if assertNotRegexpMatches:
assertNotRegexpMatches(string, pattern)
return
super(TestVintDoNotDiedWhenLintingVital, self).assertNotRegex(string, pattern)
def test_not_died_when_linting_vital(self):
vital_dir = str(Path('test', 'fixture', 'cli', 'vital.vim'))
cmd = ['vint', vital_dir]
self.assertVintStillAlive(cmd)
if __name__ == '__main__':
unittest.main()
|
Add acceptance test for vitalimport unittest
from pathlib import Path
import subprocess
class TestVintDoNotDiedWhenLintingVital(unittest.TestCase):
def assertVintStillAlive(self, cmd):
try:
got_output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as err:
got_output = err.output
unexpected_keyword = r'^Traceback'
self.assertNotRegex(got_output, unexpected_keyword)
def assertNotRegex(self, string, pattern):
assertNotRegexpMatches = getattr(self, 'assertNotRegexpMatches', None)
if assertNotRegexpMatches:
assertNotRegexpMatches(string, pattern)
return
super(TestVintDoNotDiedWhenLintingVital, self).assertNotRegex(string, pattern)
def test_not_died_when_linting_vital(self):
vital_dir = str(Path('test', 'fixture', 'cli', 'vital.vim'))
cmd = ['vint', vital_dir]
self.assertVintStillAlive(cmd)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add acceptance test for vital<commit_after>import unittest
from pathlib import Path
import subprocess
class TestVintDoNotDiedWhenLintingVital(unittest.TestCase):
def assertVintStillAlive(self, cmd):
try:
got_output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as err:
got_output = err.output
unexpected_keyword = r'^Traceback'
self.assertNotRegex(got_output, unexpected_keyword)
def assertNotRegex(self, string, pattern):
assertNotRegexpMatches = getattr(self, 'assertNotRegexpMatches', None)
if assertNotRegexpMatches:
assertNotRegexpMatches(string, pattern)
return
super(TestVintDoNotDiedWhenLintingVital, self).assertNotRegex(string, pattern)
def test_not_died_when_linting_vital(self):
vital_dir = str(Path('test', 'fixture', 'cli', 'vital.vim'))
cmd = ['vint', vital_dir]
self.assertVintStillAlive(cmd)
if __name__ == '__main__':
unittest.main()
|
|
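The assertNotRegex override in the test above is a Python 2/3 compatibility shim: unittest spelled the method assertNotRegexpMatches in 2.7 and renamed it to assertNotRegex in 3.2. The same pattern as a standalone helper:

def assert_not_regex(testcase, text, pattern):
    # Prefer the Python 2 spelling when it exists, otherwise fall back
    # to the Python 3 method on unittest.TestCase.
    legacy = getattr(testcase, 'assertNotRegexpMatches', None)
    if legacy is not None:
        legacy(text, pattern)
    else:
        testcase.assertNotRegex(text, pattern)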
f5ac5b27f1c586e0f3e65ceab633fdae0b040454
|
tests/test_compound.py
|
tests/test_compound.py
|
# -*- coding: utf-8 -*-
import unittest
from formencode import compound, Invalid
from formencode.validators import DictConverter
class TestAllCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.All(
validators=[DictConverter({2:1}), DictConverter({3:2})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(3), 1)
def test_from_python(self):
self.assertEqual(self.validator.from_python(1), 3)
class TestAnyCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Any(
validators=[DictConverter({2:'c'}), DictConverter({2:'b'}), DictConverter({1:'b'})])
def test_to_python(self):
# Should stop before 'c' coming from the right.
self.assertEqual(self.validator.to_python(2), 'b')
def test_from_python(self):
# Should stop before 1 coming from the left.
self.assertEqual(self.validator.from_python('b'), 2)
def test_to_python_error(self):
try:
self.validator.to_python(3)
except Invalid, e:
self.failUnless('Enter a value from: 2' in str(e))
else:
self.fail('Invalid should be raised when no validator succeeds.')
class TestPipeCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Pipe(
validators=[DictConverter({1: 2}), DictConverter({2: 3})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(1), 3)
def test_from_python(self):
self.assertEqual(self.validator.from_python(3), 1)
|
Add some basic tests for compound validators which can be expanded upon later.
|
Add some basic tests for compound validators which can be expanded upon later.
|
Python
|
mit
|
formencode/formencode,formencode/formencode,systemctl/formencode,jvanasco/formencode,formencode/formencode,jvanasco/formencode,genixpro/formencode,genixpro/formencode,genixpro/formencode,systemctl/formencode,jvanasco/formencode,systemctl/formencode
|
Add some basic tests for compound validators which can be expanded upon later.
|
# -*- coding: utf-8 -*-
import unittest
from formencode import compound, Invalid
from formencode.validators import DictConverter
class TestAllCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.All(
validators=[DictConverter({2:1}), DictConverter({3:2})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(3), 1)
def test_from_python(self):
self.assertEqual(self.validator.from_python(1), 3)
class TestAnyCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Any(
validators=[DictConverter({2:'c'}), DictConverter({2:'b'}), DictConverter({1:'b'})])
def test_to_python(self):
# Should stop before 'c' coming from the right.
self.assertEqual(self.validator.to_python(2), 'b')
def test_from_python(self):
# Should stop before 1 coming from the left.
self.assertEqual(self.validator.from_python('b'), 2)
def test_to_python_error(self):
try:
self.validator.to_python(3)
except Invalid, e:
self.failUnless('Enter a value from: 2' in str(e))
else:
self.fail('Invalid should be raised when no validator succeeds.')
class TestPipeCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Pipe(
validators=[DictConverter({1: 2}), DictConverter({2: 3})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(1), 3)
def test_from_python(self):
self.assertEqual(self.validator.from_python(3), 1)
|
<commit_before><commit_msg>Add some basic tests for compound validators which can be expanded upon later.<commit_after>
|
# -*- coding: utf-8 -*-
import unittest
from formencode import compound, Invalid
from formencode.validators import DictConverter
class TestAllCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.All(
validators=[DictConverter({2:1}), DictConverter({3:2})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(3), 1)
def test_from_python(self):
self.assertEqual(self.validator.from_python(1), 3)
class TestAnyCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Any(
validators=[DictConverter({2:'c'}), DictConverter({2:'b'}), DictConverter({1:'b'})])
def test_to_python(self):
# Should stop before 'c' coming from the right.
self.assertEqual(self.validator.to_python(2), 'b')
def test_from_python(self):
# Should stop before 1 coming from the left.
self.assertEqual(self.validator.from_python('b'), 2)
def test_to_python_error(self):
try:
self.validator.to_python(3)
except Invalid, e:
self.failUnless('Enter a value from: 2' in str(e))
else:
self.fail('Invalid should be raised when no validator succeeds.')
class TestPipeCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Pipe(
validators=[DictConverter({1: 2}), DictConverter({2: 3})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(1), 3)
def test_from_python(self):
self.assertEqual(self.validator.from_python(3), 1)
|
Add some basic tests for compound validators which can be expanded upon later.# -*- coding: utf-8 -*-
import unittest
from formencode import compound, Invalid
from formencode.validators import DictConverter
class TestAllCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.All(
validators=[DictConverter({2:1}), DictConverter({3:2})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(3), 1)
def test_from_python(self):
self.assertEqual(self.validator.from_python(1), 3)
class TestAnyCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Any(
validators=[DictConverter({2:'c'}), DictConverter({2:'b'}), DictConverter({1:'b'})])
def test_to_python(self):
# Should stop before 'c' coming from the right.
self.assertEqual(self.validator.to_python(2), 'b')
def test_from_python(self):
# Should stop before 1 coming from the left.
self.assertEqual(self.validator.from_python('b'), 2)
def test_to_python_error(self):
try:
self.validator.to_python(3)
except Invalid, e:
self.failUnless('Enter a value from: 2' in str(e))
else:
self.fail('Invalid should be raised when no validator succeeds.')
class TestPipeCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Pipe(
validators=[DictConverter({1: 2}), DictConverter({2: 3})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(1), 3)
def test_from_python(self):
self.assertEqual(self.validator.from_python(3), 1)
|
<commit_before><commit_msg>Add some basic tests for compound validators which can be expanded upon later.<commit_after># -*- coding: utf-8 -*-
import unittest
from formencode import compound, Invalid
from formencode.validators import DictConverter
class TestAllCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.All(
validators=[DictConverter({2:1}), DictConverter({3:2})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(3), 1)
def test_from_python(self):
self.assertEqual(self.validator.from_python(1), 3)
class TestAnyCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Any(
validators=[DictConverter({2:'c'}), DictConverter({2:'b'}), DictConverter({1:'b'})])
def test_to_python(self):
# Should stop before 'c' coming from the right.
self.assertEqual(self.validator.to_python(2), 'b')
def test_from_python(self):
# Should stop before 1 coming from the left.
self.assertEqual(self.validator.from_python('b'), 2)
def test_to_python_error(self):
try:
self.validator.to_python(3)
except Invalid, e:
self.failUnless('Enter a value from: 2' in str(e))
else:
self.fail('Invalid should be raised when no validator succeeds.')
class TestPipeCompoundValidator(unittest.TestCase):
def setUp(self):
self.validator = compound.Pipe(
validators=[DictConverter({1: 2}), DictConverter({2: 3})])
def test_to_python(self):
self.assertEqual(self.validator.to_python(1), 3)
def test_from_python(self):
self.assertEqual(self.validator.from_python(3), 1)
|
|
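The assertions pin down the ordering semantics of the two compounds: All composes right-to-left on to_python, like mathematical function composition, while Pipe runs left-to-right. Restated outside the test classes:

from formencode import compound
from formencode.validators import DictConverter

all_v = compound.All(validators=[DictConverter({2: 1}), DictConverter({3: 2})])
assert all_v.to_python(3) == 1   # 3 -> 2 via the rightmost validator, then 2 -> 1

pipe_v = compound.Pipe(validators=[DictConverter({1: 2}), DictConverter({2: 3})])
assert pipe_v.to_python(1) == 3  # 1 -> 2 via the leftmost validator, then 2 -> 3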
25669d7946c5c9faa5f07348580809b33d9ee1d2
|
tests/test_settings.py
|
tests/test_settings.py
|
from flask import url_for
import pytest
import roomcontrol.utils.localstorage as ls
TEST_FILE = """
[settings]
serverip=0.0.0.0
notify=False
sendpic=True
"""
@pytest.fixture
def storage_file(tmpdir):
p = tmpdir.join('test_storage.in')
p.write(TEST_FILE)
ls.set_storage_file(str(p))
def test_response_get_settings(storage_file, client):
assert client.get(url_for('main.settings')).status_code == 200
def test_get_returns_right_vals(storage_file, client):
res = client.get(url_for('main.settings'))
assert res.json == {
'serverip': '0.0.0.0',
'notify': 'False',
'sendpic': 'True'
}
def test_post_response_ok(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.status_code == 200
def test_post_response_sends_ok_message(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.data == b'settings updated'
def test_post_updates_all_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"serverip": "127.0.0.1", "notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '127.0.0.1'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_updates_some_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '0.0.0.0'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_ignores_unexistant_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"fantasy": "fiction"}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
with pytest.raises(KeyError):
assert settings['fantasy'] == 'fiction'
|
Add tests for settings handlers
|
Add tests for settings handlers
|
Python
|
mit
|
miguelfrde/roomcontrol_backend
|
Add tests for settings handlers
|
from flask import url_for
import pytest
import roomcontrol.utils.localstorage as ls
TEST_FILE = """
[settings]
serverip=0.0.0.0
notify=False
sendpic=True
"""
@pytest.fixture
def storage_file(tmpdir):
p = tmpdir.join('test_storage.in')
p.write(TEST_FILE)
ls.set_storage_file(str(p))
def test_response_get_settings(storage_file, client):
assert client.get(url_for('main.settings')).status_code == 200
def test_get_returns_right_vals(storage_file, client):
res = client.get(url_for('main.settings'))
assert res.json == {
'serverip': '0.0.0.0',
'notify': 'False',
'sendpic': 'True'
}
def test_post_response_ok(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.status_code == 200
def test_post_response_sends_ok_message(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.data == b'settings updated'
def test_post_updates_all_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"serverip": "127.0.0.1", "notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '127.0.0.1'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_updates_some_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '0.0.0.0'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_ignores_unexistant_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"fantasy": "fiction"}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
with pytest.raises(KeyError):
assert settings['fantasy'] == 'fiction'
|
<commit_before><commit_msg>Add tests for settings handlers<commit_after>
|
from flask import url_for
import pytest
import roomcontrol.utils.localstorage as ls
TEST_FILE = """
[settings]
serverip=0.0.0.0
notify=False
sendpic=True
"""
@pytest.fixture
def storage_file(tmpdir):
p = tmpdir.join('test_storage.in')
p.write(TEST_FILE)
ls.set_storage_file(str(p))
def test_response_get_settings(storage_file, client):
assert client.get(url_for('main.settings')).status_code == 200
def test_get_returns_right_vals(storage_file, client):
res = client.get(url_for('main.settings'))
assert res.json == {
'serverip': '0.0.0.0',
'notify': 'False',
'sendpic': 'True'
}
def test_post_response_ok(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.status_code == 200
def test_post_response_sends_ok_message(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.data == b'settings updated'
def test_post_updates_all_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"serverip": "127.0.0.1", "notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '127.0.0.1'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_updates_some_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '0.0.0.0'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_ignores_unexistant_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"fantasy": "fiction"}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
with pytest.raises(KeyError):
assert settings['fantasy'] == 'fiction'
|
Add tests for settings handlersfrom flask import url_for
import pytest
import roomcontrol.utils.localstorage as ls
TEST_FILE = """
[settings]
serverip=0.0.0.0
notify=False
sendpic=True
"""
@pytest.fixture
def storage_file(tmpdir):
p = tmpdir.join('test_storage.in')
p.write(TEST_FILE)
ls.set_storage_file(str(p))
def test_response_get_settings(storage_file, client):
assert client.get(url_for('main.settings')).status_code == 200
def test_get_returns_right_vals(storage_file, client):
res = client.get(url_for('main.settings'))
assert res.json == {
'serverip': '0.0.0.0',
'notify': 'False',
'sendpic': 'True'
}
def test_post_response_ok(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.status_code == 200
def test_post_response_sends_ok_message(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.data == b'settings updated'
def test_post_updates_all_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"serverip": "127.0.0.1", "notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '127.0.0.1'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_updates_some_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '0.0.0.0'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_ignores_unexistant_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"fantasy": "fiction"}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
with pytest.raises(KeyError):
assert settings['fantasy'] == 'fiction'
|
<commit_before><commit_msg>Add tests for settings handlers<commit_after>from flask import url_for
import pytest
import roomcontrol.utils.localstorage as ls
TEST_FILE = """
[settings]
serverip=0.0.0.0
notify=False
sendpic=True
"""
@pytest.fixture
def storage_file(tmpdir):
p = tmpdir.join('test_storage.in')
p.write(TEST_FILE)
ls.set_storage_file(str(p))
def test_response_get_settings(storage_file, client):
assert client.get(url_for('main.settings')).status_code == 200
def test_get_returns_right_vals(storage_file, client):
res = client.get(url_for('main.settings'))
assert res.json == {
'serverip': '0.0.0.0',
'notify': 'False',
'sendpic': 'True'
}
def test_post_response_ok(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.status_code == 200
def test_post_response_sends_ok_message(storage_file, client):
res = client.post(
url_for('main.settings'),
data='{}',
headers={'Content-Type': 'application/json'})
assert res.data == b'settings updated'
def test_post_updates_all_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"serverip": "127.0.0.1", "notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '127.0.0.1'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_updates_some_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"notify": true, "sendpic": false}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
assert settings['serverip'] == '0.0.0.0'
assert settings['notify'] == 'True'
assert settings['sendpic'] == 'False'
def test_post_ignores_unexistant_fields(storage_file, client):
client.post(
url_for('main.settings'),
data='{"fantasy": "fiction"}',
headers={'Content-Type': 'application/json'})
settings = ls.get_all('settings')
with pytest.raises(KeyError):
assert settings['fantasy'] == 'fiction'
|
|
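The tests compare against the strings 'True' and 'False' rather than booleans because INI-backed storage round-trips every value as text. A sketch of the read side, assuming localstorage wraps the standard configparser module:

import configparser

parser = configparser.ConfigParser()
parser.read_string(TEST_FILE)  # TEST_FILE as defined in the tests above

assert parser['settings']['notify'] == 'False'            # raw value is a string
assert parser.getboolean('settings', 'notify') is False   # explicit coercion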
d15472fec9b3bbc589099409376191eb7a93eac6
|
src/diamond/handler/zmq_pubsub.py
|
src/diamond/handler/zmq_pubsub.py
|
"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.contact.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the data as ......
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
|
"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the data as ......
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
|
Fix typo. contact should be context
|
Fix typo. contact should be context
|
Python
|
mit
|
works-mobile/Diamond,signalfx/Diamond,Ssawa/Diamond,Ensighten/Diamond,h00dy/Diamond,janisz/Diamond-1,thardie/Diamond,Ssawa/Diamond,saucelabs/Diamond,Basis/Diamond,Ensighten/Diamond,Netuitive/Diamond,cannium/Diamond,zoidbergwill/Diamond,bmhatfield/Diamond,datafiniti/Diamond,metamx/Diamond,mzupan/Diamond,cannium/Diamond,mfriedenhagen/Diamond,disqus/Diamond,CYBERBUGJR/Diamond,hvnsweeting/Diamond,jriguera/Diamond,gg7/diamond,Netuitive/Diamond,Ormod/Diamond,Slach/Diamond,socialwareinc/Diamond,Nihn/Diamond-1,zoidbergwill/Diamond,hvnsweeting/Diamond,codepython/Diamond,tuenti/Diamond,tuenti/Diamond,sebbrandt87/Diamond,saucelabs/Diamond,jumping/Diamond,Nihn/Diamond-1,ramjothikumar/Diamond,Precis/Diamond,cannium/Diamond,TAKEALOT/Diamond,szibis/Diamond,socialwareinc/Diamond,rtoma/Diamond,datafiniti/Diamond,EzyInsights/Diamond,Netuitive/netuitive-diamond,dcsquared13/Diamond,Slach/Diamond,Ormod/Diamond,russss/Diamond,mzupan/Diamond,MichaelDoyle/Diamond,hvnsweeting/Diamond,metamx/Diamond,tellapart/Diamond,hamelg/Diamond,jumping/Diamond,zoidbergwill/Diamond,MediaMath/Diamond,python-diamond/Diamond,sebbrandt87/Diamond,bmhatfield/Diamond,joel-airspring/Diamond,tusharmakkar08/Diamond,eMerzh/Diamond-1,codepython/Diamond,stuartbfox/Diamond,sebbrandt87/Diamond,MediaMath/Diamond,socialwareinc/Diamond,MediaMath/Diamond,signalfx/Diamond,TinLe/Diamond,ceph/Diamond,dcsquared13/Diamond,Ormod/Diamond,Netuitive/netuitive-diamond,hamelg/Diamond,jumping/Diamond,gg7/diamond,jaingaurav/Diamond,Clever/Diamond,python-diamond/Diamond,thardie/Diamond,TAKEALOT/Diamond,metamx/Diamond,dcsquared13/Diamond,actmd/Diamond,krbaker/Diamond,bmhatfield/Diamond,TinLe/Diamond,mzupan/Diamond,Basis/Diamond,TinLe/Diamond,actmd/Diamond,thardie/Diamond,russss/Diamond,codepython/Diamond,ceph/Diamond,Nihn/Diamond-1,anandbhoraskar/Diamond,Slach/Diamond,ramjothikumar/Diamond,Precis/Diamond,rtoma/Diamond,szibis/Diamond,janisz/Diamond-1,jaingaurav/Diamond,mfriedenhagen/Diamond,tellapart/Diamond,krbaker/Diamond,works-mobile/Diamond,tusharmakkar08/Diamond,gg7/diamond,janisz/Diamond-1,eMerzh/Diamond-1,datafiniti/Diamond,tuenti/Diamond,jaingaurav/Diamond,szibis/Diamond,anandbhoraskar/Diamond,Slach/Diamond,jaingaurav/Diamond,EzyInsights/Diamond,hvnsweeting/Diamond,acquia/Diamond,EzyInsights/Diamond,Netuitive/Diamond,jriguera/Diamond,anandbhoraskar/Diamond,joel-airspring/Diamond,eMerzh/Diamond-1,signalfx/Diamond,Precis/Diamond,h00dy/Diamond,acquia/Diamond,acquia/Diamond,stuartbfox/Diamond,EzyInsights/Diamond,TinLe/Diamond,janisz/Diamond-1,stuartbfox/Diamond,Clever/Diamond,CYBERBUGJR/Diamond,gg7/diamond,Netuitive/netuitive-diamond,tellapart/Diamond,russss/Diamond,eMerzh/Diamond-1,works-mobile/Diamond,Clever/Diamond,skbkontur/Diamond,jriguera/Diamond,TAKEALOT/Diamond,jumping/Diamond,ramjothikumar/Diamond,timchenxiaoyu/Diamond,Ensighten/Diamond,dcsquared13/Diamond,saucelabs/Diamond,ceph/Diamond,datafiniti/Diamond,rtoma/Diamond,Nihn/Diamond-1,CYBERBUGJR/Diamond,timchenxiaoyu/Diamond,timchenxiaoyu/Diamond,krbaker/Diamond,CYBERBUGJR/Diamond,Basis/Diamond,tusharmakkar08/Diamond,signalfx/Diamond,ramjothikumar/Diamond,jriguera/Diamond,Netuitive/netuitive-diamond,tellapart/Diamond,Precis/Diamond,disqus/Diamond,Ormod/Diamond,skbkontur/Diamond,skbkontur/Diamond,h00dy/Diamond,tuenti/Diamond,mfriedenhagen/Diamond,hamelg/Diamond,Basis/Diamond,mzupan/Diamond,python-diamond/Diamond,TAKEALOT/Diamond,russss/Diamond,stuartbfox/Diamond,mfriedenhagen/Diamond,actmd/Diamond,MichaelDoyle/Diamond,skbkontur/Diamond,bmhatfield/Diamond,timchenxiaoyu/Diamond,anandbhoraskar/Diamond,tush
armakkar08/Diamond,sebbrandt87/Diamond,zoidbergwill/Diamond,acquia/Diamond,joel-airspring/Diamond,cannium/Diamond,ceph/Diamond,codepython/Diamond,saucelabs/Diamond,hamelg/Diamond,Ssawa/Diamond,Ssawa/Diamond,actmd/Diamond,disqus/Diamond,joel-airspring/Diamond,MichaelDoyle/Diamond,krbaker/Diamond,socialwareinc/Diamond,rtoma/Diamond,MichaelDoyle/Diamond,szibis/Diamond,h00dy/Diamond,Clever/Diamond,thardie/Diamond,Ensighten/Diamond,works-mobile/Diamond,MediaMath/Diamond,Netuitive/Diamond
|
"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.contact.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the metric as a string
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
Fix typo. contact should be context
|
"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the metric as a string
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
|
<commit_before>"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.contact.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the metric as a string
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
<commit_msg>Fix typo. contact should be context<commit_after>
|
"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the metric as a string
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
|
"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.contact.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the metric as a string
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
Fix typo. contact should be context"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the metric as a string
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
|
<commit_before>"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.contact.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the metric as a string
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
<commit_msg>Fix typo. contact should be context<commit_after>"""
Output the collected values to a Zer0MQ pub/sub channel
"""
from Handler import Handler
import zmq
class zmqHandler ( Handler ):
"""
Implements the abstract Handler class, sending data to a Zer0MQ pub channel
"""
def __init__( self, config=None ):
"""
Create a new instance of zmqHandler class
"""
# Initialize Handler
Handler.__init__(self,config)
# Initialize Data
self.context = None
self.socket = None
# Initialize Options
self.port = int( self.config['port'] )
# Create ZMQ pub socket and bind
self._bind()
def _bind(self):
"""
Create PUB socket and bind
"""
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:%i" % self.port )
def __del__(self):
"""
Destroy instance of the zmqHandler class
"""
pass
def process(self,metric):
"""
Process a metric and send it to zmq pub socket
"""
# Acquire a lock
self.lock.acquire()
# Send the data as ......
self.socket.send("%s" % str(metric) )
# Release lock
self.lock.release()
|
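For context on the record above, a minimal consumer for this handler might look like the following sketch. It assumes the handler was configured with port 5555 (an example value for the handler's 'port' option, not something stated in the commit) and uses the standard pyzmq SUB socket API.
import zmq

# Connect a SUB socket to wherever the zmqHandler bound its PUB socket.
# Port 5555 is an assumed example value for the handler's 'port' option.
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect("tcp://localhost:5555")
# Subscribe to everything; the handler sends bare metric strings, no topic prefix.
subscriber.setsockopt_string(zmq.SUBSCRIBE, "")

while True:
    # Each message is the string form of one metric, as written by process().
    print(subscriber.recv_string())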
d62072cd1e01dc37563934cb1b87161ea06ebbaf
|
scripts/osfstorage/migrate_deleted_files.py
|
scripts/osfstorage/migrate_deleted_files.py
|
import logging
from modularodm import Q
from website.app import init_app
from website.addons.osfstorage.model import OsfStorageFileNode
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in OsfStorageFileNode.find(Q('is_deleted', 'eq', True)):
file.delete()
logger.info(u'Moving {!r} to the trashed collections'.format(file))
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
Add migration script for deleted osfstorage
|
Add migration script for deleted osfstorage
|
Python
|
apache-2.0
|
zachjanicki/osf.io,arpitar/osf.io,chennan47/osf.io,bdyetton/prettychart,DanielSBrown/osf.io,billyhunt/osf.io,alexschiller/osf.io,mfraezz/osf.io,chrisseto/osf.io,caseyrygt/osf.io,RomanZWang/osf.io,bdyetton/prettychart,lyndsysimon/osf.io,cldershem/osf.io,ZobairAlijan/osf.io,binoculars/osf.io,asanfilippo7/osf.io,Ghalko/osf.io,aaxelb/osf.io,amyshi188/osf.io,wearpants/osf.io,samchrisinger/osf.io,MerlinZhang/osf.io,brandonPurvis/osf.io,sloria/osf.io,Nesiehr/osf.io,bdyetton/prettychart,amyshi188/osf.io,acshi/osf.io,KAsante95/osf.io,RomanZWang/osf.io,caseyrygt/osf.io,samchrisinger/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,RomanZWang/osf.io,kch8qx/osf.io,wearpants/osf.io,chennan47/osf.io,revanthkolli/osf.io,ckc6cz/osf.io,fabianvf/osf.io,emetsger/osf.io,dplorimer/osf,kch8qx/osf.io,jinluyuan/osf.io,felliott/osf.io,RomanZWang/osf.io,mfraezz/osf.io,Nesiehr/osf.io,DanielSBrown/osf.io,crcresearch/osf.io,Ghalko/osf.io,brandonPurvis/osf.io,SSJohns/osf.io,leb2dg/osf.io,cwisecarver/osf.io,mluo613/osf.io,alexschiller/osf.io,ZobairAlijan/osf.io,GaryKriebel/osf.io,GageGaskins/osf.io,jnayak1/osf.io,zachjanicki/osf.io,samanehsan/osf.io,jmcarp/osf.io,jolene-esposito/osf.io,zkraime/osf.io,MerlinZhang/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,icereval/osf.io,doublebits/osf.io,abought/osf.io,ckc6cz/osf.io,felliott/osf.io,caseyrollins/osf.io,cslzchen/osf.io,ticklemepierce/osf.io,sbt9uc/osf.io,sloria/osf.io,fabianvf/osf.io,erinspace/osf.io,RomanZWang/osf.io,chrisseto/osf.io,acshi/osf.io,baylee-d/osf.io,pattisdr/osf.io,adlius/osf.io,SSJohns/osf.io,haoyuchen1992/osf.io,reinaH/osf.io,barbour-em/osf.io,caseyrygt/osf.io,danielneis/osf.io,GaryKriebel/osf.io,zkraime/osf.io,chrisseto/osf.io,caneruguz/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,reinaH/osf.io,zkraime/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,GageGaskins/osf.io,barbour-em/osf.io,pattisdr/osf.io,jmcarp/osf.io,asanfilippo7/osf.io,njantrania/osf.io,abought/osf.io,caneruguz/osf.io,doublebits/osf.io,amyshi188/osf.io,hmoco/osf.io,petermalcolm/osf.io,billyhunt/osf.io,ticklemepierce/osf.io,kch8qx/osf.io,ticklemepierce/osf.io,Ghalko/osf.io,jinluyuan/osf.io,njantrania/osf.io,cslzchen/osf.io,sloria/osf.io,HarryRybacki/osf.io,wearpants/osf.io,barbour-em/osf.io,asanfilippo7/osf.io,revanthkolli/osf.io,samanehsan/osf.io,cldershem/osf.io,dplorimer/osf,hmoco/osf.io,arpitar/osf.io,reinaH/osf.io,cwisecarver/osf.io,aaxelb/osf.io,njantrania/osf.io,kwierman/osf.io,GaryKriebel/osf.io,jeffreyliu3230/osf.io,GageGaskins/osf.io,brandonPurvis/osf.io,zamattiac/osf.io,laurenrevere/osf.io,fabianvf/osf.io,caseyrygt/osf.io,petermalcolm/osf.io,lyndsysimon/osf.io,felliott/osf.io,cosenal/osf.io,aaxelb/osf.io,abought/osf.io,leb2dg/osf.io,cslzchen/osf.io,danielneis/osf.io,cslzchen/osf.io,Nesiehr/osf.io,brandonPurvis/osf.io,laurenrevere/osf.io,leb2dg/osf.io,adlius/osf.io,sbt9uc/osf.io,monikagrabowska/osf.io,MerlinZhang/osf.io,alexschiller/osf.io,HarryRybacki/osf.io,ZobairAlijan/osf.io,haoyuchen1992/osf.io,saradbowman/osf.io,Ghalko/osf.io,GageGaskins/osf.io,TomHeatwole/osf.io,emetsger/osf.io,DanielSBrown/osf.io,TomHeatwole/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,kch8qx/osf.io,billyhunt/osf.io,crcresearch/osf.io,adlius/osf.io,revanthkolli/osf.io,CenterForOpenScience/osf.io,jeffreyliu3230/osf.io,GageGaskins/osf.io,zamattiac/osf.io,jolene-esposito/osf.io,binoculars/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,doublebits/osf.io,KAsante95/osf.io,hmoco/osf.io,mluke93/osf.io,SSJohns/osf.io,acshi/osf.io,jnayak1/osf.io,emetsger/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,mluke93/o
sf.io,ticklemepierce/osf.io,monikagrabowska/osf.io,zachjanicki/osf.io,lyndsysimon/osf.io,dplorimer/osf,erinspace/osf.io,samanehsan/osf.io,kwierman/osf.io,icereval/osf.io,leb2dg/osf.io,samchrisinger/osf.io,cosenal/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,bdyetton/prettychart,caneruguz/osf.io,TomBaxter/osf.io,kwierman/osf.io,GaryKriebel/osf.io,alexschiller/osf.io,jolene-esposito/osf.io,fabianvf/osf.io,Johnetordoff/osf.io,haoyuchen1992/osf.io,arpitar/osf.io,ZobairAlijan/osf.io,TomHeatwole/osf.io,mluke93/osf.io,reinaH/osf.io,rdhyee/osf.io,emetsger/osf.io,monikagrabowska/osf.io,wearpants/osf.io,cosenal/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,danielneis/osf.io,rdhyee/osf.io,billyhunt/osf.io,billyhunt/osf.io,kch8qx/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,dplorimer/osf,HarryRybacki/osf.io,chrisseto/osf.io,danielneis/osf.io,baylee-d/osf.io,acshi/osf.io,jmcarp/osf.io,mattclark/osf.io,DanielSBrown/osf.io,arpitar/osf.io,sbt9uc/osf.io,TomBaxter/osf.io,cldershem/osf.io,ckc6cz/osf.io,kwierman/osf.io,njantrania/osf.io,mluo613/osf.io,binoculars/osf.io,jinluyuan/osf.io,zamattiac/osf.io,petermalcolm/osf.io,KAsante95/osf.io,mluke93/osf.io,jeffreyliu3230/osf.io,jinluyuan/osf.io,KAsante95/osf.io,caseyrollins/osf.io,crcresearch/osf.io,doublebits/osf.io,TomHeatwole/osf.io,caneruguz/osf.io,SSJohns/osf.io,cosenal/osf.io,cldershem/osf.io,MerlinZhang/osf.io,rdhyee/osf.io,acshi/osf.io,zkraime/osf.io,samchrisinger/osf.io,icereval/osf.io,cwisecarver/osf.io,mattclark/osf.io,doublebits/osf.io,petermalcolm/osf.io,hmoco/osf.io,jnayak1/osf.io,laurenrevere/osf.io,chennan47/osf.io,baylee-d/osf.io,lyndsysimon/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,samanehsan/osf.io,sbt9uc/osf.io,monikagrabowska/osf.io,mluo613/osf.io,ckc6cz/osf.io,jeffreyliu3230/osf.io,brandonPurvis/osf.io,KAsante95/osf.io,mluo613/osf.io,monikagrabowska/osf.io,HarryRybacki/osf.io,cwisecarver/osf.io,amyshi188/osf.io,brianjgeiger/osf.io,adlius/osf.io,abought/osf.io,haoyuchen1992/osf.io,barbour-em/osf.io,caseyrollins/osf.io,jnayak1/osf.io,jolene-esposito/osf.io,brianjgeiger/osf.io,zamattiac/osf.io,Nesiehr/osf.io,jmcarp/osf.io,felliott/osf.io,alexschiller/osf.io,asanfilippo7/osf.io,rdhyee/osf.io
|
Add migration script for deleted osfstorage
|
import logging
from modularodm import Q
from website.app import init_app
from website.addons.osfstorage.model import OsfStorageFileNode
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in OsfStorageFileNode.find(Q('is_deleted', 'eq', True)):
file.delete()
logger.info(u'Moving {!r} to the trashed collections'.format(file))
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
<commit_before><commit_msg>Add migration script for deleted osfstorage<commit_after>
|
import logging
from modularodm import Q
from website.app import init_app
from website.addons.osfstorage.model import OsfStorageFileNode
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in OsfStorageFileNode.find(Q('is_deleted', 'eq', True)):
file.delete()
logger.info(u'Moving {!r} to the trashed collections'.format(file))
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
Add migration script for deleted osfstorageimport logging
from modularodm import Q
from website.app import init_app
from website.addons.osfstorage.model import OsfStorageFileNode
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in OsfStorageFileNode.find(Q('is_deleted', 'eq', True)):
file.delete()
logger.info(u'Moving {!r} to the trashed collections'.format(file))
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
<commit_before><commit_msg>Add migration script for deleted osfstorage<commit_after>import logging
from modularodm import Q
from website.app import init_app
from website.addons.osfstorage.model import OsfStorageFileNode
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
def main():
for file in OsfStorageFileNode.find(Q('is_deleted', 'eq', True)):
file.delete()
logger.info(u'Moving {!r} to the trashed collections'.format(file))
if __name__ == '__main__':
scripts_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
main()
|
|
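A destructive one-shot migration like the one above pairs naturally with a dry-run mode. The sketch below is hypothetical (the --dry flag and the dry_run parameter are additions, not part of the original script) but reuses the same query and logging.
import sys
import logging

from modularodm import Q
from website.addons.osfstorage.model import OsfStorageFileNode

logger = logging.getLogger(__name__)

def main(dry_run=True):
    # Report every match first; only call delete() when explicitly asked.
    for file in OsfStorageFileNode.find(Q('is_deleted', 'eq', True)):
        logger.info(u'Would move {!r} to the trashed collections'.format(file))
        if not dry_run:
            file.delete()

if __name__ == '__main__':
    # Hypothetical flag: run with --dry to preview without deleting.
    main(dry_run='--dry' in sys.argv)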
414f70d3c378353fb29664db3b4c980c664b2f60
|
txircd/modules/extra/conn_join.py
|
txircd/modules/extra/conn_join.py
|
from twisted.plugin import IPlugin
from txircd.channel import IRCChannel
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoJoin(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoJoin"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoJoinChannels) ]
def autoJoinChannels(self, user):
for chanName in self.ircd.config.getWithDefault("client_join_on_connect", []):
if chanName[0] != "#":
chanName = "#{}".format(chanName)
if chanName in self.ircd.channels:
channel = self.ircd.channels[chanName]
else:
channel = IRCChannel(self.ircd, chanName)
user.joinChannel(channel)
autoJoin = AutoJoin()
|
Implement the autojoin on connect module
|
Implement the autojoin on connect module
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Implement the autojoin on connect module
|
from twisted.plugin import IPlugin
from txircd.channel import IRCChannel
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoJoin(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoJoin"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoJoinChannels) ]
def autoJoinChannels(self, user):
for chanName in self.ircd.config.getWithDefault("client_join_on_connect", []):
if chanName[0] != "#":
chanName = "#{}".format(chanName)
if chanName in self.ircd.channels:
channel = self.ircd.channels[chanName]
else:
channel = IRCChannel(self.ircd, chanName)
user.joinChannel(channel)
autoJoin = AutoJoin()
|
<commit_before><commit_msg>Implement the autojoin on connect module<commit_after>
|
from twisted.plugin import IPlugin
from txircd.channel import IRCChannel
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoJoin(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoJoin"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoJoinChannels) ]
def autoJoinChannels(self, user):
for chanName in self.ircd.config.getWithDefault("client_join_on_connect", []):
if chanName[0] != "#":
chanName = "#{}".format(chanName)
if chanName in self.ircd.channels:
channel = self.ircd.channels[chanName]
else:
channel = IRCChannel(self.ircd, chanName)
user.joinChannel(channel)
autoJoin = AutoJoin()
|
Implement the autojoin on connect modulefrom twisted.plugin import IPlugin
from txircd.channel import IRCChannel
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoJoin(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoJoin"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoJoinChannels) ]
def autoJoinChannels(self, user):
for chanName in self.ircd.config.getWithDefault("client_join_on_connect", []):
if chanName[0] != "#":
chanName = "#{}".format(chanName)
if chanName in self.ircd.channels:
channel = self.ircd.channels[chanName]
else:
channel = IRCChannel(self.ircd, chanName)
user.joinChannel(channel)
autoJoin = AutoJoin()
|
<commit_before><commit_msg>Implement the autojoin on connect module<commit_after>from twisted.plugin import IPlugin
from txircd.channel import IRCChannel
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class AutoJoin(ModuleData):
implements(IPlugin, IModuleData)
name = "AutoJoin"
def hookIRCd(self, ircd):
self.ircd = ircd
def actions(self):
return [ ("welcome", 1, self.autoJoinChannels) ]
def autoJoinChannels(self, user):
for chanName in self.ircd.config.getWithDefault("client_join_on_connect", []):
if chanName[0] != "#":
chanName = "#{}".format(chanName)
if chanName in self.ircd.channels:
channel = self.ircd.channels[chanName]
else:
channel = IRCChannel(self.ircd, chanName)
user.joinChannel(channel)
autoJoin = AutoJoin()
|
|
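The only transformation autoJoinChannels applies to configured names is adding a missing '#' prefix. That rule can be checked in isolation with a small sketch (txircd itself is not required; the function name is made up for the example):
def normalize_channel_names(names):
    # Mirror the module's rule: prefix names that do not already start with '#'.
    return [n if n[0] == "#" else "#{}".format(n) for n in names]

# Both spellings resolve to proper channel names:
print(normalize_channel_names(["help", "#lobby"]))  # ['#help', '#lobby']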
50536bd56904158d0a5c5587c62b76a6ce8745be
|
tests.py
|
tests.py
|
import requests
from tornalet import tornalet
from trequests import setup_session
from tornado.ioloop import IOLoop
from tornado.escape import json_decode
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
setup_session()
class TestUtil(object):
def send(self, data):
return requests.post('http://httpbin.org/post', data=data).json()
class TestHandler(RequestHandler):
@tornalet
def get(self):
util = TestUtil()
data = {'foo': 'bar'}
response = util.send(data)
self.write(response)
class TestCase(AsyncHTTPTestCase):
def setUp(self):
self.application = None
super(TestCase, self).setUp()
self.get_app().callback_called = False
def get_new_ioloop(self):
return IOLoop().instance()
def get_app(self):
if not self.application:
self.application = Application(
[(r'/', TestHandler)]
)
return self.application
def _test_callback(self):
self.get_app().callback_called = True
def test_post(self):
"""Test using a library that POSTs to requestbin using requests.
"""
self.get_new_ioloop().add_callback(self._test_callback)
response = self.fetch('/')
data = json_decode(response.body)
self.assertEqual(response.code, 200)
self.assertEqual(data['form']['foo'], 'bar')
self.assertTrue(self.get_app().callback_called)
|
Add a basic unit test
|
Add a basic unit test
|
Python
|
bsd-3-clause
|
isaacnotnwton/trequests,1stvamp/trequests,pandada8/trequests
|
Add a basic unit test
|
import requests
from tornalet import tornalet
from trequests import setup_session
from tornado.ioloop import IOLoop
from tornado.escape import json_decode
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
setup_session()
class TestUtil(object):
def send(self, data):
return requests.post('http://httpbin.org/post', data=data).json()
class TestHandler(RequestHandler):
@tornalet
def get(self):
util = TestUtil()
data = {'foo': 'bar'}
response = util.send(data)
self.write(response)
class TestCase(AsyncHTTPTestCase):
def setUp(self):
self.application = None
super(TestCase, self).setUp()
self.get_app().callback_called = False
def get_new_ioloop(self):
return IOLoop().instance()
def get_app(self):
if not self.application:
self.application = Application(
[(r'/', TestHandler)]
)
return self.application
def _test_callback(self):
self.get_app().callback_called = True
def test_post(self):
"""Test using a library that POSTs to requestbin using requests.
"""
self.get_new_ioloop().add_callback(self._test_callback)
response = self.fetch('/')
data = json_decode(response.body)
self.assertEqual(response.code, 200)
self.assertEqual(data['form']['foo'], 'bar')
self.assertTrue(self.get_app().callback_called)
|
<commit_before><commit_msg>Add a basic unit test<commit_after>
|
import requests
from tornalet import tornalet
from trequests import setup_session
from tornado.ioloop import IOLoop
from tornado.escape import json_decode
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
setup_session()
class TestUtil(object):
def send(self, data):
return requests.post('http://httpbin.org/post', data=data).json()
class TestHandler(RequestHandler):
@tornalet
def get(self):
util = TestUtil()
data = {'foo': 'bar'}
response = util.send(data)
self.write(response)
class TestCase(AsyncHTTPTestCase):
def setUp(self):
self.application = None
super(TestCase, self).setUp()
self.get_app().callback_called = False
def get_new_ioloop(self):
return IOLoop().instance()
def get_app(self):
if not self.application:
self.application = Application(
[(r'/', TestHandler)]
)
return self.application
def _test_callback(self):
self.get_app().callback_called = True
def test_post(self):
"""Test using a library that POSTs to requestbin using requests.
"""
self.get_new_ioloop().add_callback(self._test_callback)
response = self.fetch('/')
data = json_decode(response.body)
self.assertEqual(response.code, 200)
self.assertEqual(data['form']['foo'], 'bar')
self.assertTrue(self.get_app().callback_called)
|
Add a basic unit testimport requests
from tornalet import tornalet
from trequests import setup_session
from tornado.ioloop import IOLoop
from tornado.escape import json_decode
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
setup_session()
class TestUtil(object):
def send(self, data):
return requests.post('http://httpbin.org/post', data=data).json()
class TestHandler(RequestHandler):
@tornalet
def get(self):
util = TestUtil()
data = {'foo': 'bar'}
response = util.send(data)
self.write(response)
class TestCase(AsyncHTTPTestCase):
def setUp(self):
self.application = None
super(TestCase, self).setUp()
self.get_app().callback_called = False
def get_new_ioloop(self):
return IOLoop().instance()
def get_app(self):
if not self.application:
self.application = Application(
[(r'/', TestHandler)]
)
return self.application
def _test_callback(self):
self.get_app().callback_called = True
def test_post(self):
"""Test using a library that POSTs to requestbin using requests.
"""
self.get_new_ioloop().add_callback(self._test_callback)
response = self.fetch('/')
data = json_decode(response.body)
self.assertEqual(response.code, 200)
self.assertEqual(data['form']['foo'], 'bar')
self.assertTrue(self.get_app().callback_called)
|
<commit_before><commit_msg>Add a basic unit test<commit_after>import requests
from tornalet import tornalet
from trequests import setup_session
from tornado.ioloop import IOLoop
from tornado.escape import json_decode
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
setup_session()
class TestUtil(object):
def send(self, data):
return requests.post('http://httpbin.org/post', data=data).json()
class TestHandler(RequestHandler):
@tornalet
def get(self):
util = TestUtil()
data = {'foo': 'bar'}
response = util.send(data)
self.write(response)
class TestCase(AsyncHTTPTestCase):
def setUp(self):
self.application = None
super(TestCase, self).setUp()
self.get_app().callback_called = False
def get_new_ioloop(self):
return IOLoop().instance()
def get_app(self):
if not self.application:
self.application = Application(
[(r'/', TestHandler)]
)
return self.application
def _test_callback(self):
self.get_app().callback_called = True
def test_post(self):
"""Test using a library that POSTs to requestbin using requests.
"""
self.get_new_ioloop().add_callback(self._test_callback)
response = self.fetch('/')
data = json_decode(response.body)
self.assertEqual(response.code, 200)
self.assertEqual(data['form']['foo'], 'bar')
self.assertTrue(self.get_app().callback_called)
|
|
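Stripped of the test harness, the pattern this test exercises is: call setup_session() once at import time, then use requests inside a @tornalet-wrapped handler as if it were synchronous. A minimal sketch (the handler and URL are examples, not part of the test above):
import requests
from tornalet import tornalet
from trequests import setup_session
from tornado.web import Application, RequestHandler

# Patch requests once so its blocking I/O is routed through Tornado's IOLoop.
setup_session()

class EchoHandler(RequestHandler):
    @tornalet
    def get(self):
        # Looks synchronous, but yields to the IOLoop while the request runs.
        self.write(requests.get('http://httpbin.org/get').json())

application = Application([(r'/', EchoHandler)])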
20f255e5065d40be35228ca97244e19c5473ebe4
|
tests/unit/test_commands.py
|
tests/unit/test_commands.py
|
from django.core.management import call_command
from django.test import TestCase
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class SuspenderTestCase(TestCase):
@patch('hastexo.jobs.SuspenderJob')
def test_start_suspender(self, mock_suspender):
# We need to mock the scheduler here, because we obviously
# don't want to add an actual recurring job during testing
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('suspender')
# Did we create a new suspender job?
self.assertEqual(mock_suspender.call_count, 1)
class ReaperTestCase(TestCase):
@patch('hastexo.jobs.ReaperJob')
def test_start_reaper(self, mock_reaper):
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('reaper')
# Did we create a new reaper job?
self.assertEqual(mock_reaper.call_count, 1)
|
Add minimal unit tests for the manage.py commands
|
Add minimal unit tests for the manage.py commands
|
Python
|
agpl-3.0
|
hastexo/hastexo-xblock,hastexo/hastexo-xblock,hastexo/hastexo-xblock,hastexo/hastexo-xblock
|
Add minimal unit tests for the manage.py commands
|
from django.core.management import call_command
from django.test import TestCase
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class SuspenderTestCase(TestCase):
@patch('hastexo.jobs.SuspenderJob')
def test_start_suspender(self, mock_suspender):
# We need to mock the scheduler here, because we obviously
# don't want to add an actual recurring job during testing
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('suspender')
# Did we create a new suspender job?
self.assertEqual(mock_suspender.call_count, 1)
class ReaperTestCase(TestCase):
@patch('hastexo.jobs.ReaperJob')
def test_start_reaper(self, mock_reaper):
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('reaper')
# Did we create a new reaper job?
self.assertEqual(mock_reaper.call_count, 1)
|
<commit_before><commit_msg>Add minimal unit tests for the manage.py commands<commit_after>
|
from django.core.management import call_command
from django.test import TestCase
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class SuspenderTestCase(TestCase):
@patch('hastexo.jobs.SuspenderJob')
def test_start_suspender(self, mock_suspender):
# We need to mock the scheduler here, because we obviously
# don't want to add an actual recurring job during testing
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('suspender')
# Did we create a new suspender job?
self.assertEqual(mock_suspender.call_count, 1)
class ReaperTestCase(TestCase):
@patch('hastexo.jobs.ReaperJob')
def test_start_reaper(self, mock_reaper):
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('reaper')
# Did we create a new reaper job?
self.assertEqual(mock_reaper.call_count, 1)
|
Add minimal unit tests for the manage.py commandsfrom django.core.management import call_command
from django.test import TestCase
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class SuspenderTestCase(TestCase):
@patch('hastexo.jobs.SuspenderJob')
def test_start_suspender(self, mock_suspender):
# We need to mock the scheduler here, because we obviously
# don't want to add an actual recurring job during testing
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('suspender')
# Did we create a new suspender job?
self.assertEqual(mock_suspender.call_count, 1)
class ReaperTestCase(TestCase):
@patch('hastexo.jobs.ReaperJob')
def test_start_reaper(self, mock_reaper):
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('reaper')
# Did we create a new reaper job?
self.assertEqual(mock_reaper.call_count, 1)
|
<commit_before><commit_msg>Add minimal unit tests for the manage.py commands<commit_after>from django.core.management import call_command
from django.test import TestCase
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class SuspenderTestCase(TestCase):
@patch('hastexo.jobs.SuspenderJob')
def test_start_suspender(self, mock_suspender):
# We need to mock the scheduler here, because we obviously
# don't want to add an actual recurring job during testing
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('suspender')
# Did we create a new suspender job?
self.assertEqual(mock_suspender.call_count, 1)
class ReaperTestCase(TestCase):
@patch('hastexo.jobs.ReaperJob')
def test_start_reaper(self, mock_reaper):
with patch('apscheduler.schedulers.blocking.BlockingScheduler'):
call_command('reaper')
# Did we create a new reaper job?
self.assertEqual(mock_reaper.call_count, 1)
|
|
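The two test cases differ only in the command name and the job path, so the pattern generalizes. A hypothetical helper (assuming Python 3's unittest.mock) might read:
from unittest.mock import patch

from django.core.management import call_command

def assert_command_schedules_job(command_name, job_path):
    # Run the command with the blocking scheduler mocked out, then check
    # that exactly one instance of the expected job class was created.
    with patch(job_path) as mock_job, \
            patch('apscheduler.schedulers.blocking.BlockingScheduler'):
        call_command(command_name)
    assert mock_job.call_count == 1

# Usage mirroring the two cases above:
# assert_command_schedules_job('suspender', 'hastexo.jobs.SuspenderJob')
# assert_command_schedules_job('reaper', 'hastexo.jobs.ReaperJob')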
c00187d36ff625e7b78861d326452efb97fb2498
|
utils/validator_all.py
|
utils/validator_all.py
|
import re
import os
import sys
import subprocess
from natsort import natsort_keygen, ns
if len(sys.argv) < 2:
print('python3 validator_all.py testbenchDir')
exit(0)
solver = './yasat' if len(sys.argv)<3 else sys.argv[2]
SAT = 0
UNSAT = 0
ERROR = 0
TIMEOUT = 0
def check(cnfFile, result):
assignment = set(str(result).split()[2:-1])
ok = True
clsok = False
with open(cnfFile) as cnf:
for line in cnf:
clause = line.split()
if len(clause)==0 or clause[0] == 'p' or clause[0] == 'c':
continue
for v in clause:
if v == '0':
if not clsok:
ok = False
clsok = False
elif v in assignment:
clsok = True
return ok
for dirname, dirnames, filenames in os.walk(sys.argv[1]):
filenames.sort(key=lambda x: natsort_keygen(alg=ns.IGNORECASE)(x))
for filename in filenames:
# Run only .cnf files
if filename.find('.cnf') == -1:
continue
cnfFile = os.path.join(dirname, filename)
print(os.path.join(filename).ljust(20, " "), end='')
proc = subprocess.Popen([
solver, '-stdout', '-statistic',
cnfFile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
try:
outs, errs = proc.communicate(input=None, timeout=1200)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
TIMEOUT += 1
print('TIMEOUT\n')
continue
result = str(outs)
if proc.returncode != 0:
print('ERROR')
ERROR = ERROR + 1
elif result.find('UNSATISFIABLE') != -1:
print('UNSAT')
UNSAT = UNSAT + 1
else:
print('SAT', 'AC' if check(cnfFile, result) else 'WA')
SAT = SAT + 1
print(" statistic ".center(70, "-"))
print('SAT', SAT, '/ UNSAT', UNSAT, '/ ERROR', ERROR, '/ TIMEOUT', TIMEOUT)
print("".center(70, "="))
|
Add validator for all testcase in dir
|
Add validator for all testcase in dir
|
Python
|
mit
|
sunset1995/YA-SAT,sunset1995/YA-SAT,sunset1995/YA-SAT
|
Add validator for all testcase in dir
|
import re
import os
import sys
import subprocess
from natsort import natsort_keygen, ns
if len(sys.argv) < 2:
print('python3 validator_all.py testbenchDir')
exit(0)
solver = './yasat' if len(sys.argv)<3 else sys.argv[2]
SAT = 0
UNSAT = 0
ERROR = 0
TIMEOUT = 0
def check(cnfFile, result):
assignment = set(str(result).split()[2:-1])
ok = True
clsok = False
with open(cnfFile) as cnf:
for line in cnf:
clause = line.split()
if len(clause)==0 or clause[0] == 'p' or clause[0] == 'c':
continue
for v in clause:
if v == '0':
if not clsok:
ok = False
clsok = False
elif v in assignment:
clsok = True
return ok
for dirname, dirnames, filenames in os.walk(sys.argv[1]):
filenames.sort(key=lambda x: natsort_keygen(alg=ns.IGNORECASE)(x))
for filename in filenames:
# Run only .cnf files
if filename.find('.cnf') == -1:
continue
cnfFile = os.path.join(dirname, filename)
print(os.path.join(filename).ljust(20, " "), end='')
proc = subprocess.Popen([
solver, '-stdout', '-statistic',
cnfFile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
try:
outs, errs = proc.communicate(input=None, timeout=1200)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
TIMEOUT += 1
print('TIMEOUT\n')
continue
result = str(outs)
if proc.returncode != 0:
print('ERROR')
ERROR = ERROR + 1
elif result.find('UNSATISFIABLE') != -1:
print('UNSAT')
UNSAT = UNSAT + 1
else:
print('SAT', 'AC' if check(cnfFile, result) else 'WA')
SAT = SAT + 1
print(" statistic ".center(70, "-"))
print('SAT', SAT, '/ UNSAT', UNSAT, '/ ERROR', ERROR, '/ TIMEOUT', TIMEOUT)
print("".center(70, "="))
|
<commit_before><commit_msg>Add validator for all testcase in dir<commit_after>
|
import re
import os
import sys
import subprocess
from natsort import natsort_keygen, ns
if len(sys.argv) < 2:
print('python3 validator_all.py testbenchDir')
exit(0)
solver = './yasat' if len(sys.argv)<3 else sys.argv[2]
SAT = 0
UNSAT = 0
ERROR = 0
TIMEOUT = 0
def check(cnfFile, result):
assignment = set(str(result).split()[2:-1])
ok = True
clsok = False
with open(cnfFile) as cnf:
for line in cnf:
clause = line.split()
if len(clause)==0 or clause[0] == 'p' or clause[0] == 'c':
continue
for v in clause:
if v == '0':
if not clsok:
ok = False
clsok = False
elif v in assignment:
clsok = True
return ok
for dirname, dirnames, filenames in os.walk(sys.argv[1]):
filenames.sort(key=lambda x: natsort_keygen(alg=ns.IGNORECASE)(x))
for filename in filenames:
# Run only .cnf files
if filename.find('.cnf') == -1:
continue
cnfFile = os.path.join(dirname, filename)
print(os.path.join(filename).ljust(20, " "), end='')
proc = subprocess.Popen([
solver, '-stdout', '-statistic',
cnfFile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
try:
outs, errs = proc.communicate(input=None, timeout=1200)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
TIMEOUT += 1
print('TIMEOUT\n')
continue
result = str(outs)
if proc.returncode != 0:
print('ERROR')
ERROR = ERROR + 1
elif result.find('UNSATISFIABLE') != -1:
print('UNSAT')
UNSAT = UNSAT + 1
else:
print('SAT', 'AC' if check(cnfFile, result) else 'WA')
SAT = SAT + 1
print(" statistic ".center(70, "-"))
print('SAT', SAT, '/ UNSAT', UNSAT, '/ ERROR', ERROR, '/ TIMEOUT', TIMEOUT)
print("".center(70, "="))
|
Add validator for all testcase in dirimport re
import os
import sys
import subprocess
from natsort import natsort_keygen, ns
if len(sys.argv) < 2:
print('python3 validator_all.py testbenchDir')
exit(0)
solver = './yasat' if len(sys.argv)<3 else sys.argv[2]
SAT = 0
UNSAT = 0
ERROR = 0
TIMEOUT = 0
def check(cnfFile, result):
assignment = set(str(result).split()[2:-1])
ok = True
clsok = False
with open(cnfFile) as cnf:
for line in cnf:
clause = line.split()
if len(clause)==0 or clause[0] == 'p' or clause[0] == 'c':
continue
for v in clause:
if v == '0':
if not clsok:
ok = False
clsok = False
elif v in assignment:
clsok = True
return ok
for dirname, dirnames, filenames in os.walk(sys.argv[1]):
filenames.sort(key=lambda x: natsort_keygen(alg=ns.IGNORECASE)(x))
for filename in filenames:
# Run only .cnf files
if filename.find('.cnf') == -1:
continue
cnfFile = os.path.join(dirname, filename)
print(os.path.join(filename).ljust(20, " "), end='')
proc = subprocess.Popen([
solver, '-stdout', '-statistic',
cnfFile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
try:
outs, errs = proc.communicate(input=None, timeout=1200)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
TIMEOUT += 1
print('TIMEOUT\n')
continue
result = str(outs)
if proc.returncode != 0:
print('ERROR')
ERROR = ERROR + 1
elif result.find('UNSATISFIABLE') != -1:
print('UNSAT')
UNSAT = UNSAT + 1
else:
print('SAT', 'AC' if check(cnfFile, result) else 'WA')
SAT = SAT + 1
print(" statistic ".center(70, "-"))
print('SAT', SAT, '/ UNSAT', UNSAT, '/ ERROR', ERROR, '/ TIMEOUT', TIMEOUT)
print("".center(70, "="))
|
<commit_before><commit_msg>Add validator for all testcase in dir<commit_after>import re
import os
import sys
import subprocess
from natsort import natsort_keygen, ns
if len(sys.argv) < 2:
print('python3 validator_all.py testbenchDir')
exit(0)
solver = './yasat' if len(sys.argv)<3 else sys.argv[2]
SAT = 0
UNSAT = 0
ERROR = 0
TIMEOUT = 0
def check(cnfFile, result):
assignment = set(str(result).split()[2:-1])
ok = True
clsok = False
with open(cnfFile) as cnf:
for line in cnf:
clause = line.split()
if len(clause)==0 or clause[0] == 'p' or clause[0] == 'c':
continue
for v in clause:
if v == '0':
if not clsok:
ok = False
clsok = False
elif v in assignment:
clsok = True
return ok
for dirname, dirnames, filenames in os.walk(sys.argv[1]):
filenames.sort(key=lambda x: natsort_keygen(alg=ns.IGNORECASE)(x))
for filename in filenames:
# Run only .cnf files
if filename.find('.cnf') == -1:
continue
cnfFile = os.path.join(dirname, filename)
print(os.path.join(filename).ljust(20, " "), end='')
proc = subprocess.Popen([
solver, '-stdout', '-statistic',
cnfFile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,)
try:
outs, errs = proc.communicate(input=None, timeout=1200)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
TIMEOUT += 1
print('TIMEOUT\n')
continue
result = str(outs)
if proc.returncode != 0:
print('ERROR')
ERROR = ERROR + 1
elif result.find('UNSATISFIABLE') != -1:
print('UNSAT')
UNSAT = UNSAT + 1
else:
print('SAT', 'AC' if check(cnfFile, result) else 'WA')
SAT = SAT + 1
print(" statistic ".center(70, "-"))
print('SAT', SAT, '/ UNSAT', UNSAT, '/ ERROR', ERROR, '/ TIMEOUT', TIMEOUT)
print("".center(70, "="))
|
|
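The check() routine can be sanity-checked without running a solver, assuming that function is in scope (imported or pasted into the same session). The CNF content and the fake result string below are made up for the example:
# Write a two-clause CNF: (x1 OR NOT x2) AND (x2).
with open("tiny.cnf", "w") as f:
    f.write("p cnf 2 2\n1 -2 0\n2 0\n")

# check() slices tokens [2:-1] out of the result string, so pad the
# assignment with two leading header tokens and a trailing terminator.
result = "s SATISFIABLE 1 2 0"
print(check("tiny.cnf", result))  # True: assignment {1, 2} satisfies both clauses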
bf616f52a2a90fc04921e22a530ab1038eb15b4b
|
python/02-2.py
|
python/02-2.py
|
#!/usr/bin/env python
import re
total = 0
with open('../inputs/02.txt') as f:
for line in f:
# data is like "1x2x3"
matches = re.split("(\d+)", line)
# 7 = 3 matches + 2 empty matches + 2 x characters
# The empty matches are because the expression matches the beginning and
# end of the string.
if len(matches) == 7:
l = int(matches[1])
w = int(matches[3])
h = int(matches[5])
dimensions = [l, w, h]
dimensions.sort()
shortPerimiter = (2 * dimensions[0]) + (2 * dimensions[1])
volume = l * w * h
total += shortPerimiter + volume
else:
print "Error: did not find 3 matches '%s'" % (line)
print total
|
Add day 2 part 2 solution.
|
Add day 2 part 2 solution.
|
Python
|
mit
|
opello/adventofcode
|
Add day 2 part 2 solution.
|
#!/usr/bin/env python
import re
total = 0
with open('../inputs/02.txt') as f:
for line in f:
# data is like "1x2x3"
matches = re.split("(\d+)", line)
# 7 = 3 matches + 2 empty matches + 2 x characters
# The empty matches are because the expression matches the beginning and
# end of the string.
if len(matches) == 7:
l = int(matches[1])
w = int(matches[3])
h = int(matches[5])
dimensions = [l, w, h]
dimensions.sort()
shortPerimiter = (2 * dimensions[0]) + (2 * dimensions[1])
volume = l * w * h
total += shortPerimiter + volume
else:
print "Error: did not find 3 matches '%s'" % (line)
print total
|
<commit_before><commit_msg>Add day 2 part 2 solution.<commit_after>
|
#!/usr/bin/env python
import re
total = 0
with open('../inputs/02.txt') as f:
for line in f:
# data is like "1x2x3"
matches = re.split("(\d+)", line)
# 7 = 3 matches + 2 empty matches + 2 x characters
# The empty matches are because the expression matches the beginning and
# end of the string.
if len(matches) == 7:
l = int(matches[1])
w = int(matches[3])
h = int(matches[5])
dimensions = [l, w, h]
dimensions.sort()
shortPerimiter = (2 * dimensions[0]) + (2 * dimensions[1])
volume = l * w * h
total += shortPerimiter + volume
else:
print "Error: did not find 3 matches '%s'" % (line)
print total
|
Add day 2 part 2 solution.#!/usr/bin/env python
import re
total = 0
with open('../inputs/02.txt') as f:
for line in f:
# data is like "1x2x3"
matches = re.split("(\d+)", line)
# 7 = 3 matches + 2 empty matches + 2 x characters
# The empty matches are because the expression matches the beginning and
# end of the string.
if len(matches) == 7:
l = int(matches[1])
w = int(matches[3])
h = int(matches[5])
dimensions = [l, w, h]
dimensions.sort()
shortPerimiter = (2 * dimensions[0]) + (2 * dimensions[1])
volume = l * w * h
total += shortPerimiter + volume
else:
print "Error: did not find 3 matches '%s'" % (line)
print total
|
<commit_before><commit_msg>Add day 2 part 2 solution.<commit_after>#!/usr/bin/env python
import re
total = 0
with open('../inputs/02.txt') as f:
for line in f:
# data is like "1x2x3"
matches = re.split("(\d+)", line)
# 7 = 3 matches + 2 empty matches + 2 x characters
# The empty matches are because the expression matches the beginning and
# end of the string.
if len(matches) == 7:
l = int(matches[1])
w = int(matches[3])
h = int(matches[5])
dimensions = [l, w, h]
dimensions.sort()
shortPerimiter = (2 * dimensions[0]) + (2 * dimensions[1])
volume = l * w * h
total += shortPerimiter + volume
else:
print "Error: did not find 3 matches '%s'" % (line)
print total
|
|
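As a quick sanity check of the formula, independent of the input file: a 2x3x4 present needs 2+2+3+3 = 10 feet of ribbon around its smallest perimeter, plus 2*3*4 = 24 feet for the bow, 34 feet in total.
l, w, h = 2, 3, 4
dims = sorted([l, w, h])
short_perimeter = 2 * dims[0] + 2 * dims[1]  # 10
bow = l * w * h                              # 24
print(short_perimeter + bow)                 # 34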
3c9fc65fefb21d5022c27a3321a930b8e7fff923
|
shapely/tests/test_collection.py
|
shapely/tests/test_collection.py
|
import unittest
from shapely.geometry.collection import GeometryCollection
class CollectionTestCase(unittest.TestCase):
def test_array_interface(self):
m = GeometryCollection()
self.failUnlessEqual(len(m), 0)
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CollectionTestCase)
|
Add test of empty geometry collection creation.
|
Add test of empty geometry collection creation.
|
Python
|
bsd-3-clause
|
abali96/Shapely,mouadino/Shapely,mindw/shapely,jdmcbr/Shapely,abali96/Shapely,mindw/shapely,mouadino/Shapely,jdmcbr/Shapely
|
Add test of empty geometry collection creation.
|
import unittest
from shapely.geometry.collection import GeometryCollection
class CollectionTestCase(unittest.TestCase):
def test_array_interface(self):
m = GeometryCollection()
self.failUnlessEqual(len(m), 0)
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CollectionTestCase)
|
<commit_before><commit_msg>Add test of empty geometry collection creation.<commit_after>
|
import unittest
from shapely.geometry.collection import GeometryCollection
class CollectionTestCase(unittest.TestCase):
def test_array_interface(self):
m = GeometryCollection()
self.failUnlessEqual(len(m), 0)
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CollectionTestCase)
|
Add test of empty geometry collection creation.import unittest
from shapely.geometry.collection import GeometryCollection
class CollectionTestCase(unittest.TestCase):
def test_array_interface(self):
m = GeometryCollection()
self.failUnlessEqual(len(m), 0)
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CollectionTestCase)
|
<commit_before><commit_msg>Add test of empty geometry collection creation.<commit_after>import unittest
from shapely.geometry.collection import GeometryCollection
class CollectionTestCase(unittest.TestCase):
def test_array_interface(self):
m = GeometryCollection()
self.failUnlessEqual(len(m), 0)
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CollectionTestCase)
|
|
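failUnlessEqual is a long-deprecated alias for assertEqual; on a current unittest the same test reads:
import unittest
from shapely.geometry.collection import GeometryCollection

class CollectionTestCase(unittest.TestCase):
    def test_array_interface(self):
        # assertEqual is the modern spelling of the deprecated failUnlessEqual.
        m = GeometryCollection()
        self.assertEqual(len(m), 0)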
67326682df2efaf013830df5985f479e5d0ce19c
|
datasciencebox/tests/salt/test_mesos_spark.py
|
datasciencebox/tests/salt/test_mesos_spark.py
|
import pytest
from hdfs.client import Client
import utils
def setup_module(module):
utils.invoke('install', 'spark')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.spark'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_hdfs_files():
project = utils.get_test_project()
master_ip = project.cluster.master.ip
hdfs = Client('http://%s:50070' % master_ip)
root_dirs = hdfs.list('/')
assert 'spark' in root_dirs
spark_dirs = hdfs.list('/spark')
assert 'spark-1.4.1-bin-hadoop2.6.tgz' in spark_dirs
|
Add simple spark on mesos test
|
Add simple spark on mesos test
|
Python
|
apache-2.0
|
danielfrg/datasciencebox,danielfrg/datasciencebox,danielfrg/datasciencebox,danielfrg/datasciencebox
|
Add simple spark on mesos test
|
import pytest
from hdfs.client import Client
import utils
def setup_module(module):
utils.invoke('install', 'spark')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.spark'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_hdfs_files():
project = utils.get_test_project()
master_ip = project.cluster.master.ip
hdfs = Client('http://%s:50070' % master_ip)
root_dirs = hdfs.list('/')
assert 'spark' in root_dirs
spark_dirs = hdfs.list('/spark')
assert 'spark-1.4.1-bin-hadoop2.6.tgz' in spark_dirs
|
<commit_before><commit_msg>Add simple spark on mesos test<commit_after>
|
import pytest
from hdfs.client import Client
import utils
def setup_module(module):
utils.invoke('install', 'spark')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.spark'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_hdfs_files():
project = utils.get_test_project()
master_ip = project.cluster.master.ip
hdfs = Client('http://%s:50070' % master_ip)
root_dirs = hdfs.list('/')
assert 'spark' in root_dirs
spark_dirs = hdfs.list('/spark')
assert 'spark-1.4.1-bin-hadoop2.6.tgz' in spark_dirs
|
Add simple spark on mesos testimport pytest
from hdfs.client import Client
import utils
def setup_module(module):
utils.invoke('install', 'spark')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.spark'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_hdfs_files():
project = utils.get_test_project()
master_ip = project.cluster.master.ip
hdfs = Client('http://%s:50070' % master_ip)
root_dirs = hdfs.list('/')
assert 'spark' in root_dirs
spark_dirs = hdfs.list('/spark')
assert 'spark-1.4.1-bin-hadoop2.6.tgz' in spark_dirs
|
<commit_before><commit_msg>Add simple spark on mesos test<commit_after>import pytest
from hdfs.client import Client
import utils
def setup_module(module):
utils.invoke('install', 'spark')
@utils.vagranttest
def test_salt_formulas():
project = utils.get_test_project()
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.zookeeper'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['cdh5.hdfs.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.cluster'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
out = project.salt('state.sls', args=['mesos.spark'], kwargs=kwargs)
utils.check_all_true(out, none_is_ok=True)
@utils.vagranttest
def test_hdfs_files():
project = utils.get_test_project()
master_ip = project.cluster.master.ip
hdfs = Client('http://%s:50070' % master_ip)
root_dirs = hdfs.list('/')
assert 'spark' in root_dirs
spark_dirs = hdfs.list('/spark')
assert 'spark-1.4.1-bin-hadoop2.6.tgz' in spark_dirs
|
|
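The four state.sls dry-run calls in test_salt_formulas differ only by formula name, so they collapse naturally into a loop. A sketch reusing the same helpers (project and utils refer to the test module's own objects):
import utils  # the same helper module used by the tests above

FORMULAS = ['cdh5.zookeeper', 'cdh5.hdfs.cluster', 'mesos.cluster', 'mesos.spark']

def check_formulas(project):
    # Dry-run each formula (test=true) and assert every state would pass.
    for formula in FORMULAS:
        kwargs = {'test': 'true', '--out': 'json', '--out-indent': '-1'}
        out = project.salt('state.sls', args=[formula], kwargs=kwargs)
        utils.check_all_true(out, none_is_ok=True)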
0701fae308161caf92ac89b8d8fc765258c6f38f
|
actions/cloudbolt_plugins/aws/spending_anomaly_detection/hourly_spending_anomaly_detection.py
|
actions/cloudbolt_plugins/aws/spending_anomaly_detection/hourly_spending_anomaly_detection.py
|
"""
Checks the last hour of AWS Billing Data from S3 buckets, and sends an alert if
any servers have exceeded the specified threshold.
Note: This Action assumes that you have one or more configured Alert Channels
that can be used for notifying the appropriate users. These channels must
be listed in ALERT_CHANNEL_NAMES below.
Version:
Requires >= v9.0.1
"""
import json
from typing import List
from alerts.methods import alert
from resourcehandlers.aws.models import AWSHandler
AWS_COST_THRESHOLD = 100
ALERT_CHANNEL_NAMES: List[str] = []
class AWSCostAlert:
def __init__(self):
self.over_threshold = []
def __call__(self):
"""
Retrieve cost data for all AWS Resource Handlers and alert if any
servers exceed the specified threshold.
"""
aws_instances = AWSHandler.objects.all()
for aws in aws_instances:
last_hours_data = aws.get_last_hour_billing_data()
self.__check_threshold(last_hours_data)
_ = self.__alert_over_threshold()
def __check_threshold(self, data_list):
"""
Parse server information for instances that exceed the threshold.
Args:
data_list (List[Tuple[str, float]]): [(server_id, cost)]
"""
_exceed_threshold_list = []
for server_id, cost in data_list:
if cost >= AWS_COST_THRESHOLD:
_exceed_threshold_list.append((server_id, cost))
self.over_threshold.extend(_exceed_threshold_list)
return
def __alert_over_threshold(self):
"""
Send alert for all server instances that exceed the threshold.
Returns:
None
"""
instance_dict = dict(self.over_threshold)
instance_json = json.dumps(instance_dict, indent=4)
message = (
f"The following servers exceeded the AWS cost threshold of {AWS_COST_THRESHOLD}:"
f"{instance_json}"
)
for channel_name in ALERT_CHANNEL_NAMES:
alert(channel_name, message)
return None
def run(*args, **kwargs):
cost_alert = AWSCostAlert()
cost_alert()
return "SUCCESS", "", ""
|
Add AWS Hourly Spending Anomaly Detection
|
Add AWS Hourly Spending Anomaly Detection
New plugin checks hourly spending data for all AWS instances and sends
an alert if the cost threshold has been exceeded.
[DEV-13347]
|
Python
|
apache-2.0
|
CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge
|
Add AWS Hourly Spending Anomaly Detection
New plugin checks hourly spending data for all AWS instances and sends
an alert if the cost threshold has been exceeded.
[DEV-13347]
|
"""
Checks the last hour of AWS Billing Data from S3 buckets, and sends an alert if
any servers have exceeded the specified threshold.
Note: This Action assumes that you have one or more configured Alert Channels
that can be used for notifying the appropriate users. These channels must
be listed in ALERT_CHANNEL_NAMES below.
Version:
Requires >= v9.0.1
"""
import json
from typing import List
from alerts.methods import alert
from resourcehandlers.aws.models import AWSHandler
AWS_COST_THRESHOLD = 100
ALERT_CHANNEL_NAMES: List[str] = []
class AWSCostAlert:
def __init__(self):
self.over_threshold = []
def __call__(self):
"""
Retrieve cost data for all AWS Resource Handlers and alert if any
servers exceed the specified threshold.
"""
aws_instances = AWSHandler.objects.all()
for aws in aws_instances:
last_hours_data = aws.get_last_hour_billing_data()
self.__check_threshold(last_hours_data)
_ = self.__alert_over_threshold()
def __check_threshold(self, data_list):
"""
Parse server information for instances that exceed the threshold.
Args:
data_list (List[Tuple[str, float]]): [(server_id, cost)]
"""
_exceed_threshold_list = []
for server_id, cost in data_list:
if cost >= AWS_COST_THRESHOLD:
_exceed_threshold_list.append((server_id, cost))
self.over_threshold.extend(_exceed_threshold_list)
return
def __alert_over_threshold(self):
"""
Send alert for all server instances that exceed the threshold.
Returns:
None
"""
instance_dict = dict(self.over_threshold)
instance_json = json.dumps(instance_dict, indent=4)
message = (
f"The following servers exceeded the AWS cost threshold of {AWS_COST_THRESHOLD}:"
f"{instance_json}"
)
for channel_name in ALERT_CHANNEL_NAMES:
alert(channel_name, message)
return None
def run(*args, **kwargs):
cost_alert = AWSCostAlert()
cost_alert()
return "SUCCESS", "", ""
|
<commit_before><commit_msg>Add AWS Hourly Spending Anomaly Detection
New plugin checks hourly spending data for all AWS instances and sends
an alert if the cost threshold has been exceeded.
[DEV-13347]<commit_after>
|
"""
Checks the last hour of AWS Billing Data from S3 buckets, and sends an alert if
any servers have exceeded the specified threshold.
Note: This Action assumes that you have one or more configured Alert Channels
that can be used for notifying the appropriate users. These channels must
be listed in ALERT_CHANNEL_NAMES below.
Version:
Requires >= v9.0.1
"""
import json
from typing import List
from alerts.methods import alert
from resourcehandlers.aws.models import AWSHandler
AWS_COST_THRESHOLD = 100
ALERT_CHANNEL_NAMES: List[str] = []
class AWSCostAlert:
def __init__(self):
self.over_threshold = []
def __call__(self):
"""
Retrieve cost data for all AWS Resource Handlers and alert if any
servers exceed the specified threshold.
"""
aws_instances = AWSHandler.objects.all()
for aws in aws_instances:
last_hours_data = aws.get_last_hour_billing_data()
self.__check_threshold(last_hours_data)
_ = self.__alert_over_threshold()
def __check_threshold(self, data_list):
"""
Parse server information for instances that exceed the threshold.
Args:
data_list (List[Tuple[str, float]]): [(server_id, cost)]
"""
_exceed_threshold_list = []
for server_id, cost in data_list:
if cost >= AWS_COST_THRESHOLD:
_exceed_threshold_list.append((server_id, cost))
self.over_threshold.extend(_exceed_threshold_list)
return
def __alert_over_threshold(self):
"""
Send alert for all server instances that exceed the threshold.
Returns:
None
"""
instance_dict = dict(self.over_threshold)
instance_json = json.dumps(instance_dict, indent=4)
message = (
f"The following servers exceeded the AWS cost threshold of {AWS_COST_THRESHOLD}:"
f"{instance_json}"
)
for channel_name in ALERT_CHANNEL_NAMES:
alert(channel_name, message)
return None
def run(*args, **kwargs):
cost_alert = AWSCostAlert()
cost_alert()
return "SUCCESS", "", ""
|
Add AWS Hourly Spending Anomaly Detection
New plugin checks hourly spending data for all AWS instances and sends
an alert if the cost threshold has been exceeded.
[DEV-13347]"""
Checks the last hour of AWS Billing Data from S3 buckets, and sends an alert if
any servers have exceeded the specified threshold.
Note: This Action assumes that you have one or more configured Alert Channels
that can be used for notifying the appropriate users. These channels must
be listed in ALERT_CHANNEL_NAMES below.
Version:
Requires >= v9.0.1
"""
import json
from typing import List
from alerts.methods import alert
from resourcehandlers.aws.models import AWSHandler
AWS_COST_THRESHOLD = 100
ALERT_CHANNEL_NAMES: List[str] = []
class AWSCostAlert:
def __init__(self):
self.over_threshold = []
def __call__(self):
"""
Retrieve cost data for all AWS Resource Handlers and alert if any
servers exceed the specified threshold.
"""
aws_instances = AWSHandler.objects.all()
for aws in aws_instances:
last_hours_data = aws.get_last_hour_billing_data()
self.__check_threshold(last_hours_data)
_ = self.__alert_over_threshold()
def __check_threshold(self, data_list):
"""
Parse server information for instances that exceed the threshold.
Args:
data_list (List[Tuple[str, float]]): [(server_id, cost)]
"""
_exceed_threshold_list = []
for server_id, cost in data_list:
if cost >= AWS_COST_THRESHOLD:
_exceed_threshold_list.append((server_id, cost))
self.over_threshold.extend(_exceed_threshold_list)
return
def __alert_over_threshold(self):
"""
Send alert for all server instances that exceed the threshold.
Returns:
None
"""
instance_dict = dict(self.over_threshold)
instance_json = json.dumps(instance_dict, indent=4)
message = (
f"The following servers exceeded the AWS cost threshold of {AWS_COST_THRESHOLD}:"
f"{instance_json}"
)
for channel_name in ALERT_CHANNEL_NAMES:
alert(channel_name, message)
return None
def run(*args, **kwargs):
cost_alert = AWSCostAlert()
cost_alert()
return "SUCCESS", "", ""
|
<commit_before><commit_msg>Add AWS Hourly Spending Anomaly Detection
New plugin checks hourly spending data for all AWS instances and sends
an alert if the cost threshold has been exceeded.
[DEV-13347]<commit_after>"""
Checks the last hour of AWS Billing Data from S3 buckets, and sends an alert if
any servers have exceeded the specified threshold.
Note: This Action assumes that you have one or more configured Alert Channels
that can be used for notifying the appropriate users. These channels must
be listed in ALERT_CHANNEL_NAMES below.
Version:
Requires >= v9.0.1
"""
import json
from typing import List
from alerts.methods import alert
from resourcehandlers.aws.models import AWSHandler
AWS_COST_THRESHOLD = 100
ALERT_CHANNEL_NAMES: List[str] = []
class AWSCostAlert:
def __init__(self):
self.over_threshold = []
def __call__(self):
"""
Retrieve cost data for all AWS Resource Handlers and alert if any
servers exceed the specified threshold.
"""
aws_instances = AWSHandler.objects.all()
for aws in aws_instances:
last_hours_data = aws.get_last_hour_billing_data()
self.__check_threshold(last_hours_data)
_ = self.__alert_over_threshold()
def __check_threshold(self, data_list):
"""
Parse server information for instances that exceed the threshold.
Args:
data_list (List[Tuple[str, float]]): [(server_id, cost)]
"""
_exceed_threshold_list = []
for server_id, cost in data_list:
if cost >= AWS_COST_THRESHOLD:
_exceed_threshold_list.append((server_id, cost))
self.over_threshold.extend(_exceed_threshold_list)
return
def __alert_over_threshold(self):
"""
Send alert for all server instances that exceed the threshold.
Returns:
None
"""
instance_dict = dict(self.over_threshold)
instance_json = json.dumps(instance_dict, indent=4)
message = (
f"The following servers exceeded the AWS cost threshold of {AWS_COST_THRESHOLD}:"
f"{instance_json}"
)
for channel_name in ALERT_CHANNEL_NAMES:
alert(channel_name, message)
return None
def run(*args, **kwargs):
cost_alert = AWSCostAlert()
cost_alert()
return "SUCCESS", "", ""
|
|
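A minimal sketch of exercising the plugin's threshold logic outside CloudBolt. It relies only on the data shape the code above already assumes (a list of (server_id, cost) pairs) and calls the name-mangled private method directly; the instance IDs are hypothetical placeholders.
# Sketch only: drive AWSCostAlert.__check_threshold with hand-made billing rows.
checker = AWSCostAlert()
checker._AWSCostAlert__check_threshold([("i-0abc123", 42.0), ("i-0def456", 150.5)])
# With AWS_COST_THRESHOLD = 100, only the second server is retained.
print(checker.over_threshold)  # [('i-0def456', 150.5)]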
db8b342e999cc75cd776d8ca1821522145610d1d
|
ratechecker/migrations/0002_remove_fee_loader.py
|
ratechecker/migrations/0002_remove_fee_loader.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
|
Add ratechecker migration to remove_fee_loader
|
Add ratechecker migration to remove_fee_loader
|
Python
|
cc0-1.0
|
cfpb/owning-a-home-api
|
Add ratechecker migration to remove_fee_loader
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
|
<commit_before><commit_msg>Add ratechecker migration to remove_fee_loader<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
|
Add ratechecker migration to remove_fee_loader# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
|
<commit_before><commit_msg>Add ratechecker migration to remove_fee_loader<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-10-31 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ratechecker', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='fee',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fee',
name='plan',
),
migrations.DeleteModel(
name='Fee',
),
]
|
|
f03f5522fc8e57b2801f967c93b7e0de60688801
|
src/test/test_findlines.py
|
src/test/test_findlines.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
import unittest
import cv
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipfblock.findlines
class TestFindLinesBlock(unittest.TestCase):
def setUp(self):
self.block = ipf.ipfblock.findlines.FindLines()
self.block.properties["distance_resolution"].set_value(5)
self.block.properties["angle_resolution"].set_value(0.1)
self.block.properties["threshold"].set_value(70)
self.block.properties["min_length"].set_value(140)
self.block.properties["max_gap"].set_value(1)
def test_zero_image(self):
zero_image = cv.CreateImage( (0, 0), cv.IPL_DEPTH_8U, 1)
self.block.input_ports["input_image"].pass_value(zero_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(output_array == [])
def test_find_lines(self):
image = cv.LoadImage("files/test_adaptive_threshold.png")
gray_image = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(image, gray_image, cv.CV_RGB2GRAY)
self.block.input_ports["input_image"].pass_value(gray_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(len(output_array) == 8)
if __name__ == '__main__':
unittest.main()
|
Add test case for FindLines block
|
Add test case for FindLines block
|
Python
|
lgpl-2.1
|
anton-golubkov/Garland,anton-golubkov/Garland
|
Add test case for FindLines block
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
import unittest
import cv
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipfblock.findlines
class TestFindLinesBlock(unittest.TestCase):
def setUp(self):
self.block = ipf.ipfblock.findlines.FindLines()
self.block.properties["distance_resolution"].set_value(5)
self.block.properties["angle_resolution"].set_value(0.1)
self.block.properties["threshold"].set_value(70)
self.block.properties["min_length"].set_value(140)
self.block.properties["max_gap"].set_value(1)
def test_zero_image(self):
zero_image = cv.CreateImage( (0, 0), cv.IPL_DEPTH_8U, 1)
self.block.input_ports["input_image"].pass_value(zero_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(output_array == [])
def test_find_lines(self):
image = cv.LoadImage("files/test_adaptive_threshold.png")
gray_image = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(image, gray_image, cv.CV_RGB2GRAY)
self.block.input_ports["input_image"].pass_value(gray_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(len(output_array) == 8)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test case for FindLines block<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
import unittest
import cv
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipfblock.findlines
class TestFindLinesBlock(unittest.TestCase):
def setUp(self):
self.block = ipf.ipfblock.findlines.FindLines()
self.block.properties["distance_resolution"].set_value(5)
self.block.properties["angle_resolution"].set_value(0.1)
self.block.properties["threshold"].set_value(70)
self.block.properties["min_length"].set_value(140)
self.block.properties["max_gap"].set_value(1)
def test_zero_image(self):
zero_image = cv.CreateImage( (0, 0), cv.IPL_DEPTH_8U, 1)
self.block.input_ports["input_image"].pass_value(zero_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(output_array == [])
def test_find_lines(self):
image = cv.LoadImage("files/test_adaptive_threshold.png")
gray_image = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(image, gray_image, cv.CV_RGB2GRAY)
self.block.input_ports["input_image"].pass_value(gray_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(len(output_array) == 8)
if __name__ == '__main__':
unittest.main()
|
Add test case for FindLines block#!/usr/bin/python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
import unittest
import cv
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipfblock.findlines
class TestFindLinesBlock(unittest.TestCase):
def setUp(self):
self.block = ipf.ipfblock.findlines.FindLines()
self.block.properties["distance_resolution"].set_value(5)
self.block.properties["angle_resolution"].set_value(0.1)
self.block.properties["threshold"].set_value(70)
self.block.properties["min_length"].set_value(140)
self.block.properties["max_gap"].set_value(1)
def test_zero_image(self):
zero_image = cv.CreateImage( (0, 0), cv.IPL_DEPTH_8U, 1)
self.block.input_ports["input_image"].pass_value(zero_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(output_array == [])
def test_find_lines(self):
image = cv.LoadImage("files/test_adaptive_threshold.png")
gray_image = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(image, gray_image, cv.CV_RGB2GRAY)
self.block.input_ports["input_image"].pass_value(gray_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(len(output_array) == 8)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test case for FindLines block<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
import unittest
import cv
import os, sys
cmd_folder, f = os.path.split(os.path.dirname(os.path.abspath(__file__)))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import ipf.ipfblock.findlines
class TestFindLinesBlock(unittest.TestCase):
def setUp(self):
self.block = ipf.ipfblock.findlines.FindLines()
self.block.properties["distance_resolution"].set_value(5)
self.block.properties["angle_resolution"].set_value(0.1)
self.block.properties["threshold"].set_value(70)
self.block.properties["min_length"].set_value(140)
self.block.properties["max_gap"].set_value(1)
def test_zero_image(self):
zero_image = cv.CreateImage( (0, 0), cv.IPL_DEPTH_8U, 1)
self.block.input_ports["input_image"].pass_value(zero_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(output_array == [])
def test_find_lines(self):
image = cv.LoadImage("files/test_adaptive_threshold.png")
gray_image = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
cv.CvtColor(image, gray_image, cv.CV_RGB2GRAY)
self.block.input_ports["input_image"].pass_value(gray_image)
self.block.process()
output_array = self.block.output_ports["output_array"].get_value()
self.assertTrue(len(output_array) == 8)
if __name__ == '__main__':
unittest.main()
|
|
1ff90eec99c1d0f8130552c9f4323f3b099a8231
|
CodeFights/pressureGauges.py
|
CodeFights/pressureGauges.py
|
#!/usr/local/bin/python
# Code Fights Pressure Gauges Problem
def pressureGauges(morning, evening):
return [list(map(min, zip(morning, evening))),
list(map(max, zip(morning, evening)))]
def main():
tests = [
[[3, 5, 2, 6], [1, 6, 6, 6], [[1, 5, 2, 6], [3, 6, 6, 6]]],
[[0, 12, 478, 23, 1000], [48, 23, 56, 23, 88],
[[0, 12, 56, 23, 88], [48, 23, 478, 23, 1000]]],
[[8], [1], [[1], [8]]],
[[129, 553, 585], [852, 601, 997], [[129, 553, 585], [852, 601, 997]]],
[[734, 483, 87, 499, 931, 657, 833],
[316, 511, 592, 355, 819, 621, 419],
[[316, 483, 87, 355, 819, 621, 419],
[734, 511, 592, 499, 931, 657, 833]]]
]
for t in tests:
res = pressureGauges(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pressureGauges({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pressureGauges({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights pressure gauges problem
|
Solve Code Fights pressure gauges problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights pressure gauges problem
|
#!/usr/local/bin/python
# Code Fights Pressure Gauges Problem
def pressureGauges(morning, evening):
return [list(map(min, zip(morning, evening))),
list(map(max, zip(morning, evening)))]
def main():
tests = [
[[3, 5, 2, 6], [1, 6, 6, 6], [[1, 5, 2, 6], [3, 6, 6, 6]]],
[[0, 12, 478, 23, 1000], [48, 23, 56, 23, 88],
[[0, 12, 56, 23, 88], [48, 23, 478, 23, 1000]]],
[[8], [1], [[1], [8]]],
[[129, 553, 585], [852, 601, 997], [[129, 553, 585], [852, 601, 997]]],
[[734, 483, 87, 499, 931, 657, 833],
[316, 511, 592, 355, 819, 621, 419],
[[316, 483, 87, 355, 819, 621, 419],
[734, 511, 592, 499, 931, 657, 833]]]
]
for t in tests:
res = pressureGauges(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pressureGauges({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pressureGauges({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights pressure gauges problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Pressure Gauges Problem
def pressureGauges(morning, evening):
return [list(map(min, zip(morning, evening))),
list(map(max, zip(morning, evening)))]
def main():
tests = [
[[3, 5, 2, 6], [1, 6, 6, 6], [[1, 5, 2, 6], [3, 6, 6, 6]]],
[[0, 12, 478, 23, 1000], [48, 23, 56, 23, 88],
[[0, 12, 56, 23, 88], [48, 23, 478, 23, 1000]]],
[[8], [1], [[1], [8]]],
[[129, 553, 585], [852, 601, 997], [[129, 553, 585], [852, 601, 997]]],
[[734, 483, 87, 499, 931, 657, 833],
[316, 511, 592, 355, 819, 621, 419],
[[316, 483, 87, 355, 819, 621, 419],
[734, 511, 592, 499, 931, 657, 833]]]
]
for t in tests:
res = pressureGauges(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pressureGauges({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pressureGauges({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights pressure gauges problem#!/usr/local/bin/python
# Code Fights Pressure Gauges Problem
def pressureGauges(morning, evening):
return [list(map(min, zip(morning, evening))),
list(map(max, zip(morning, evening)))]
def main():
tests = [
[[3, 5, 2, 6], [1, 6, 6, 6], [[1, 5, 2, 6], [3, 6, 6, 6]]],
[[0, 12, 478, 23, 1000], [48, 23, 56, 23, 88],
[[0, 12, 56, 23, 88], [48, 23, 478, 23, 1000]]],
[[8], [1], [[1], [8]]],
[[129, 553, 585], [852, 601, 997], [[129, 553, 585], [852, 601, 997]]],
[[734, 483, 87, 499, 931, 657, 833],
[316, 511, 592, 355, 819, 621, 419],
[[316, 483, 87, 355, 819, 621, 419],
[734, 511, 592, 499, 931, 657, 833]]]
]
for t in tests:
res = pressureGauges(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pressureGauges({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pressureGauges({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights pressure gauges problem<commit_after>#!/usr/local/bin/python
# Code Fights Pressure Gauges Problem
def pressureGauges(morning, evening):
return [list(map(min, zip(morning, evening))),
list(map(max, zip(morning, evening)))]
def main():
tests = [
[[3, 5, 2, 6], [1, 6, 6, 6], [[1, 5, 2, 6], [3, 6, 6, 6]]],
[[0, 12, 478, 23, 1000], [48, 23, 56, 23, 88],
[[0, 12, 56, 23, 88], [48, 23, 478, 23, 1000]]],
[[8], [1], [[1], [8]]],
[[129, 553, 585], [852, 601, 997], [[129, 553, 585], [852, 601, 997]]],
[[734, 483, 87, 499, 931, 657, 833],
[316, 511, 592, 355, 819, 621, 419],
[[316, 483, 87, 355, 819, 621, 419],
[734, 511, 592, 499, 931, 657, 833]]]
]
for t in tests:
res = pressureGauges(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: pressureGauges({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: pressureGauges({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
|
2570564f9b6334a14e1b06c3cc71afa3576f6e22
|
notifications/alliance_selections.py
|
notifications/alliance_selections.py
|
from consts.notification_type import NotificationType
from helpers.model_to_dict import ModelToDict
from notifications.base_notification import BaseNotification
class AllianceSelectionNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.ALLIANCE_SELECTION]
data['message_data'] = {}
data['message_data']['event'] = ModelToDict.eventConverter(self.event)
return data
|
Add notification for alliance selections
|
Add notification for alliance selections
|
Python
|
mit
|
phil-lopreiato/the-blue-alliance,josephbisch/the-blue-alliance,bdaroz/the-blue-alliance,josephbisch/the-blue-alliance,jaredhasenklein/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,bdaroz/the-blue-alliance,nwalters512/the-blue-alliance,nwalters512/the-blue-alliance,bvisness/the-blue-alliance,fangeugene/the-blue-alliance,nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,bvisness/the-blue-alliance,josephbisch/the-blue-alliance,1fish2/the-blue-alliance,bvisness/the-blue-alliance,bvisness/the-blue-alliance,phil-lopreiato/the-blue-alliance,synth3tk/the-blue-alliance,tsteward/the-blue-alliance,nwalters512/the-blue-alliance,bdaroz/the-blue-alliance,verycumbersome/the-blue-alliance,bvisness/the-blue-alliance,tsteward/the-blue-alliance,bdaroz/the-blue-alliance,1fish2/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,fangeugene/the-blue-alliance,phil-lopreiato/the-blue-alliance,josephbisch/the-blue-alliance,jaredhasenklein/the-blue-alliance,the-blue-alliance/the-blue-alliance,tsteward/the-blue-alliance,1fish2/the-blue-alliance,synth3tk/the-blue-alliance,bvisness/the-blue-alliance,josephbisch/the-blue-alliance,fangeugene/the-blue-alliance,nwalters512/the-blue-alliance,tsteward/the-blue-alliance,fangeugene/the-blue-alliance,tsteward/the-blue-alliance,1fish2/the-blue-alliance,phil-lopreiato/the-blue-alliance,phil-lopreiato/the-blue-alliance,josephbisch/the-blue-alliance,synth3tk/the-blue-alliance,jaredhasenklein/the-blue-alliance,verycumbersome/the-blue-alliance,synth3tk/the-blue-alliance,nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,phil-lopreiato/the-blue-alliance,the-blue-alliance/the-blue-alliance,synth3tk/the-blue-alliance,jaredhasenklein/the-blue-alliance,fangeugene/the-blue-alliance,verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,the-blue-alliance/the-blue-alliance,1fish2/the-blue-alliance,verycumbersome/the-blue-alliance,1fish2/the-blue-alliance,the-blue-alliance/the-blue-alliance,synth3tk/the-blue-alliance
|
Add notification for alliance selections
|
from consts.notification_type import NotificationType
from helpers.model_to_dict import ModelToDict
from notifications.base_notification import BaseNotification
class AllianceSelectionNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.ALLIANCE_SELECTION]
data['message_data'] = {}
data['message_data']['event'] = ModelToDict.eventConverter(self.event)
return data
|
<commit_before><commit_msg>Add notification for alliance selections<commit_after>
|
from consts.notification_type import NotificationType
from helpers.model_to_dict import ModelToDict
from notifications.base_notification import BaseNotification
class AllianceSelectionNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.ALLIANCE_SELECTION]
data['message_data'] = {}
data['message_data']['event'] = ModelToDict.eventConverter(self.event)
return data
|
Add notification for alliance selectionsfrom consts.notification_type import NotificationType
from helpers.model_to_dict import ModelToDict
from notifications.base_notification import BaseNotification
class AllianceSelectionNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.ALLIANCE_SELECTION]
data['message_data'] = {}
data['message_data']['event'] = ModelToDict.eventConverter(self.event)
return data
|
<commit_before><commit_msg>Add notification for alliance selections<commit_after>from consts.notification_type import NotificationType
from helpers.model_to_dict import ModelToDict
from notifications.base_notification import BaseNotification
class AllianceSelectionNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.ALLIANCE_SELECTION]
data['message_data'] = {}
data['message_data']['event'] = ModelToDict.eventConverter(self.event)
return data
|
|
f9524d36fa2b43a71726b7935af5d8d57c88a426
|
kirppu/management/commands/accounting_data.py
|
kirppu/management/commands/accounting_data.py
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import activate
from kirppu.accounting import accounting_receipt
class Command(BaseCommand):
help = 'Dump accounting CSV to standard output'
def add_arguments(self, parser):
parser.add_argument('--lang', type=str, help="Change language, for example: en")
def handle(self, *args, **options):
if "lang" in options:
activate(options["lang"])
accounting_receipt(self.stdout)
|
Add commandline command for creating accounting data.
|
Add commandline command for creating accounting data.
|
Python
|
mit
|
jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu
|
Add commandline command for creating accounting data.
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import activate
from kirppu.accounting import accounting_receipt
class Command(BaseCommand):
help = 'Dump accounting CSV to standard output'
def add_arguments(self, parser):
parser.add_argument('--lang', type=str, help="Change language, for example: en")
def handle(self, *args, **options):
if "lang" in options:
activate(options["lang"])
accounting_receipt(self.stdout)
|
<commit_before><commit_msg>Add commandline command for creating accounting data.<commit_after>
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import activate
from kirppu.accounting import accounting_receipt
class Command(BaseCommand):
help = 'Dump accounting CSV to standard output'
def add_arguments(self, parser):
parser.add_argument('--lang', type=str, help="Change language, for example: en")
def handle(self, *args, **options):
if "lang" in options:
activate(options["lang"])
accounting_receipt(self.stdout)
|
Add commandline command for creating accounting data.# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import activate
from kirppu.accounting import accounting_receipt
class Command(BaseCommand):
help = 'Dump accounting CSV to standard output'
def add_arguments(self, parser):
parser.add_argument('--lang', type=str, help="Change language, for example: en")
def handle(self, *args, **options):
if "lang" in options:
activate(options["lang"])
accounting_receipt(self.stdout)
|
<commit_before><commit_msg>Add commandline command for creating accounting data.<commit_after># -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.utils.translation import activate
from kirppu.accounting import accounting_receipt
class Command(BaseCommand):
help = 'Dump accounting CSV to standard output'
def add_arguments(self, parser):
parser.add_argument('--lang', type=str, help="Change language, for example: en")
def handle(self, *args, **options):
if "lang" in options:
activate(options["lang"])
accounting_receipt(self.stdout)
|
|
f674dcc0aa16753a3cc26bdd3fa1087f7277a9ed
|
python-real-one-liner/abcdefghppp.py
|
python-real-one-liner/abcdefghppp.py
|
#!/usr/bin/env pypy3
from itertools import permutations as p
print([i for i in p(set(range(10)) - {1}, 8) if all([i[j] != 0 for j in range(0, 7, 2)]) and (i[0] * 10 + i[1])-(i[2] * 10 + i[3]) == i[4] * 10 + i[5] and (i[4] * 10 + i[5]) + (i[6] * 10 + i[7]) == 111])
|
Add a real python one-liner
|
Add a real python one-liner
|
Python
|
mit
|
phiSgr/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,dng8888/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP,phiSgr/ABCDEFGHPPP,mingchuno/ABCDEFGHPPP
|
Add a real python one-liner
|
#!/usr/bin/env pypy3
from itertools import permutations as p
print([i for i in p(set(range(10)) - {1}, 8) if all([i[j] != 0 for j in range(0, 7, 2)]) and (i[0] * 10 + i[1])-(i[2] * 10 + i[3]) == i[4] * 10 + i[5] and (i[4] * 10 + i[5]) + (i[6] * 10 + i[7]) == 111])
|
<commit_before><commit_msg>Add a real python one-liner<commit_after>
|
#!/usr/bin/env pypy3
from itertools import permutations as p
print([i for i in p(set(range(10)) - {1}, 8) if all([i[j] != 0 for j in range(0, 7, 2)]) and (i[0] * 10 + i[1])-(i[2] * 10 + i[3]) == i[4] * 10 + i[5] and (i[4] * 10 + i[5]) + (i[6] * 10 + i[7]) == 111])
|
Add a real python one-liner#!/usr/bin/env pypy3
from itertools import permutations as p
print([i for i in p(set(range(10)) - {1}, 8) if all([i[j] != 0 for j in range(0, 7, 2)]) and (i[0] * 10 + i[1])-(i[2] * 10 + i[3]) == i[4] * 10 + i[5] and (i[4] * 10 + i[5]) + (i[6] * 10 + i[7]) == 111])
|
<commit_before><commit_msg>Add a real python one-liner<commit_after>#!/usr/bin/env pypy3
from itertools import permutations as p
print([i for i in p(set(range(10)) - {1}, 8) if all([i[j] != 0 for j in range(0, 7, 2)]) and (i[0] * 10 + i[1])-(i[2] * 10 + i[3]) == i[4] * 10 + i[5] and (i[4] * 10 + i[5]) + (i[6] * 10 + i[7]) == 111])
|
|
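The one-liner above brute-forces the ABCDEFGHPPP puzzle: choose eight distinct digits (1 is excluded from the candidates, consistent with P being 1 in PPP = 111), require the tens digits to be nonzero, and keep assignments where AB - CD = EF and EF + GH = 111. An expanded, readable equivalent of the same search, for reference:
#!/usr/bin/env pypy3
# Readable expansion of the one-liner above; identical constraints and output tuples.
from itertools import permutations

solutions = []
for a, b, c, d, e, f, g, h in permutations(set(range(10)) - {1}, 8):
    if 0 in (a, c, e, g):  # leading digits of AB, CD, EF, GH must be nonzero
        continue
    ab, cd, ef, gh = 10 * a + b, 10 * c + d, 10 * e + f, 10 * g + h
    if ab - cd == ef and ef + gh == 111:  # PPP = 111, so P = 1
        solutions.append((a, b, c, d, e, f, g, h))
print(solutions)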
871cba8009b383c89023dbc39db75be27bad0ee0
|
utils/stats.py
|
utils/stats.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import json
def main():
"""TODO..."""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on results files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# PARSE THE RESULTS FILES #################################################
with open(json_file_path, "r") as fd:
data = json.load(fd)
result_list = [game["winner"] for game in data["game_log_list"]]
print("player1: {} ({:.2f}%)".format(result_list.count(0), 100. * result_list.count(0)/len(result_list)))
print("player2: {} ({:.2f}%)".format(result_list.count(1), 100. * result_list.count(1)/len(result_list)))
print("draw: {} ({:.2f}%)".format(result_list.count(None), 100. * result_list.count(None)/len(result_list)))
if __name__ == '__main__':
main()
|
Add a script to display statistics about games result.
|
Add a script to display statistics about games result.
|
Python
|
mit
|
jeremiedecock/tictactoe-py,jeremiedecock/tictactoe-py
|
Add a script to display statistics about games result.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import json
def main():
"""TODO..."""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on results files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# PARSE THE RESULTS FILES #################################################
with open(json_file_path, "r") as fd:
data = json.load(fd)
result_list = [game["winner"] for game in data["game_log_list"]]
print("player1: {} ({:.2f}%)".format(result_list.count(0), 100. * result_list.count(0)/len(result_list)))
print("player2: {} ({:.2f}%)".format(result_list.count(1), 100. * result_list.count(1)/len(result_list)))
print("draw: {} ({:.2f}%)".format(result_list.count(None), 100. * result_list.count(None)/len(result_list)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to display statistics about games result.<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import json
def main():
"""TODO..."""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on results files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# PARSE THE RESULTS FILES #################################################
with open(json_file_path, "r") as fd:
data = json.load(fd)
result_list = [game["winner"] for game in data["game_log_list"]]
print("player1: {} ({:.2f}%)".format(result_list.count(0), 100. * result_list.count(0)/len(result_list)))
print("player2: {} ({:.2f}%)".format(result_list.count(1), 100. * result_list.count(1)/len(result_list)))
print("draw: {} ({:.2f}%)".format(result_list.count(None), 100. * result_list.count(None)/len(result_list)))
if __name__ == '__main__':
main()
|
Add a script to display statistics about games result.#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import json
def main():
"""TODO..."""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on results files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# PARSE THE RESULTS FILES #################################################
with open(json_file_path, "r") as fd:
data = json.load(fd)
result_list = [game["winner"] for game in data["game_log_list"]]
print("player1: {} ({:.2f}%)".format(result_list.count(0), 100. * result_list.count(0)/len(result_list)))
print("player2: {} ({:.2f}%)".format(result_list.count(1), 100. * result_list.count(1)/len(result_list)))
print("draw: {} ({:.2f}%)".format(result_list.count(None), 100. * result_list.count(None)/len(result_list)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to display statistics about games result.<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import json
def main():
"""TODO..."""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on results files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# PARSE THE RESULTS FILES #################################################
with open(json_file_path, "r") as fd:
data = json.load(fd)
result_list = [game["winner"] for game in data["game_log_list"]]
print("player1: {} ({:.2f}%)".format(result_list.count(0), 100. * result_list.count(0)/len(result_list)))
print("player2: {} ({:.2f}%)".format(result_list.count(1), 100. * result_list.count(1)/len(result_list)))
print("draw: {} ({:.2f}%)".format(result_list.count(None), 100. * result_list.count(None)/len(result_list)))
if __name__ == '__main__':
main()
|
|
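For reference, the results-file shape the script above expects, inferred from its parsing code; the values are illustrative only.
# Illustrative input as loaded by json.load: only "game_log_list" and each
# game's "winner" field (0, 1, or null/None) are read by the script.
example = {
    "game_log_list": [
        {"winner": 0},     # player1 won
        {"winner": 1},     # player2 won
        {"winner": None},  # draw (null in the JSON file)
    ]
}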
16311a8288eefe00ccb5bfce736ab7e211a0a905
|
migrations/versions/0247_another_letter_org.py
|
migrations/versions/0247_another_letter_org.py
|
"""empty message
Revision ID: 0247_another_letter_org
Revises: 0246_notifications_index
"""
# revision identifiers, used by Alembic.
revision = '0247_another_letter_org'
down_revision = '0246_notifications_index'
from alembic import op
NEW_ORGANISATIONS = [
('520', 'Neath Port Talbot Council', 'npt'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logo for Neath Port Talbot Council
|
Add letter logo for Neath Port Talbot Council
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add letter logo for Neath Port Talbot Council
|
"""empty message
Revision ID: 0247_another_letter_org
Revises: 0246_notifications_index
"""
# revision identifiers, used by Alembic.
revision = '0247_another_letter_org'
down_revision = '0246_notifications_index'
from alembic import op
NEW_ORGANISATIONS = [
('520', 'Neath Port Talbot Council', 'npt'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logo for Neath Port Talbot Council<commit_after>
|
"""empty message
Revision ID: 0247_another_letter_org
Revises: 0246_notifications_index
"""
# revision identifiers, used by Alembic.
revision = '0247_another_letter_org'
down_revision = '0246_notifications_index'
from alembic import op
NEW_ORGANISATIONS = [
('520', 'Neath Port Talbot Council', 'npt'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logo for Neath Port Talbot Council"""empty message
Revision ID: 0247_another_letter_org
Revises: 0246_notifications_index
"""
# revision identifiers, used by Alembic.
revision = '0247_another_letter_org'
down_revision = '0246_notifications_index'
from alembic import op
NEW_ORGANISATIONS = [
('520', 'Neath Port Talbot Council', 'npt'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logo for Neath Port Talbot Council<commit_after>"""empty message
Revision ID: 0247_another_letter_org
Revises: 0246_notifications_index
"""
# revision identifiers, used by Alembic.
revision = '0247_another_letter_org'
down_revision = '0246_notifications_index'
from alembic import op
NEW_ORGANISATIONS = [
('520', 'Neath Port Talbot Council', 'npt'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
89b3097e13c8241b26cacde615b792c75524d3d5
|
testcases/PetitbootMMU.py
|
testcases/PetitbootMMU.py
|
#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2019
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
'''
Petitboot MMU
----------
This test verifies that the petitboot kernel has booted with the expected MMU mode
for the processor. This means hash for P8 and radix for P9 onwards.
'''
import unittest
import logging
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class PetitbootMMU(unittest.TestCase):
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_IPMI = conf.ipmi()
self.cv_SYSTEM = conf.system()
def runTest(self):
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
self.c = self.cv_SYSTEM.console
log.debug("Scraping /proc/cpuinfo for CPU and MMU info")
cpu = self.c.run_command("awk '$1 == \"cpu\" {print $3}' /proc/cpuinfo | uniq")[0].strip(',')
mmu = self.c.run_command("awk '$1 == \"MMU\" {print $3}' /proc/cpuinfo")[0]
log.debug("Got CPU '{}' and MMU '{}'".format(cpu, mmu))
if cpu == 'POWER8':
self.assertEqual(mmu, 'Hash', 'Expect hash MMU on Power8, found {}'.format(mmu))
elif cpu == 'POWER9':
self.assertEqual(mmu, 'Radix', 'Expect radix MMU on Power9, found {}'.format(mmu))
else:
self.skipTest("Unknown CPU '{}', please update testcase to support this CPU")
|
Add check for the MMU type under petitboot
|
Add check for the MMU type under petitboot
We expect the petitboot kernel to always use radix MMU on Power9. This
test case will also test that Power8 is using hash.
It uses /proc/cpuinfo while booted into the petitboot environment.
Signed-off-by: Joel Stanley <4816dabf8db1bc6cac35b3a24cab2ff844b5b0c7@jms.id.au>
|
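For context, the awk one-liners in this test pick the third whitespace-separated field out of /proc/cpuinfo. An illustrative ppc64 excerpt of the lines being matched (the trailing comma on the cpu value is why the code calls .strip(',')):
cpu             : POWER9, altivec supported
MMU             : Radix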
Python
|
apache-2.0
|
open-power/op-test-framework,open-power/op-test-framework,open-power/op-test-framework
|
Add check for the MMU type under petitboot
We expect the petitboot kernel to always use radix MMU on Power9. This
test case will also test that Power8 is using hash.
It uses /proc/cpuinfo while booted into the petitboot environment.
Signed-off-by: Joel Stanley <4816dabf8db1bc6cac35b3a24cab2ff844b5b0c7@jms.id.au>
|
#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2019
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
'''
Petitboot MMU
-------------
This test verifies that the petitboot kernel has booted with the expected MMU mode
for the processor. This means hash for P8 and radix for P9 onwards.
'''
import unittest
import logging
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class PetitbootMMU(unittest.TestCase):
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_IPMI = conf.ipmi()
self.cv_SYSTEM = conf.system()
def runTest(self):
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
self.c = self.cv_SYSTEM.console
log.debug("Scraping /proc/cpuinfo for CPU and MMU info")
cpu = self.c.run_command("awk '$1 == \"cpu\" {print $3}' /proc/cpuinfo | uniq")[0].strip(',')
mmu = self.c.run_command("awk '$1 == \"MMU\" {print $3}' /proc/cpuinfo")[0]
log.debug("Got CPU '{}' and MMU '{}'".format(cpu, mmu))
if cpu == 'POWER8':
self.assertEqual(mmu, 'Hash', 'Expect hash MMU on Power8, found {}'.format(mmu))
elif cpu == 'POWER9':
self.assertEqual(mmu, 'Radix', 'Expect radix MMU on Power9, found {}'.format(mmu))
else:
self.skipTest("Unknown CPU '{}', please update testcase to support this CPU")
|
<commit_before><commit_msg>Add check for the MMU type under petitboot
We expect the petitboot kernel to always use radix MMU on Power9. This
test case will also test that Power8 is using hash.
It uses /proc/cpuinfo while booted into the petitboot environment.
Signed-off-by: Joel Stanley <4816dabf8db1bc6cac35b3a24cab2ff844b5b0c7@jms.id.au><commit_after>
|
#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2019
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
'''
Petitboot MMU
-------------
This test verifies that the petitboot kernel has booted with the expected MMU mode
for the processor. This means hash for P8 and radix for P9 onwards.
'''
import unittest
import logging
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class PetitbootMMU(unittest.TestCase):
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_IPMI = conf.ipmi()
self.cv_SYSTEM = conf.system()
def runTest(self):
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
self.c = self.cv_SYSTEM.console
log.debug("Scraping /proc/cpuinfo for CPU and MMU info")
cpu = self.c.run_command("awk '$1 == \"cpu\" {print $3}' /proc/cpuinfo | uniq")[0].strip(',')
mmu = self.c.run_command("awk '$1 == \"MMU\" {print $3}' /proc/cpuinfo")[0]
log.debug("Got CPU '{}' and MMU '{}'".format(cpu, mmu))
if cpu == 'POWER8':
self.assertEqual(mmu, 'Hash', 'Expect hash MMU on Power8, found {}'.format(mmu))
elif cpu == 'POWER9':
self.assertEqual(mmu, 'Radix', 'Expect radix MMU on Power9, found {}'.format(mmu))
else:
self.skipTest("Unknown CPU '{}', please update testcase to support this CPU")
|
Add check for the MMU type under petitboot
We expect the petitboot kernel to always use radix MMU on Power9. This
test case will also test that Power8 is using hash.
It uses /proc/cpuinfo while booted into the petitboot environment.
Signed-off-by: Joel Stanley <4816dabf8db1bc6cac35b3a24cab2ff844b5b0c7@jms.id.au>#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2019
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
'''
Petitboot MMU
-------------
This test verifies that the petitboot kernel has booted with the expected MMU mode
for the processor. This means hash for P8 and radix for P9 onwards.
'''
import unittest
import logging
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class PetitbootMMU(unittest.TestCase):
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_IPMI = conf.ipmi()
self.cv_SYSTEM = conf.system()
def runTest(self):
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
self.c = self.cv_SYSTEM.console
log.debug("Scraping /proc/cpuinfo for CPU and MMU info")
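# cpuinfo lines look like "cpu : POWER9, altivec supported" and "MMU : Radix";
# awk prints the third field, so the cpu value keeps a trailing comma that strip(',') removes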
cpu = self.c.run_command("awk '$1 == \"cpu\" {print $3}' /proc/cpuinfo | uniq")[0].strip(',')
mmu = self.c.run_command("awk '$1 == \"MMU\" {print $3}' /proc/cpuinfo")[0]
log.debug("Got CPU '{}' and MMU '{}'".format(cpu, mmu))
if cpu == 'POWER8':
self.assertEqual(mmu, 'Hash', 'Expect hash MMU on Power8, found {}'.format(mmu))
elif cpu == 'POWER9':
self.assertEqual(mmu, 'Radix', 'Expect radix MMU on Power9, found {}'.format(mmu))
else:
self.skipTest("Unknown CPU '{}', please update testcase to support this CPU".format(cpu))
|
<commit_before><commit_msg>Add check for the MMU type under petitboot
We expect the petitboot kernel to always use radix MMU on Power9. This
test case will also test that Power8 is using hash.
It uses /proc/cpuinfo while booted into the petitboot environment.
Signed-off-by: Joel Stanley <4816dabf8db1bc6cac35b3a24cab2ff844b5b0c7@jms.id.au><commit_after>#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2019
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
'''
Petitboot MMU
-------------
This test verifies that the petitboot kernel has booted with the expected MMU mode
for the processor. This means hash for P8 and radix for P9 onwards.
'''
import unittest
import logging
import OpTestConfiguration
import OpTestLogger
from common.OpTestSystem import OpSystemState
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class PetitbootMMU(unittest.TestCase):
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_IPMI = conf.ipmi()
self.cv_SYSTEM = conf.system()
def runTest(self):
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
self.c = self.cv_SYSTEM.console
log.debug("Scraping /proc/cpuinfo for CPU and MMU info")
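# cpuinfo lines look like "cpu : POWER9, altivec supported" and "MMU : Radix";
# awk prints the third field, so the cpu value keeps a trailing comma that strip(',') removes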
cpu = self.c.run_command("awk '$1 == \"cpu\" {print $3}' /proc/cpuinfo | uniq")[0].strip(',')
mmu = self.c.run_command("awk '$1 == \"MMU\" {print $3}' /proc/cpuinfo")[0]
log.debug("Got CPU '{}' and MMU '{}'".format(cpu, mmu))
if cpu == 'POWER8':
self.assertEqual(mmu, 'Hash', 'Expect hash MMU on Power8, found {}'.format(mmu))
elif cpu == 'POWER9':
self.assertEqual(mmu, 'Radix', 'Expect radix MMU on Power9, found {}'.format(mmu))
else:
self.skipTest("Unknown CPU '{}', please update testcase to support this CPU".format(cpu))
|
|
4756856a1f146b39103e50abcda41c5ecf8e89e9
|
tests/test_middlewares.py
|
tests/test_middlewares.py
|
from django.http.response import HttpResponse
from django.test import TestCase
from mock import MagicMock
from rewrite_external_links.middleware import RewriteExternalLinksMiddleware
class TestRewriteExternalLinksMiddleware(TestCase):
def setUp(self):
self.middleware = RewriteExternalLinksMiddleware()
def test_no_response_content(self):
"""response doesn't change if no response content."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_content_type(self):
"""response doesn't change if `Content-Type` is not `text/html`."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_request_path_info(self):
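"""response doesn't change for other request paths."""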
request = MagicMock()
request.META['PATH_INFO'] = '/another-url/'
content_type = 'text/html'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
|
Add basic tests for middleware
|
Add basic tests for middleware
|
Python
|
bsd-2-clause
|
incuna/django-rewrite-external-links,incuna/django-rewrite-external-links
|
Add basic tests for middleware
|
from django.http.response import HttpResponse
from django.test import TestCase
from mock import MagicMock
from rewrite_external_links.middleware import RewriteExternalLinksMiddleware
class TestRewriteExternalLinksMiddleware(TestCase):
def setUp(self):
self.middleware = RewriteExternalLinksMiddleware()
def test_no_response_content(self):
"""response doesn't change if no response content."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_content_type(self):
"""response doesn't change if `Content-Type` is not `text/html`."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_request_path_info(self):
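"""response doesn't change for other request paths."""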
request = MagicMock()
request.META['PATH_INFO'] = '/another-url/'
content_type = 'text/html'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
|
<commit_before><commit_msg>Add basic tests for middleware<commit_after>
|
from django.http.response import HttpResponse
from django.test import TestCase
from mock import MagicMock
from rewrite_external_links.middleware import RewriteExternalLinksMiddleware
class TestRewriteExternalLinksMiddleware(TestCase):
def setUp(self):
self.middleware = RewriteExternalLinksMiddleware()
def test_no_response_content(self):
"""response doesn't change if no response content."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_content_type(self):
"""response doesn't change if `Content-Type` is not `text/html`."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_request_path_info(self):
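"""response doesn't change for other request paths."""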
request = MagicMock()
request.META['PATH_INFO'] = '/another-url/'
content_type = 'text/html'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
|
Add basic tests for middlewarefrom django.http.response import HttpResponse
from django.test import TestCase
from mock import MagicMock
from rewrite_external_links.middleware import RewriteExternalLinksMiddleware
class TestRewriteExternalLinksMiddleware(TestCase):
def setUp(self):
self.middleware = RewriteExternalLinksMiddleware()
def test_no_response_content(self):
"""response doesn't change if no response content."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_content_type(self):
"""response doesn't change if `Content-Type` is not `text/html`."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_request_path_info(self):
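"""response doesn't change for other request paths."""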
request = MagicMock()
request.META['PATH_INFO'] = '/another-url/'
content_type = 'text/html'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
|
<commit_before><commit_msg>Add basic tests for middleware<commit_after>from django.http.response import HttpResponse
from django.test import TestCase
from mock import MagicMock
from rewrite_external_links.middleware import RewriteExternalLinksMiddleware
class TestRewriteExternalLinksMiddleware(TestCase):
def setUp(self):
self.middleware = RewriteExternalLinksMiddleware()
def test_no_response_content(self):
"""response doesn't change if no response content."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_content_type(self):
"""response doesn't change if `Content-Type` is not `text/html`."""
request = MagicMock()
content_type = 'application/thraud+xml'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
def test_other_request_path_info(self):
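"""response doesn't change for other request paths."""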
request = MagicMock()
request.META['PATH_INFO'] = '/another-url/'
content_type = 'text/html'
initial_response = HttpResponse(content=b'0x0', content_type=content_type)
processed_response = self.middleware.process_response(
request=request,
response=initial_response,
)
self.assertEqual(processed_response, initial_response)
|
|
5c9ffdb83bbc2796acc520365f6ce854d39a4c14
|
makeul4represcapes.py
|
makeul4represcapes.py
|
lastback = False
startback = 0
unprintable = []
for i in range(0x7f, 0x10001):
c = chr(i)
thisback = repr(c)[1] == "\\"
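# repr() of a non-printable char starts with a backslash escape,
# e.g. repr('\x7f') == "'\\x7f'", so character 1 is the backslash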
if thisback != lastback:
# print("{:08x}|{!r}: {}".format(i, c, thisback))
if thisback:
startback = i
else:
end = i-1
if startback == end:
unprintable.append("\\u{:04x}".format(startback))
else:
unprintable.append("\\u{:04x}-\\u{:04x}".format(startback, i-1))
lastback = thisback
print('/[{}]/'.format("".join(unprintable)))
|
Add script for generating the regexp for which chars to escape.
|
Add script for generating the regexp for which chars to escape.
|
Python
|
mit
|
LivingLogic/LivingLogic.Javascript.ul4,LivingLogic/LivingLogic.Javascript.ul4,doerwalter/LivingLogic.Javascript.ul4,doerwalter/LivingLogic.Javascript.ul4
|
Add script for generating the regexp for which chars to escape.
|
lastback = False
startback = 0
unprintable = []
for i in range(0x7f, 0x10001):
c = chr(i)
thisback = repr(c)[1] == "\\"
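# repr() of a non-printable char starts with a backslash escape,
# e.g. repr('\x7f') == "'\\x7f'", so character 1 is the backslash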
if thisback != lastback:
# print("{:08x}|{!r}: {}".format(i, c, thisback))
if thisback:
startback = i
else:
end = i-1
if startback == end:
unprintable.append("\\u{:04x}".format(startback))
else:
unprintable.append("\\u{:04x}-\\u{:04x}".format(startback, i-1))
lastback = thisback
print('/[{}]/'.format("".join(unprintable)))
|
<commit_before><commit_msg>Add script for generating the regexp for which chars to escape.<commit_after>
|
lastback = False
startback = 0
unprintable = []
for i in range(0x7f, 0x10001):
c = chr(i)
thisback = repr(c)[1] == "\\"
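# repr() of a non-printable char starts with a backslash escape,
# e.g. repr('\x7f') == "'\\x7f'", so character 1 is the backslash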
if thisback != lastback:
# print("{:08x}|{!r}: {}".format(i, c, thisback))
if thisback:
startback = i
else:
end = i-1
if startback == end:
unprintable.append("\\u{:04x}".format(startback))
else:
unprintable.append("\\u{:04x}-\\u{:04x}".format(startback, i-1))
lastback = thisback
print('/[{}]/'.format("".join(unprintable)))
|
Add script for generating the regexp for which chars to escape.lastback = False
startback = 0
unprintable = []
for i in range(0x7f, 0x10001):
c = chr(i)
thisback = repr(c)[1] == "\\"
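# repr() of a non-printable char starts with a backslash escape,
# e.g. repr('\x7f') == "'\\x7f'", so character 1 is the backslash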
if thisback != lastback:
# print("{:08x}|{!r}: {}".format(i, c, thisback))
if thisback:
startback = i
else:
end = i-1
if startback == end:
unprintable.append("\\u{:04x}".format(startback))
else:
unprintable.append("\\u{:04x}-\\u{:04x}".format(startback, i-1))
lastback = thisback
print('/[{}]/'.format("".join(unprintable)))
|
<commit_before><commit_msg>Add script for generating the regexp for which chars to escape.<commit_after>lastback = False
startback = 0
unprintable = []
for i in range(0x7f, 0x10001):
c = chr(i)
thisback = repr(c)[1] == "\\"
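# repr() of a non-printable char starts with a backslash escape,
# e.g. repr('\x7f') == "'\\x7f'", so character 1 is the backslash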
if thisback != lastback:
# print("{:08x}|{!r}: {}".format(i, c, thisback))
if thisback:
startback = i
else:
end = i-1
if startback == end:
unprintable.append("\\u{:04x}".format(startback))
else:
unprintable.append("\\u{:04x}-\\u{:04x}".format(startback, i-1))
lastback = thisback
print('/[{}]/'.format("".join(unprintable)))
|
|
f6ca3abbcb621135acd39c0556df2b5f672cc286
|
CodeFights/replaceMiddle.py
|
CodeFights/replaceMiddle.py
|
#!/usr/local/bin/python
# Code Fights Replace Middle Problem
def replaceMiddle(arr):
n = len(arr)
if n % 2 == 1:
return arr
else:
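# even length: replace the two middle elements with their sum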
mid = arr[n // 2 - 1] + arr[n // 2]
arr[n // 2 - 1: n // 2 + 1] = [mid]
return arr
def main():
tests = [
[[7, 2, 2, 5, 10, 7], [7, 2, 7, 10, 7]],
[[-5, -5, 10], [-5, -5, 10]],
[
[45, 23, 12, 33, 12, 453, -234, -45],
[45, 23, 12, 45, 453, -234, -45]
],
[[2, 8], [10]],
[
[-12, 34, 40, -5, -12, 4, 0, 0, -12],
[-12, 34, 40, -5, -12, 4, 0, 0, -12]
],
[[9, 0, 15, 9], [9, 15, 9]],
[[-6, 6, -6], [-6, 6, -6]],
[[26, 26, -17], [26, 26, -17]],
[[-7, 5, 5, 10], [-7, 10, 10]]
]
for t in tests:
res = replaceMiddle(t[0])
ans = t[1]
if ans == res:
print("PASSED: replaceMiddle({}) returned {}"
.format(t[0], res))
else:
print("FAILED: replaceMiddle({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights replace middle problem
|
Solve Code Fights replace middle problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights replace middle problem
|
#!/usr/local/bin/python
# Code Fights Replace Middle Problem
def replaceMiddle(arr):
n = len(arr)
if n % 2 == 1:
return arr
else:
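# even length: replace the two middle elements with their sum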
mid = arr[n // 2 - 1] + arr[n // 2]
arr[n // 2 - 1: n // 2 + 1] = [mid]
return arr
def main():
tests = [
[[7, 2, 2, 5, 10, 7], [7, 2, 7, 10, 7]],
[[-5, -5, 10], [-5, -5, 10]],
[
[45, 23, 12, 33, 12, 453, -234, -45],
[45, 23, 12, 45, 453, -234, -45]
],
[[2, 8], [10]],
[
[-12, 34, 40, -5, -12, 4, 0, 0, -12],
[-12, 34, 40, -5, -12, 4, 0, 0, -12]
],
[[9, 0, 15, 9], [9, 15, 9]],
[[-6, 6, -6], [-6, 6, -6]],
[[26, 26, -17], [26, 26, -17]],
[[-7, 5, 5, 10], [-7, 10, 10]]
]
for t in tests:
res = replaceMiddle(t[0])
ans = t[1]
if ans == res:
print("PASSED: replaceMiddle({}) returned {}"
.format(t[0], res))
else:
print("FAILED: replaceMiddle({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights replace middle problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Replace Middle Problem
def replaceMiddle(arr):
n = len(arr)
if n % 2 == 1:
return arr
else:
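# even length: replace the two middle elements with their sum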
mid = arr[n // 2 - 1] + arr[n // 2]
arr[n // 2 - 1: n // 2 + 1] = [mid]
return arr
def main():
tests = [
[[7, 2, 2, 5, 10, 7], [7, 2, 7, 10, 7]],
[[-5, -5, 10], [-5, -5, 10]],
[
[45, 23, 12, 33, 12, 453, -234, -45],
[45, 23, 12, 45, 453, -234, -45]
],
[[2, 8], [10]],
[
[-12, 34, 40, -5, -12, 4, 0, 0, -12],
[-12, 34, 40, -5, -12, 4, 0, 0, -12]
],
[[9, 0, 15, 9], [9, 15, 9]],
[[-6, 6, -6], [-6, 6, -6]],
[[26, 26, -17], [26, 26, -17]],
[[-7, 5, 5, 10], [-7, 10, 10]]
]
for t in tests:
res = replaceMiddle(t[0])
ans = t[1]
if ans == res:
print("PASSED: replaceMiddle({}) returned {}"
.format(t[0], res))
else:
print("FAILED: replaceMiddle({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights replace middle problem#!/usr/local/bin/python
# Code Fights Replace Middle Problem
def replaceMiddle(arr):
n = len(arr)
if n % 2 == 1:
return arr
else:
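# even length: replace the two middle elements with their sum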
mid = arr[n // 2 - 1] + arr[n // 2]
arr[n // 2 - 1: n // 2 + 1] = [mid]
return arr
def main():
tests = [
[[7, 2, 2, 5, 10, 7], [7, 2, 7, 10, 7]],
[[-5, -5, 10], [-5, -5, 10]],
[
[45, 23, 12, 33, 12, 453, -234, -45],
[45, 23, 12, 45, 453, -234, -45]
],
[[2, 8], [10]],
[
[-12, 34, 40, -5, -12, 4, 0, 0, -12],
[-12, 34, 40, -5, -12, 4, 0, 0, -12]
],
[[9, 0, 15, 9], [9, 15, 9]],
[[-6, 6, -6], [-6, 6, -6]],
[[26, 26, -17], [26, 26, -17]],
[[-7, 5, 5, 10], [-7, 10, 10]]
]
for t in tests:
res = replaceMiddle(t[0])
ans = t[1]
if ans == res:
print("PASSED: replaceMiddle({}) returned {}"
.format(t[0], res))
else:
print("FAILED: replaceMiddle({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights replace middle problem<commit_after>#!/usr/local/bin/python
# Code Fights Replace Middle Problem
def replaceMiddle(arr):
n = len(arr)
if n % 2 == 1:
return arr
else:
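# even length: replace the two middle elements with their sum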
mid = arr[n // 2 - 1] + arr[n // 2]
arr[n // 2 - 1: n // 2 + 1] = [mid]
return arr
def main():
tests = [
[[7, 2, 2, 5, 10, 7], [7, 2, 7, 10, 7]],
[[-5, -5, 10], [-5, -5, 10]],
[
[45, 23, 12, 33, 12, 453, -234, -45],
[45, 23, 12, 45, 453, -234, -45]
],
[[2, 8], [10]],
[
[-12, 34, 40, -5, -12, 4, 0, 0, -12],
[-12, 34, 40, -5, -12, 4, 0, 0, -12]
],
[[9, 0, 15, 9], [9, 15, 9]],
[[-6, 6, -6], [-6, 6, -6]],
[[26, 26, -17], [26, 26, -17]],
[[-7, 5, 5, 10], [-7, 10, 10]]
]
for t in tests:
res = replaceMiddle(t[0])
ans = t[1]
if ans == res:
print("PASSED: replaceMiddle({}) returned {}"
.format(t[0], res))
else:
print("FAILED: replaceMiddle({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
a35c7023adbe1aeabce62f02d390f934741cb392
|
examples/multimailboxsearch.py
|
examples/multimailboxsearch.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to search trhough every mailbox on an account."""
import argparse
import getpass
import logging
import os
import sys
import imap_cli
from imap_cli import search
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
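# for "Name <address>" strings, keep only the name part before truncating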
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('searched_address', help="Searched address")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
try:
imap_account = imap_cli.connect(
hostname=args.imap_server,
username=args.login,
password=password,
ssl=not args.no_ssl,
)
for directory_status in sorted(imap_cli.status(imap_account), key=lambda obj: obj['directory']):
imap_cli.change_dir(imap_account, directory_status['directory'])
mail_set = search.fetch_uids(
imap_account,
search_criterion=[search.create_search_criterion_by_mail_address(args.searched_address)],
)
if len(mail_set) > 0:
sys.stdout.write(u'{} Directory\n'.format(directory_status['directory']))
for mail_info in search.fetch_mails_info(imap_account, mail_set=mail_set):
sys.stdout.write(u' {:<10} From : {:<30} \tSubject : {}\n'.format(
mail_info['uid'],
truncate_string(mail_info['from'], 30),
truncate_string(mail_info['subject'], 50),
))
imap_cli.disconnect(imap_account)
except KeyboardInterrupt:
log.info('Interrupt by user, exiting')
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add an example script to search through every mailbox
|
Add an example script to search through every mailbox
|
Python
|
mit
|
Gentux/imap-cli,Gentux/imap-cli
|
Add an example script to search through every mailbox
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to search trhough every mailbox on an account."""
import argparse
import getpass
import logging
import os
import sys
import imap_cli
from imap_cli import search
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
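# for "Name <address>" strings, keep only the name part before truncating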
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('searched_address', help="Searched address")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
try:
imap_account = imap_cli.connect(
hostname=args.imap_server,
username=args.login,
password=password,
ssl=not args.no_ssl,
)
for directory_status in sorted(imap_cli.status(imap_account), key=lambda obj: obj['directory']):
imap_cli.change_dir(imap_account, directory_status['directory'])
mail_set = search.fetch_uids(
imap_account,
search_criterion=[search.create_search_criterion_by_mail_address(args.searched_address)],
)
if len(mail_set) > 0:
sys.stdout.write(u'{} Directory\n'.format(directory_status['directory']))
for mail_info in search.fetch_mails_info(imap_account, mail_set=mail_set):
sys.stdout.write(u' {:<10} From : {:<30} \tSubject : {}\n'.format(
mail_info['uid'],
truncate_string(mail_info['from'], 30),
truncate_string(mail_info['subject'], 50),
))
imap_cli.disconnect(imap_account)
except KeyboardInterrupt:
log.info('Interrupt by user, exiting')
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add an example script to search through every mailbox<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to search trhough every mailbox on an account."""
import argparse
import getpass
import logging
import os
import sys
import imap_cli
from imap_cli import search
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
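# for "Name <address>" strings, keep only the name part before truncating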
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('searched_address', help="Searched address")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
try:
imap_account = imap_cli.connect(
hostname=args.imap_server,
username=args.login,
password=password,
ssl=not args.no_ssl,
)
for directory_status in sorted(imap_cli.status(imap_account), key=lambda obj: obj['directory']):
imap_cli.change_dir(imap_account, directory_status['directory'])
mail_set = search.fetch_uids(
imap_account,
search_criterion=[search.create_search_criterion_by_mail_address(args.searched_address)],
)
if len(mail_set) > 0:
sys.stdout.write(u'{} Directory\n'.format(directory_status['directory']))
for mail_info in search.fetch_mails_info(imap_account, mail_set=mail_set):
sys.stdout.write(u' {:<10} From : {:<30} \tSubject : {}\n'.format(
mail_info['uid'],
truncate_string(mail_info['from'], 30),
truncate_string(mail_info['subject'], 50),
))
imap_cli.disconnect(imap_account)
except KeyboardInterrupt:
log.info('Interrupt by user, exiting')
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add an example script to search through every mailbox#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to search trhough every mailbox on an account."""
import argparse
import getpass
import logging
import os
import sys
import imap_cli
from imap_cli import search
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
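# for "Name <address>" strings, keep only the name part before truncating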
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('searched_address', help="Searched address")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
try:
imap_account = imap_cli.connect(
hostname=args.imap_server,
username=args.login,
password=password,
ssl=not args.no_ssl,
)
for directory_status in sorted(imap_cli.status(imap_account), key=lambda obj: obj['directory']):
imap_cli.change_dir(imap_account, directory_status['directory'])
mail_set = search.fetch_uids(
imap_account,
search_criterion=[search.create_search_criterion_by_mail_address(args.searched_address)],
)
if len(mail_set) > 0:
sys.stdout.write(u'{} Directory\n'.format(directory_status['directory']))
for mail_info in search.fetch_mails_info(imap_account, mail_set=mail_set):
sys.stdout.write(u' {:<10} From : {:<30} \tSubject : {}\n'.format(
mail_info['uid'],
truncate_string(mail_info['from'], 30),
truncate_string(mail_info['subject'], 50),
))
imap_cli.disconnect(imap_account)
except KeyboardInterrupt:
log.info('Interrupt by user, exiting')
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add an example script to search through every mailbox<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Use IMAP CLI to search trhough every mailbox on an account."""
import argparse
import getpass
import logging
import os
import sys
import imap_cli
from imap_cli import search
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def truncate_string(string, length):
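# for "Name <address>" strings, keep only the name part before truncating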
minus_than_position = string.find('<')
if minus_than_position > 0 and string.find('>') > minus_than_position:
string = string[0:minus_than_position]
return string if len(string) < length else u'{0}…'.format(string[0:length])
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('imap_server', help="IMAP Server hostname")
parser.add_argument('searched_address', help="Searched address")
parser.add_argument('-l', '--login', help="Login for IMAP account")
parser.add_argument('--no-ssl', action='store_true', help="Don't use SSL")
parser.add_argument('-v', '--verbose', action='store_true', help='increase output verbosity')
args = parser.parse_args()
password = getpass.getpass()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
try:
imap_account = imap_cli.connect(
hostname=args.imap_server,
username=args.login,
password=password,
ssl=not args.no_ssl,
)
for directory_status in sorted(imap_cli.status(imap_account), key=lambda obj: obj['directory']):
imap_cli.change_dir(imap_account, directory_status['directory'])
mail_set = search.fetch_uids(
imap_account,
search_criterion=[search.create_search_criterion_by_mail_address(args.searched_address)],
)
if len(mail_set) > 0:
sys.stdout.write(u'{} Directory\n'.format(directory_status['directory']))
for mail_info in search.fetch_mails_info(imap_account, mail_set=mail_set):
sys.stdout.write(u' {:<10} From : {:<30} \tSubject : {}\n'.format(
mail_info['uid'],
truncate_string(mail_info['from'], 30),
truncate_string(mail_info['subject'], 50),
))
imap_cli.disconnect(imap_account)
except KeyboardInterrupt:
log.info('Interrupt by user, exiting')
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
ce1941a06b37f1273618822212e9570c9ca57f2f
|
ws-tests/test_valid_study_put_override_author.py
|
ws-tests/test_valid_study_put_override_author.py
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
Add a test for over-riding author info
|
Add a test for over-riding author info
|
Python
|
bsd-2-clause
|
OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api
|
Add a test for over-riding author info
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for over-riding author info<commit_after>
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
Add a test for over-riding author info#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for over-riding author info<commit_after>#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/1003'
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': os.environ.get('GITHUB_OAUTH_TOKEN', 'bogus_token'),
'author_name': "Some Dude",
'author_email': "dude@dude.com",
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=200):
sys.exit(0)
sys.exit(1)
|
|
dccc851f34eebe2e74efcce5141ced166a9baa04
|
bluebottle/notifications/tests/test_admin.py
|
bluebottle/notifications/tests/test_admin.py
|
# -*- coding: utf-8 -*-
from django.urls import reverse
from bluebottle.notifications.models import MessageTemplate
from bluebottle.test.utils import BluebottleAdminTestCase
class TestMessageTemplateAdmin(BluebottleAdminTestCase):
def setUp(self):
super(TestMessageTemplateAdmin, self).setUp()
self.client.force_login(self.superuser)
def test_mail_template_admin(self):
template = MessageTemplate.objects.create(
message='bluebottle.members.messages.AccountActivationMessage'
)
self.admin_url = reverse('admin:notifications_messagetemplate_change', args=(template.id,))
response = self.client.get(self.admin_url)
# Check we show placeholder hints
self.assertContains(response, '{first_name}')
self.assertContains(response, '{site_name}')
|
Add tests for message template admin
|
Add tests for message template admin
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add tests for message template admin
|
# -*- coding: utf-8 -*-
from django.urls import reverse
from bluebottle.notifications.models import MessageTemplate
from bluebottle.test.utils import BluebottleAdminTestCase
class TestMessageTemplateAdmin(BluebottleAdminTestCase):
def setUp(self):
super(TestMessageTemplateAdmin, self).setUp()
self.client.force_login(self.superuser)
def test_mail_template_admin(self):
template = MessageTemplate.objects.create(
message='bluebottle.members.messages.AccountActivationMessage'
)
self.admin_url = reverse('admin:notifications_messagetemplate_change', args=(template.id,))
response = self.client.get(self.admin_url)
# Check we show placeholder hints
self.assertContains(response, '{first_name}')
self.assertContains(response, '{site_name}')
|
<commit_before><commit_msg>Add tests for message template admin<commit_after>
|
# -*- coding: utf-8 -*-
from django.urls import reverse
from bluebottle.notifications.models import MessageTemplate
from bluebottle.test.utils import BluebottleAdminTestCase
class TestMessageTemplateAdmin(BluebottleAdminTestCase):
def setUp(self):
super(TestMessageTemplateAdmin, self).setUp()
self.client.force_login(self.superuser)
def test_mail_template_admin(self):
template = MessageTemplate.objects.create(
message='bluebottle.members.messages.AccountActivationMessage'
)
self.admin_url = reverse('admin:notifications_messagetemplate_change', args=(template.id,))
response = self.client.get(self.admin_url)
# Check we show placeholder hints
self.assertContains(response, '{first_name}')
self.assertContains(response, '{site_name}')
|
Add tests for message template admin# -*- coding: utf-8 -*-
from django.urls import reverse
from bluebottle.notifications.models import MessageTemplate
from bluebottle.test.utils import BluebottleAdminTestCase
class TestMessageTemplateAdmin(BluebottleAdminTestCase):
def setUp(self):
super(TestMessageTemplateAdmin, self).setUp()
self.client.force_login(self.superuser)
def test_mail_template_admin(self):
template = MessageTemplate.objects.create(
message='bluebottle.members.messages.AccountActivationMessage'
)
self.admin_url = reverse('admin:notifications_messagetemplate_change', args=(template.id,))
response = self.client.get(self.admin_url)
# Check we show placeholder hints
self.assertContains(response, '{first_name}')
self.assertContains(response, '{site_name}')
|
<commit_before><commit_msg>Add tests for message template admin<commit_after># -*- coding: utf-8 -*-
from django.urls import reverse
from bluebottle.notifications.models import MessageTemplate
from bluebottle.test.utils import BluebottleAdminTestCase
class TestMessageTemplateAdmin(BluebottleAdminTestCase):
def setUp(self):
super(TestMessageTemplateAdmin, self).setUp()
self.client.force_login(self.superuser)
def test_mail_template_admin(self):
template = MessageTemplate.objects.create(
message='bluebottle.members.messages.AccountActivationMessage'
)
self.admin_url = reverse('admin:notifications_messagetemplate_change', args=(template.id,))
response = self.client.get(self.admin_url)
# Check we show placeholder hints
self.assertContains(response, '{first_name}')
self.assertContains(response, '{site_name}')
|
|
830888cb9c795313917e6540f11b411ea002b6b6
|
comics/comics/kalscartoon.py
|
comics/comics/kalscartoon.py
|
from dateutil.parser import parse
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = "KAL's Cartoon"
language = 'en'
url = 'http://www.economist.com'
start_date = '2006-01-05'
rights = 'Kevin Kallaugher'
class Crawler(CrawlerBase):
history_capable_days = 1000
schedule = 'Th'
def crawl(self, pub_date):
article_list = self.parse_page('http://www.economist.com/research/articlesBySubject/display.cfm?id=8717275&startRow=1&endrow=500')
article_list.remove('.web-only')
for block in article_list.root.cssselect('.article-list .block'):
date = block.cssselect('.date')[0]
if pub_date != parse(date.text_content()).date():
continue
anchor = block.cssselect('h2 a')[0]
if "KAL's cartoon" not in anchor.text_content():
continue
page = self.parse_page(anchor.get('href'))
return CrawlerResult(page.src('.content-image-full img'))
|
Add crawler for KAL's cartoon
|
Add crawler for KAL's cartoon
|
Python
|
agpl-3.0
|
klette/comics,jodal/comics,datagutten/comics,jodal/comics,jodal/comics,jodal/comics,klette/comics,datagutten/comics,datagutten/comics,datagutten/comics,klette/comics
|
Add crawler for KAL's cartoon
|
from dateutil.parser import parse
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = "KAL's Cartoon"
language = 'en'
url = 'http://www.economist.com'
start_date = '2006-01-05'
rights = 'Kevin Kallaugher'
class Crawler(CrawlerBase):
history_capable_days = 1000
schedule = 'Th'
def crawl(self, pub_date):
article_list = self.parse_page('http://www.economist.com/research/articlesBySubject/display.cfm?id=8717275&startRow=1&endrow=500')
article_list.remove('.web-only')
for block in article_list.root.cssselect('.article-list .block'):
date = block.cssselect('.date')[0]
if pub_date != parse(date.text_content()).date():
continue
anchor = block.cssselect('h2 a')[0]
if "KAL's cartoon" not in anchor.text_content():
continue
page = self.parse_page(anchor.get('href'))
return CrawlerResult(page.src('.content-image-full img'))
|
<commit_before><commit_msg>Add crawler for KAL's cartoon<commit_after>
|
from dateutil.parser import parse
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = "KAL's Cartoon"
language = 'en'
url = 'http://www.economist.com'
start_date = '2006-01-05'
rights = 'Kevin Kallaugher'
class Crawler(CrawlerBase):
history_capable_days = 1000
schedule = 'Th'
def crawl(self, pub_date):
article_list = self.parse_page('http://www.economist.com/research/articlesBySubject/display.cfm?id=8717275&startRow=1&endrow=500')
article_list.remove('.web-only')
for block in article_list.root.cssselect('.article-list .block'):
date = block.cssselect('.date')[0]
if pub_date != parse(date.text_content()).date():
continue
anchor = block.cssselect('h2 a')[0]
if "KAL's cartoon" not in anchor.text_content():
continue
page = self.parse_page(anchor.get('href'))
return CrawlerResult(page.src('.content-image-full img'))
|
Add crawler for KAL's cartoonfrom dateutil.parser import parse
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = "KAL's Cartoon"
language = 'en'
url = 'http://www.economist.com'
start_date = '2006-01-05'
rights = 'Kevin Kallaugher'
class Crawler(CrawlerBase):
history_capable_days = 1000
schedule = 'Th'
def crawl(self, pub_date):
article_list = self.parse_page('http://www.economist.com/research/articlesBySubject/display.cfm?id=8717275&startRow=1&endrow=500')
article_list.remove('.web-only')
for block in article_list.root.cssselect('.article-list .block'):
date = block.cssselect('.date')[0]
if pub_date != parse(date.text_content()).date():
continue
anchor = block.cssselect('h2 a')[0]
if "KAL's cartoon" not in anchor.text_content():
continue
page = self.parse_page(anchor.get('href'))
return CrawlerResult(page.src('.content-image-full img'))
|
<commit_before><commit_msg>Add crawler for KAL's cartoon<commit_after>from dateutil.parser import parse
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = "KAL's Cartoon"
language = 'en'
url = 'http://www.economist.com'
start_date = '2006-01-05'
rights = 'Kevin Kallaugher'
class Crawler(CrawlerBase):
history_capable_days = 1000
schedule = 'Th'
def crawl(self, pub_date):
article_list = self.parse_page('http://www.economist.com/research/articlesBySubject/display.cfm?id=8717275&startRow=1&endrow=500')
article_list.remove('.web-only')
for block in article_list.root.cssselect('.article-list .block'):
date = block.cssselect('.date')[0]
if pub_date != parse(date.text_content()).date():
continue
anchor = block.cssselect('h2 a')[0]
if "KAL's cartoon" not in anchor.text_content():
continue
page = self.parse_page(anchor.get('href'))
return CrawlerResult(page.src('.content-image-full img'))
|
|
832b22df0dc8cc7d9b4a3ae4095d5508b732978f
|
uoftscrapers/scrapers/food/__init__.py
|
uoftscrapers/scrapers/food/__init__.py
|
import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
import json
from ..scraper import Scraper
class Food(Scraper):
"""A scraper for UofT restaurants.
UofT Food data is located at http://map.utoronto.ca
"""
def __init__(self, output_location='.'):
super().__init__('Food', output_location)
self.host = 'http://map.utoronto.ca/'
self.campuses = ['utsg', 'utm', 'utsc']
self.s = requests.Session()
def get_hours(self, food_id):
"""Parse and return the restaurant's opening and closing times."""
headers = {
'Referer': self.host
}
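# map.utoronto.ca presumably checks the Referer, hence the header above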
html = self.s.get('%s%s%s' % (self.host, 'json/hours/', food_id),
headers=headers).text
soup = BeautifulSoup(html, 'html.parser')
if not soup.find('tbody').text == '':
hours = OrderedDict()
days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday']
timings = soup.find('tbody').find_all('td')
for i in range(len(timings)):
closed, opening, closing = False, '', ''
day, timing = days[i], timings[i].text
# when closed for the full day, timing will not have a '-'
if '-' in timing:
opening, closing = timing.split(' -')
else:
closed = True
hours.update({day: OrderedDict([
('closed', closed),
('open', opening),
('close', closing)])})
return hours
else:
return ''
|
Initialize Food scraper, add timing scraper
|
Initialize Food scraper, add timing scraper
|
Python
|
mit
|
kshvmdn/uoft-scrapers,cobalt-uoft/uoft-scrapers,arkon/uoft-scrapers,g3wanghc/uoft-scrapers
|
Initialize Food scraper, add timing scraper
|
import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
import json
from ..scraper import Scraper
class Food(Scraper):
"""A scraper for UofT restaurants.
UofT Food data is located at http://map.utoronto.ca
"""
def __init__(self, output_location='.'):
super().__init__('Food', output_location)
self.host = 'http://map.utoronto.ca/'
self.campuses = ['utsg', 'utm', 'utsc']
self.s = requests.Session()
def get_hours(self, food_id):
"""Parse and return the restaurant's opening and closing times."""
headers = {
'Referer': self.host
}
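# map.utoronto.ca presumably checks the Referer, hence the header above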
html = self.s.get('%s%s%s' % (self.host, 'json/hours/', food_id),
headers=headers).text
soup = BeautifulSoup(html, 'html.parser')
if not soup.find('tbody').text == '':
hours = OrderedDict()
days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday']
timings = soup.find('tbody').find_all('td')
for i in range(len(timings)):
closed, opening, closing = False, '', ''
day, timing = days[i], timings[i].text
# when closed for the full day, timing will not have a '-'
if '-' in timing:
opening, closing = timing.split(' -')
else:
closed = True
hours.update({day: OrderedDict([
('closed', closed),
('open', opening),
('close', closing)])})
return hours
else:
return ''
|
<commit_before><commit_msg>Initialize Food scraper, add timing scraper<commit_after>
|
import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
import json
from ..scraper import Scraper
class Food(Scraper):
"""A scraper for UofT restaurants.
UofT Food data is located at http://map.utoronto.ca
"""
def __init__(self, output_location='.'):
super().__init__('Food', output_location)
self.host = 'http://map.utoronto.ca/'
self.campuses = ['utsg', 'utm', 'utsc']
self.s = requests.Session()
def get_hours(self, food_id):
"""Parse and return the restaurant's opening and closing times."""
headers = {
'Referer': self.host
}
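# map.utoronto.ca presumably checks the Referer, hence the header above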
html = self.s.get('%s%s%s' % (self.host, 'json/hours/', food_id),
headers=headers).text
soup = BeautifulSoup(html, 'html.parser')
if not soup.find('tbody').text == '':
hours = OrderedDict()
days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday']
timings = soup.find('tbody').find_all('td')
for i in range(len(timings)):
closed, opening, closing = False, '', ''
day, timing = days[i], timings[i].text
# when closed for the full day, timing will not have a '-'
if '-' in timing:
opening, closing = timing.split(' -')
else:
closed = True
hours.update({day: OrderedDict([
('closed', closed),
('open', opening),
('close', closing)])})
return hours
else:
return ''
|
Initialize Food scraper, add timing scraperimport requests
from bs4 import BeautifulSoup
from collections import OrderedDict
import json
from ..scraper import Scraper
class Food(Scraper):
"""A scraper for UofT restaurants.
UofT Food data is located at http://map.utoronto.ca
"""
def __init__(self, output_location='.'):
super().__init__('Food', output_location)
self.host = 'http://map.utoronto.ca/'
self.campuses = ['utsg', 'utm', 'utsc']
self.s = requests.Session()
def get_hours(self, food_id):
"""Parse and return the restaurant's opening and closing times."""
headers = {
'Referer': self.host
}
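# map.utoronto.ca presumably checks the Referer, hence the header above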
html = self.s.get('%s%s%s' % (self.host, 'json/hours/', food_id),
headers=headers).text
soup = BeautifulSoup(html, 'html.parser')
if not soup.find('tbody').text == '':
hours = OrderedDict()
days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday']
timings = soup.find('tbody').find_all('td')
for i in range(len(timings)):
closed, opening, closing = False, '', ''
day, timing = days[i], timings[i].text
# when closed for the full day, timing will not have a '-'
if '-' in timing:
opening, closing = timing.split(' -')
else:
closed = True
hours.update({day: OrderedDict([
('closed', closed),
('open', opening),
('close', closing)])})
return hours
else:
return ''
|
<commit_before><commit_msg>Initialize Food scraper, add timing scraper<commit_after>import requests
from bs4 import BeautifulSoup
from collections import OrderedDict
import json
from ..scraper import Scraper
class Food(Scraper):
"""A scraper for UofT restaurants.
UofT Food data is located at http://map.utoronto.ca
"""
def __init__(self, output_location='.'):
super().__init__('Food', output_location)
self.host = 'http://map.utoronto.ca/'
self.campuses = ['utsg', 'utm', 'utsc']
self.s = requests.Session()
def get_hours(self, food_id):
"""Parse and return the restaurant's opening and closing times."""
headers = {
'Referer': self.host
}
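# map.utoronto.ca presumably checks the Referer, hence the header above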
html = self.s.get('%s%s%s' % (self.host, 'json/hours/', food_id),
headers=headers).text
soup = BeautifulSoup(html, 'html.parser')
if not soup.find('tbody').text == '':
hours = OrderedDict()
days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday']
timings = soup.find('tbody').find_all('td')
for i in range(len(timings)):
closed, opening, closing = False, '', ''
day, timing = days[i], timings[i].text
# when closed for the full day, timing will not have a '-'
if '-' in timing:
opening, closing = timing.split(' -')
else:
closed = True
hours.update({day: OrderedDict([
('closed', closed),
('open', opening),
('close', closing)])})
return hours
else:
return ''
|
|
c0f7dc1dcfdbabff116f8d7132b191397fd9007f
|
src/sentry/api/serializers/models/filechange.py
|
src/sentry/api/serializers/models/filechange.py
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
result[item] = {
'user': author_objs.get(commits_by_id[item.commit_id].author_id, {}),
'message': commits_by_id[item.commit_id].message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
commit = commits_by_id[item.commit_id]
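# resolve the commit once and reuse it for both fields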
result[item] = {
'user': author_objs.get(commit.author_id, {}),
'message': commit.message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
|
Use dictionary lookup only once
|
Use dictionary lookup only once
|
Python
|
bsd-3-clause
|
looker/sentry,JackDanger/sentry,mvaled/sentry,gencer/sentry,beeftornado/sentry,jean/sentry,jean/sentry,BuildingLink/sentry,ifduyue/sentry,mvaled/sentry,ifduyue/sentry,BuildingLink/sentry,looker/sentry,JamesMura/sentry,looker/sentry,jean/sentry,gencer/sentry,JamesMura/sentry,ifduyue/sentry,JamesMura/sentry,BuildingLink/sentry,JackDanger/sentry,JackDanger/sentry,looker/sentry,jean/sentry,mvaled/sentry,JamesMura/sentry,BuildingLink/sentry,gencer/sentry,mvaled/sentry,ifduyue/sentry,mvaled/sentry,BuildingLink/sentry,JamesMura/sentry,gencer/sentry,mvaled/sentry,beeftornado/sentry,ifduyue/sentry,gencer/sentry,looker/sentry,jean/sentry,beeftornado/sentry
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
result[item] = {
'user': author_objs.get(commits_by_id[item.commit_id].author_id, {}),
'message': commits_by_id[item.commit_id].message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
Use dictionary lookup only once
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
commit = commits_by_id[item.commit_id]
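# resolve the commit once and reuse it for both fields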
result[item] = {
'user': author_objs.get(commit.author_id, {}),
'message': commit.message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
|
<commit_before>from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
result[item] = {
'user': author_objs.get(commits_by_id[item.commit_id].author_id, {}),
'message': commits_by_id[item.commit_id].message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
<commit_msg>Use dictionary lookup only once<commit_after>
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
commit = commits_by_id[item.commit_id]
result[item] = {
'user': author_objs.get(commit.author_id, {}),
'message': commit.message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
result[item] = {
'user': author_objs.get(commits_by_id[item.commit_id].author_id, {}),
'message': commits_by_id[item.commit_id].message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
Use dictionary lookup only oncefrom __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
commit = commits_by_id[item.commit_id]
result[item] = {
'user': author_objs.get(commit.author_id, {}),
'message': commit.message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
|
<commit_before>from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
result[item] = {
'user': author_objs.get(commits_by_id[item.commit_id].author_id, {}),
'message': commits_by_id[item.commit_id].message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
<commit_msg>Use dictionary lookup only once<commit_after>from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Commit, CommitFileChange
from sentry.api.serializers.models.release import get_users_for_commits
@register(CommitFileChange)
class CommitFileChangeSerializer(Serializer):
def get_attrs(self, item_list, user):
commits = Commit.objects.filter(id__in=[f.commit_id for f in item_list]).select_related('author')
author_objs = get_users_for_commits(commits)
commits_by_id = {commit.id: commit for commit in commits}
result = {}
for item in item_list:
commit = commits_by_id[item.commit_id]
result[item] = {
'user': author_objs.get(commit.author_id, {}),
'message': commit.message
}
return result
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.id),
'org_id': obj.organization_id,
'author': attrs.get('user', {}),
'commit_message': attrs.get('message', ''),
'filename': obj.filename,
'type': obj.type
}
|
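The refactoring in this record generalizes beyond Sentry's serializer: bind the result of a repeated dictionary lookup to a local name so the hash lookup runs once per loop iteration instead of once per field. A minimal sketch of the same pattern with hypothetical data (nothing here beyond the standard library):

# Hypothetical data standing in for commits_by_id; only the
# lookup-hoisting pattern is taken from the record above.
records_by_id = {
    1: {'author': 'ada', 'message': 'fix crash'},
    2: {'author': 'alan', 'message': 'add feature'},
}

def summarize(item_ids):
    result = {}
    for item_id in item_ids:
        record = records_by_id[item_id]  # one lookup, reused for every field
        result[item_id] = {
            'author': record['author'],
            'message': record['message'],
        }
    return result

print(summarize([1, 2]))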
98b317f0727b53cee2faefb0a7eaa977bceb13d5
|
src/ggrc/migrations/versions/20160317173315_3715694bd315_rename_date_columns_in_requests.py
|
src/ggrc/migrations/versions/20160317173315_3715694bd315_rename_date_columns_in_requests.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: samo@reciprocitylabs.com
# Maintained By: samo@reciprocitylabs.com
"""
Rename date columns in requests
Create Date: 2016-03-17 17:33:15.817255
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3715694bd315'
down_revision = '1894405f14ef'
def upgrade():
op.alter_column('requests', 'requested_on', new_column_name='start_date',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'due_on', new_column_name='end_date',
existing_type=sa.Date(), nullable=False)
def downgrade():
op.alter_column('requests', 'start_date', new_column_name='requested_on',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'end_date', new_column_name='due_on',
existing_type=sa.Date(), nullable=False)
|
Rename relevant columns on the requests table
|
Rename relevant columns on the requests table
|
Python
|
apache-2.0
|
edofic/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,kr41/ggrc-core
|
Rename relevant columns on the requests table
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: samo@reciprocitylabs.com
# Maintained By: samo@reciprocitylabs.com
"""
Rename date columns in requests
Create Date: 2016-03-17 17:33:15.817255
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3715694bd315'
down_revision = '1894405f14ef'
def upgrade():
op.alter_column('requests', 'requested_on', new_column_name='start_date',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'due_on', new_column_name='end_date',
existing_type=sa.Date(), nullable=False)
def downgrade():
op.alter_column('requests', 'start_date', new_column_name='requested_on',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'end_date', new_column_name='due_on',
existing_type=sa.Date(), nullable=False)
|
<commit_before><commit_msg>Rename relevant columns on the requests table<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: samo@reciprocitylabs.com
# Maintained By: samo@reciprocitylabs.com
"""
Rename date columns in requests
Create Date: 2016-03-17 17:33:15.817255
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3715694bd315'
down_revision = '1894405f14ef'
def upgrade():
op.alter_column('requests', 'requested_on', new_column_name='start_date',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'due_on', new_column_name='end_date',
existing_type=sa.Date(), nullable=False)
def downgrade():
op.alter_column('requests', 'start_date', new_column_name='requested_on',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'end_date', new_column_name='due_on',
existing_type=sa.Date(), nullable=False)
|
Rename relevant columns on the requests table# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: samo@reciprocitylabs.com
# Maintained By: samo@reciprocitylabs.com
"""
Rename date columns in requests
Create Date: 2016-03-17 17:33:15.817255
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3715694bd315'
down_revision = '1894405f14ef'
def upgrade():
op.alter_column('requests', 'requested_on', new_column_name='start_date',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'due_on', new_column_name='end_date',
existing_type=sa.Date(), nullable=False)
def downgrade():
op.alter_column('requests', 'start_date', new_column_name='requested_on',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'end_date', new_column_name='due_on',
existing_type=sa.Date(), nullable=False)
|
<commit_before><commit_msg>Rename relevant columns on the requests table<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: samo@reciprocitylabs.com
# Maintained By: samo@reciprocitylabs.com
"""
Rename date columns in requests
Create Date: 2016-03-17 17:33:15.817255
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3715694bd315'
down_revision = '1894405f14ef'
def upgrade():
op.alter_column('requests', 'requested_on', new_column_name='start_date',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'due_on', new_column_name='end_date',
existing_type=sa.Date(), nullable=False)
def downgrade():
op.alter_column('requests', 'start_date', new_column_name='requested_on',
existing_type=sa.Date(), nullable=False)
op.alter_column('requests', 'end_date', new_column_name='due_on',
existing_type=sa.Date(), nullable=False)
|
|
c0ad7a6e3048b7691daabeef7779f044709d6a81
|
admin/simpleloadtest.py
|
admin/simpleloadtest.py
|
# A simple loadtest using locust.
# launch using: locust -f simpleloadtest.py --host=http://pencilcode.net
from locust import HttpLocust, TaskSet, task
import simplejson, random
class MyTaskSet(TaskSet):
def userbase(self, user):
return self.client.base_url.replace('//', '//' + user + '.')
def topget(self, url):
return self.client.get(url, headers={"User-Agent":"locust"})
def myget(self, user, url):
return self.client.get(self.userbase(user) + url,
headers={"User-Agent":"locust"})
def mypost(self, user, url, data):
return self.client.post(self.userbase(user) + url, data,
headers={"User-Agent":"locust"})
# @task(1)
def index(self):
for url in ['/', '/welcome.css', '/image/vpencil-20-64.png',
'/image/art.png', '/image/music.png', '/image/adventure.png',
'/lib/jquery.js', '/lib/jquery.autocomplete.min.js',
'/load/?callback=loadusers']:
self.topget(url)
for url in ['/home/promo1', '/home/goldwheel-code.png',
'/lib/seedrandom.js', '/turtlebits.js']:
self.myget('promo', url)
# @task(1)
def edit(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' not in randuser['mode']:
return
for url in ['/edit/', '/editor.js', '/favicon.ico',
'/apple-touch-icon.png', '/load']:
mydir = self.myget(randname, url)
@task(1)
def browserandom(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' in randuser['mode']:
try:
mydir = self.myget(randname, '/load/').json()
except:
print 'error listing', randuser
return
randfile = random.choice(mydir['list'])['name']
try:
self.myget(randname, '/load/' + randfile).json()
except:
print 'error reading', randuser, randfile
@task(1)
def livetestsave(self):
myok = self.mypost('livetest', '/save/loadtest', {
'data': 'pen red\nfor [1..4]\n fd 100\n rt 90',
'key': '123'
}).json()
class MyLocust(HttpLocust):
task_set = MyTaskSet
min_wait = 5000
max_wait = 15000
|
Add a simple load test.
|
Add a simple load test.
|
Python
|
mit
|
davidbau/pencilcode,davidbau/pencilcode,sakagg/pencilcode,dweintrop/pencilcode,cacticouncil/pencilcode,sakagg/pencilcode,Dinuka2013513/pencilcode,davidbau/pencilcode,davidbau/pencilcode,Dinuka2013513/pencilcode,xinan/pencilcode,davidbau/pencilcode,PencilCode/pencilcode,cacticouncil/pencilcode,cacticouncil/pencilcode,dweintrop/pencilcode,xinan/pencilcode,Dinuka2013513/pencilcode,PencilCode/pencilcode,PencilCode/pencilcode,xinan/pencilcode,sakagg/pencilcode,Dinuka2013513/pencilcode,cacticouncil/pencilcode,PencilCode/pencilcode,xinan/pencilcode,dweintrop/pencilcode,dweintrop/pencilcode,sakagg/pencilcode
|
Add a simple load test.
|
# A simple loadtest using locust.
# launch using: locust -f simpleloadtest.py --host=http://pencilcode.net
from locust import HttpLocust, TaskSet, task
import simplejson, random
class MyTaskSet(TaskSet):
def userbase(self, user):
return self.client.base_url.replace('//', '//' + user + '.')
def topget(self, url):
return self.client.get(url, headers={"User-Agent":"locust"})
def myget(self, user, url):
return self.client.get(self.userbase(user) + url,
headers={"User-Agent":"locust"})
def mypost(self, user, url, data):
return self.client.post(self.userbase(user) + url, data,
headers={"User-Agent":"locust"})
# @task(1)
def index(self):
for url in ['/', '/welcome.css', '/image/vpencil-20-64.png',
'/image/art.png', '/image/music.png', '/image/adventure.png',
'/lib/jquery.js', '/lib/jquery.autocomplete.min.js',
'/load/?callback=loadusers']:
self.topget(url)
for url in ['/home/promo1', '/home/goldwheel-code.png',
'/lib/seedrandom.js', '/turtlebits.js']:
self.myget('promo', url)
# @task(1)
def edit(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' not in randuser['mode']:
return
for url in ['/edit/', '/editor.js', '/favicon.ico',
'/apple-touch-icon.png', '/load']:
mydir = self.myget(randname, url)
@task(1)
def browserandom(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' in randuser['mode']:
try:
mydir = self.myget(randname, '/load/').json()
except:
print 'error listing', randuser
return
randfile = random.choice(mydir['list'])['name']
try:
self.myget(randname, '/load/' + randfile).json()
except:
print 'error reading', randuser, randfile
@task(1)
def livetestsave(self):
myok = self.mypost('livetest', '/save/loadtest', {
'data': 'pen red\nfor [1..4]\n fd 100\n rt 90',
'key': '123'
}).json()
class MyLocust(HttpLocust):
task_set = MyTaskSet
min_wait = 5000
max_wait = 15000
|
<commit_before><commit_msg>Add a simple load test.<commit_after>
|
# A simple loadtest using locust.
# launch using: locust -f simpleloadtest.py --host=http://pencilcode.net
from locust import HttpLocust, TaskSet, task
import simplejson, random
class MyTaskSet(TaskSet):
def userbase(self, user):
return self.client.base_url.replace('//', '//' + user + '.')
def topget(self, url):
return self.client.get(url, headers={"User-Agent":"locust"})
def myget(self, user, url):
return self.client.get(self.userbase(user) + url,
headers={"User-Agent":"locust"})
def mypost(self, user, url, data):
return self.client.post(self.userbase(user) + url, data,
headers={"User-Agent":"locust"})
# @task(1)
def index(self):
for url in ['/', '/welcome.css', '/image/vpencil-20-64.png',
'/image/art.png', '/image/music.png', '/image/adventure.png',
'/lib/jquery.js', '/lib/jquery.autocomplete.min.js',
'/load/?callback=loadusers']:
self.topget(url)
for url in ['/home/promo1', '/home/goldwheel-code.png',
'/lib/seedrandom.js', '/turtlebits.js']:
self.myget('promo', url)
# @task(1)
def edit(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' not in randuser['mode']:
return
for url in ['/edit/', '/editor.js', '/favicon.ico',
'/apple-touch-icon.png', '/load']:
mydir = self.myget(randname, url)
@task(1)
def browserandom(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' in randuser['mode']:
try:
mydir = self.myget(randname, '/load/').json()
except:
print 'error listing', randuser
return
randfile = random.choice(mydir['list'])['name']
try:
self.myget(randname, '/load/' + randfile).json()
except:
print 'error reading', randuser, randfile
@task(1)
def livetestsave(self):
myok = self.mypost('livetest', '/save/loadtest', {
'data': 'pen red\nfor [1..4]\n fd 100\n rt 90',
'key': '123'
}).json()
class MyLocust(HttpLocust):
task_set = MyTaskSet
min_wait = 5000
max_wait = 15000
|
Add a simple load test.# A simple loadtest using locust.
# launch using: locust -f simpleloadtest.py --host=http://pencilcode.net
from locust import HttpLocust, TaskSet, task
import simplejson, random
class MyTaskSet(TaskSet):
def userbase(self, user):
return self.client.base_url.replace('//', '//' + user + '.')
def topget(self, url):
return self.client.get(url, headers={"User-Agent":"locust"})
def myget(self, user, url):
return self.client.get(self.userbase(user) + url,
headers={"User-Agent":"locust"})
def mypost(self, user, url, data):
return self.client.post(self.userbase(user) + url, data,
headers={"User-Agent":"locust"})
# @task(1)
def index(self):
for url in ['/', '/welcome.css', '/image/vpencil-20-64.png',
'/image/art.png', '/image/music.png', '/image/adventure.png',
'/lib/jquery.js', '/lib/jquery.autocomplete.min.js',
'/load/?callback=loadusers']:
self.topget(url)
for url in ['/home/promo1', '/home/goldwheel-code.png',
'/lib/seedrandom.js', '/turtlebits.js']:
self.myget('promo', url)
# @task(1)
def edit(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' not in randuser['mode']:
return
for url in ['/edit/', '/editor.js', '/favicon.ico',
'/apple-touch-icon.png', '/load']:
mydir = self.myget(randname, url)
@task(1)
def browserandom(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' in randuser['mode']:
try:
mydir = self.myget(randname, '/load/').json()
except:
print 'error listing', randuser
return
randfile = random.choice(mydir['list'])['name']
try:
self.myget(randname, '/load/' + randfile).json()
except:
print 'error reading', randuser, randfile
@task(1)
def livetestsave(self):
myok = self.mypost('livetest', '/save/loadtest', {
'data': 'pen red\nfor [1..4]\n fd 100\n rt 90',
'key': '123'
}).json()
class MyLocust(HttpLocust):
task_set = MyTaskSet
min_wait = 5000
max_wait = 15000
|
<commit_before><commit_msg>Add a simple load test.<commit_after># A simple loadtest using locust.
# launch using: locust -f simpleloadtest.py --host=http://pencilcode.net
from locust import HttpLocust, TaskSet, task
import simplejson, random
class MyTaskSet(TaskSet):
def userbase(self, user):
return self.client.base_url.replace('//', '//' + user + '.')
def topget(self, url):
return self.client.get(url, headers={"User-Agent":"locust"})
def myget(self, user, url):
return self.client.get(self.userbase(user) + url,
headers={"User-Agent":"locust"})
def mypost(self, user, url, data):
return self.client.post(self.userbase(user) + url, data,
headers={"User-Agent":"locust"})
# @task(1)
def index(self):
for url in ['/', '/welcome.css', '/image/vpencil-20-64.png',
'/image/art.png', '/image/music.png', '/image/adventure.png',
'/lib/jquery.js', '/lib/jquery.autocomplete.min.js',
'/load/?callback=loadusers']:
self.topget(url)
for url in ['/home/promo1', '/home/goldwheel-code.png',
'/lib/seedrandom.js', '/turtlebits.js']:
self.myget('promo', url)
# @task(1)
def edit(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' not in randuser['mode']:
return
for url in ['/edit/', '/editor.js', '/favicon.ico',
'/apple-touch-icon.png', '/load']:
mydir = self.myget(randname, url)
@task(1)
def browserandom(self):
topdir = self.topget("/load/").json()
randuser = random.choice(topdir['list'])
randname = randuser['name']
if 'd' in randuser['mode']:
try:
mydir = self.myget(randname, '/load/').json()
except:
print 'error listing', randuser
return
randfile = random.choice(mydir['list'])['name']
try:
self.myget(randname, '/load/' + randfile).json()
except:
print 'error reading', randuser, randfile
@task(1)
def livetestsave(self):
myok = self.mypost('livetest', '/save/loadtest', {
'data': 'pen red\nfor [1..4]\n fd 100\n rt 90',
'key': '123'
}).json()
class MyLocust(HttpLocust):
task_set = MyTaskSet
min_wait = 5000
max_wait = 15000
|
|
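The script above targets the legacy HttpLocust API with Python 2 print statements. On Locust 1.0 and later the same skeleton would look roughly like the sketch below; the "/load/" endpoint is carried over from the record, but everything else is an assumption about a modern port, not the project's actual test:

from locust import HttpUser, task, between
import random

class BrowseUser(HttpUser):
    # between() in seconds replaces the old min_wait/max_wait milliseconds.
    wait_time = between(5, 15)

    @task
    def browserandom(self):
        # Same top-level directory listing as the record's topget("/load/").
        topdir = self.client.get('/load/', headers={'User-Agent': 'locust'}).json()
        entry = random.choice(topdir['list'])
        self.client.get('/load/' + entry['name'])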
78ebafc9364fac9bdad3ba3172c645f35673bec7
|
examples/gui_example.py
|
examples/gui_example.py
|
from topo.inputsheet import *
from topo.kernelfactory import *
from topo.simulator import *
from topo.rfsom import RFSOM
from topo.image import ImageSaver
from math import pi
from topo.params import Dynamic
import random
import pdb #debugger
###########################################
# Set parameters
print "Setting parameters..."
# input generation params
InputSheet.period = 1.0
InputSheet.density = 900
FuzzyLineFactory.x = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.y = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.theta = Dynamic(lambda :random.uniform(-pi,pi))
FuzzyLineFactory.width = 0.02
FuzzyLineFactory.height = 0.9
FuzzyLineFactory.bounds = BoundingBox(points=((-0.8,-0.8),(0.8,0.8)))
# rf som parameters
RFSOM.density = 2500
RFSOM.weights_factory = UniformRandomFactory(bounds=BoundingBox(points=((-0.1,-0.1),(0.1,0.1))))
RFSOM.training_length = 10000
RFSOM.radius_0 = 0.1
# image saver parameters
ImageSaver.file_format='png'
ImageSaver.time_format='%0.4d'
###########################################
# build simulation
base.min_print_level = base.MESSAGE
print "Creating simulation objects..."
s = topo.simulator.Simulator()
retina = InputSheet(input_generator=FuzzyLineFactory(),name='Retina')
V1 = RFSOM(name='V1')
save = ImageSaver(name='RFSOM')
s.connect(retina,V1,delay=1)
# Uncomment the connections to the image saver, to save all the activation
# images to disk.
#s.connect(retina,save,dest_port='retina',delay=2)
#s.connect(V1,save,dest_port='V1',delay=1)
topo.gui.link_to_sim(s)
s.run(200)
#V1.projections['Retina'][0].plot_rfs()
# import profile,pstats
#
# p = profile.Profile()
# p.runctx('s.run(10)',locals(),globals())
|
Copy of rfsom_example.py except with 200 steps instead of 10000, and a topo.gui.link_to_sim() call
|
Copy of rfsom_example.py except with 200 steps instead of 10000, and a topo.gui.link_to_sim() call
|
Python
|
bsd-3-clause
|
ioam/svn-history,ioam/svn-history,ioam/svn-history,ioam/svn-history,ioam/svn-history
|
Copy of rfsom_example.py except with 200 steps instead of 10000, and a topo.gui.link_to_sim() call
|
from topo.inputsheet import *
from topo.kernelfactory import *
from topo.simulator import *
from topo.rfsom import RFSOM
from topo.image import ImageSaver
from math import pi
from topo.params import Dynamic
import random
import pdb #debugger
###########################################
# Set parameters
print "Setting parameters..."
# input generation params
InputSheet.period = 1.0
InputSheet.density = 900
FuzzyLineFactory.x = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.y = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.theta = Dynamic(lambda :random.uniform(-pi,pi))
FuzzyLineFactory.width = 0.02
FuzzyLineFactory.height = 0.9
FuzzyLineFactory.bounds = BoundingBox(points=((-0.8,-0.8),(0.8,0.8)))
# rf som parameters
RFSOM.density = 2500
RFSOM.weights_factory = UniformRandomFactory(bounds=BoundingBox(points=((-0.1,-0.1),(0.1,0.1))))
RFSOM.training_length = 10000
RFSOM.radius_0 = 0.1
# image saver parameters
ImageSaver.file_format='png'
ImageSaver.time_format='%0.4d'
###########################################
# build simulation
base.min_print_level = base.MESSAGE
print "Creating simulation objects..."
s = topo.simulator.Simulator()
retina = InputSheet(input_generator=FuzzyLineFactory(),name='Retina')
V1 = RFSOM(name='V1')
save = ImageSaver(name='RFSOM')
s.connect(retina,V1,delay=1)
# Uncomment the connections to the image saver, to save all the activation
# images to disk.
#s.connect(retina,save,dest_port='retina',delay=2)
#s.connect(V1,save,dest_port='V1',delay=1)
topo.gui.link_to_sim(s)
s.run(200)
#V1.projections['Retina'][0].plot_rfs()
# import profile,pstats
#
# p = profile.Profile()
# p.runctx('s.run(10)',locals(),globals())
|
<commit_before><commit_msg>Copy of rfsom_example.py except with 200 steps instead of 10000, and a topo.gui.link_to_sim() call<commit_after>
|
from topo.inputsheet import *
from topo.kernelfactory import *
from topo.simulator import *
from topo.rfsom import RFSOM
from topo.image import ImageSaver
from math import pi
from topo.params import Dynamic
import random
import pdb #debugger
###########################################
# Set parameters
print "Setting parameters..."
# input generation params
InputSheet.period = 1.0
InputSheet.density = 900
FuzzyLineFactory.x = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.y = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.theta = Dynamic(lambda :random.uniform(-pi,pi))
FuzzyLineFactory.width = 0.02
FuzzyLineFactory.height = 0.9
FuzzyLineFactory.bounds = BoundingBox(points=((-0.8,-0.8),(0.8,0.8)))
# rf som parameters
RFSOM.density = 2500
RFSOM.weights_factory = UniformRandomFactory(bounds=BoundingBox(points=((-0.1,-0.1),(0.1,0.1))))
RFSOM.training_length = 10000
RFSOM.radius_0 = 0.1
# image saver parameters
ImageSaver.file_format='png'
ImageSaver.time_format='%0.4d'
###########################################
# build simulation
base.min_print_level = base.MESSAGE
print "Creating simulation objects..."
s = topo.simulator.Simulator()
retina = InputSheet(input_generator=FuzzyLineFactory(),name='Retina')
V1 = RFSOM(name='V1')
save = ImageSaver(name='RFSOM')
s.connect(retina,V1,delay=1)
# Uncomment the connections to the image saver, to save all the activation
# images to disk.
#s.connect(retina,save,dest_port='retina',delay=2)
#s.connect(V1,save,dest_port='V1',delay=1)
topo.gui.link_to_sim(s)
s.run(200)
#V1.projections['Retina'][0].plot_rfs()
# import profile,pstats
#
# p = profile.Profile()
# p.runctx('s.run(10)',locals(),globals())
|
Copy of rfsom_example.py except with 200 steps instead of 10000, and a topo.gui.link_to_sim() call
from topo.inputsheet import *
from topo.kernelfactory import *
from topo.simulator import *
from topo.rfsom import RFSOM
from topo.image import ImageSaver
from math import pi
from topo.params import Dynamic
import random
import pdb #debugger
###########################################
# Set parameters
print "Setting parameters..."
# input generation params
InputSheet.period = 1.0
InputSheet.density = 900
FuzzyLineFactory.x = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.y = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.theta = Dynamic(lambda :random.uniform(-pi,pi))
FuzzyLineFactory.width = 0.02
FuzzyLineFactory.height = 0.9
FuzzyLineFactory.bounds = BoundingBox(points=((-0.8,-0.8),(0.8,0.8)))
# rf som parameters
RFSOM.density = 2500
RFSOM.weights_factory = UniformRandomFactory(bounds=BoundingBox(points=((-0.1,-0.1),(0.1,0.1))))
RFSOM.training_length = 10000
RFSOM.radius_0 = 0.1
# image saver parameters
ImageSaver.file_format='png'
ImageSaver.time_format='%0.4d'
###########################################
# build simulation
base.min_print_level = base.MESSAGE
print "Creating simulation objects..."
s = topo.simulator.Simulator()
retina = InputSheet(input_generator=FuzzyLineFactory(),name='Retina')
V1 = RFSOM(name='V1')
save = ImageSaver(name='RFSOM')
s.connect(retina,V1,delay=1)
# Uncomment the connections to the image saver, to save all the activation
# images to disk.
#s.connect(retina,save,dest_port='retina',delay=2)
#s.connect(V1,save,dest_port='V1',delay=1)
topo.gui.link_to_sim(s)
s.run(200)
#V1.projections['Retina'][0].plot_rfs()
# import profile,pstats
#
# p = profile.Profile()
# p.runctx('s.run(10)',locals(),globals())
|
<commit_before><commit_msg>Copy of rfsom_example.py except with 200 steps instead of 10000, and a topo.gui.link_to_sim() call<commit_after>
from topo.inputsheet import *
from topo.kernelfactory import *
from topo.simulator import *
from topo.rfsom import RFSOM
from topo.image import ImageSaver
from math import pi
from topo.params import Dynamic
import random
import pdb #debugger
###########################################
# Set parameters
print "Setting parameters..."
# input generation params
InputSheet.period = 1.0
InputSheet.density = 900
FuzzyLineFactory.x = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.y = Dynamic(lambda : random.uniform(-0.5,0.5))
FuzzyLineFactory.theta = Dynamic(lambda :random.uniform(-pi,pi))
FuzzyLineFactory.width = 0.02
FuzzyLineFactory.height = 0.9
FuzzyLineFactory.bounds = BoundingBox(points=((-0.8,-0.8),(0.8,0.8)))
# rf som parameters
RFSOM.density = 2500
RFSOM.weights_factory = UniformRandomFactory(bounds=BoundingBox(points=((-0.1,-0.1),(0.1,0.1))))
RFSOM.training_length = 10000
RFSOM.radius_0 = 0.1
# image saver parameters
ImageSaver.file_format='png'
ImageSaver.time_format='%0.4d'
###########################################
# build simulation
base.min_print_level = base.MESSAGE
print "Creating simulation objects..."
s = topo.simulator.Simulator()
retina = InputSheet(input_generator=FuzzyLineFactory(),name='Retina')
V1 = RFSOM(name='V1')
save = ImageSaver(name='RFSOM')
s.connect(retina,V1,delay=1)
# Uncomment the connections to the image saver, to save all the activation
# images to disk.
#s.connect(retina,save,dest_port='retina',delay=2)
#s.connect(V1,save,dest_port='V1',delay=1)
topo.gui.link_to_sim(s)
s.run(200)
#V1.projections['Retina'][0].plot_rfs()
# import profile,pstats
#
# p = profile.Profile()
# p.runctx('s.run(10)',locals(),globals())
|
|
ff3ba46f20ec06c5362b948019530fa6ffb43475
|
mzalendo/core/management/commands/core_check_mp_aspirants.py
|
mzalendo/core/management/commands/core_check_mp_aspirants.py
|
from core.models import *
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Go through people who are MP aspirants, and check that they're associated with a 2013 constituency"
def handle_noargs(self, **options):
next_session = ParliamentarySession.objects.get(slug="na2013")
for person in Person.objects.all():
if not person.is_aspirant():
continue
aspirant_mp_positions = [ap for ap in person.aspirant_positions() if ap.title.slug == 'aspirant-mp']
if not aspirant_mp_positions:
continue
print person
if len(aspirant_mp_positions) > 1:
print " Warning: more than one Aspirant MP position:"
for amp in aspirant_mp_positions:
print " ", amp
continue
amp = aspirant_mp_positions[0]
if amp.place.parliamentary_session != next_session:
print """ Warning: the place associated with this Aspirant MP position
%s - %s
is for the wrong parliamentary session. It should be a
place associated with the parliamentary session: %s""" % (amp.place,
amp.place.parliamentary_session,
next_session)
|
Add a script to check the aspirant MPs
|
Add a script to check the aspirant MPs
|
Python
|
agpl-3.0
|
ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,geoffkilpin/pombola,geoffkilpin/pombola,patricmutwiri/pombola,mysociety/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,hzj123/56th,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola
|
Add a script to check the aspirant MPs
|
from core.models import *
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Go through people who are MP aspirants, and check that they're associated with a 2013 constituency"
def handle_noargs(self, **options):
next_session = ParliamentarySession.objects.get(slug="na2013")
for person in Person.objects.all():
if not person.is_aspirant():
continue
aspirant_mp_positions = [ap for ap in person.aspirant_positions() if ap.title.slug == 'aspirant-mp']
if not aspirant_mp_positions:
continue
print person
if len(aspirant_mp_positions) > 1:
print " Warning: more than one Aspirant MP position:"
for amp in aspirant_mp_positions:
print " ", amp
continue
amp = aspirant_mp_positions[0]
if amp.place.parliamentary_session != next_session:
print """ Warning: the place associated with this Aspirant MP position
%s - %s
is for the wrong parliamentary session. It should be a
place associated with the parliamentary session: %s""" % (amp.place,
amp.place.parliamentary_session,
next_session)
|
<commit_before><commit_msg>Add a script to check the aspirant MPs<commit_after>
|
from core.models import *
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Go through people who are MP aspirants, and check that they're associated with a 2013 constituency"
def handle_noargs(self, **options):
next_session = ParliamentarySession.objects.get(slug="na2013")
for person in Person.objects.all():
if not person.is_aspirant():
continue
aspirant_mp_positions = [ap for ap in person.aspirant_positions() if ap.title.slug == 'aspirant-mp']
if not aspirant_mp_positions:
continue
print person
if len(aspirant_mp_positions) > 1:
print " Warning: more than one Aspirant MP position:"
for amp in aspirant_mp_positions:
print " ", amp
continue
amp = aspirant_mp_positions[0]
if amp.place.parliamentary_session != next_session:
print """ Warning: the place associated with this Aspirant MP position
%s - %s
is for the wrong parliamentary session. It should be a
place associated with the parliamentary session: %s""" % (amp.place,
amp.place.parliamentary_session,
next_session)
|
Add a script to check the aspirant MPsfrom core.models import *
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Go through people who are MP aspirants, and check that they're associated with a 2013 constituency"
def handle_noargs(self, **options):
next_session = ParliamentarySession.objects.get(slug="na2013")
for person in Person.objects.all():
if not person.is_aspirant():
continue
aspirant_mp_positions = [ap for ap in person.aspirant_positions() if ap.title.slug == 'aspirant-mp']
if not aspirant_mp_positions:
continue
print person
if len(aspirant_mp_positions) > 1:
print " Warning: more than one Aspirant MP position:"
for amp in aspirant_mp_positions:
print " ", amp
continue
amp = aspirant_mp_positions[0]
if amp.place.parliamentary_session != next_session:
print """ Warning: the place associated with this Aspirant MP position
%s - %s
is for the wrong parliamentary session. It should be a
place associated with the parliamentary session: %s""" % (amp.place,
amp.place.parliamentary_session,
next_session)
|
<commit_before><commit_msg>Add a script to check the aspirant MPs<commit_after>from core.models import *
from django.core.management.base import NoArgsCommand, CommandError
class Command(NoArgsCommand):
help = "Go through people who are MP aspirants, and check that they're associated with a 2013 constituency"
def handle_noargs(self, **options):
next_session = ParliamentarySession.objects.get(slug="na2013")
for person in Person.objects.all():
if not person.is_aspirant():
continue
aspirant_mp_positions = [ap for ap in person.aspirant_positions() if ap.title.slug == 'aspirant-mp']
if not aspirant_mp_positions:
continue
print person
if len(aspirant_mp_positions) > 1:
print " Warning: more than one Aspirant MP position:"
for amp in aspirant_mp_positions:
print " ", amp
continue
amp = aspirant_mp_positions[0]
if amp.place.parliamentary_session != next_session:
print """ Warning: the place associated with this Aspirant MP position
%s - %s
is for the wrong parliamentary session. It should be a
place associated with the parliamentary session: %s""" % (amp.place,
amp.place.parliamentary_session,
next_session)
|
|
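NoArgsCommand was removed in Django 1.10, and the record's bare print statements are Python 2 only. A hedged sketch of the same check on a modern Django, assuming the record's models remain importable under the same names:

from django.core.management.base import BaseCommand
from core.models import Person, ParliamentarySession  # names taken from the record

class Command(BaseCommand):
    help = "Check that MP aspirants are associated with a 2013 constituency"

    def handle(self, *args, **options):
        next_session = ParliamentarySession.objects.get(slug='na2013')
        for person in Person.objects.all():
            if not person.is_aspirant():
                continue
            positions = [p for p in person.aspirant_positions()
                         if p.title.slug == 'aspirant-mp']
            for amp in positions:
                if amp.place.parliamentary_session != next_session:
                    # stdout.write is the BaseCommand-idiomatic print.
                    self.stdout.write('Warning: %s has a place in the wrong '
                                      'parliamentary session' % person)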
e5dad84c986e94fe85c5923130ecc9e379ae2c93
|
camoco/cli/commands/Analysis.py
|
camoco/cli/commands/Analysis.py
|
import camoco as co
class Analysis(object):
'''
Perform an analysis based on CLI arguments:
set up, event loop, tear down
'''
def __init__(self):
# Init needs to just store args and other analysis level data
self.args = args
self.tag = "Analysis"
def __call__(self):
set_up()
event_loop()
tear_down()
def set_up(self):
pass
def event_loop(self):
pass
def tear_down(self):
pass
# ------------------------------------------------------------------------
# Extra methods should fit into the above methods
def _generate_output_file(self, filename):
if args.out != sys.stdout:
args.out = "{}_Locality.csv".format(args.out.replace('.csv',''))
if os.path.dirname(args.out) != '':
os.makedirs(os.path.dirname(args.out),exist_ok=True)
if os.path.exists("{}_Locality.csv".format(args.out.replace('.csv',''))):
print(
"{}_Locality.csv exists! Skipping!".format(
args.out.replace('.csv','')
)
)
return None
def _build_camoco_objects(self):
pass
|
Add stub class for future analysis class
|
Add stub class for future analysis class
|
Python
|
mit
|
schae234/Camoco,schae234/Camoco
|
Add stub class for future analysis class
|
import camoco as co
class Analysis(object):
'''
Perform an analysis based on CLI arguments:
set up, event loop, tear down
'''
def __init__(self):
# Init needs to just store args and other analysis level data
self.args = args
self.tag = "Analysis"
def __call__(self):
set_up()
event_loop()
tear_down()
def set_up(self):
pass
def event_loop(self):
pass
def tear_down(self):
pass
# ------------------------------------------------------------------------
# Extra methods should fit into the above methods
def _generate_output_file(self, filename):
if args.out != sys.stdout:
args.out = "{}_Locality.csv".format(args.out.replace('.csv',''))
if os.path.dirname(args.out) != '':
os.makedirs(os.path.dirname(args.out),exist_ok=True)
if os.path.exists("{}_Locality.csv".format(args.out.replace('.csv',''))):
print(
"{}_Locality.csv exists! Skipping!".format(
args.out.replace('.csv','')
)
)
return None
def _build_camoco_objects(self):
pass
|
<commit_before><commit_msg>Add stub class for future analysis class<commit_after>
|
import camoco as co
class Analysis(object):
'''
Perform an analysis based on CLI arguments:
set up, event loop, tear down
'''
def __init__(self):
# Init needs to just store args and other analysis level data
self.args = args
self.tag = "Analysis"
def __call__(self):
set_up()
event_loop()
tear_down()
def set_up(self):
pass
def event_loop(self):
pass
def tear_down(self):
pass
# ------------------------------------------------------------------------
# Extra methods should fit into the above methods
def _generate_output_file(self, filename):
if args.out != sys.stdout:
args.out = "{}_Locality.csv".format(args.out.replace('.csv',''))
if os.path.dirname(args.out) != '':
os.makedirs(os.path.dirname(args.out),exist_ok=True)
if os.path.exists("{}_Locality.csv".format(args.out.replace('.csv',''))):
print(
"{}_Locality.csv exists! Skipping!".format(
args.out.replace('.csv','')
)
)
return None
def _build_camoco_objects(self):
pass
|
Add stub class for future analysis classimport camoco as co
class Analysis(object):
'''
Perform an analysis based on CLI arguments:
set up, event loop, tear down
'''
def __init__(self):
# Init needs to just store args and other analysis level data
self.args = args
self.tag = "Analysis"
def __call__(self):
set_up()
event_loop()
tear_down()
def set_up(self):
pass
def event_loop(self):
pass
def tear_down(self):
pass
# ------------------------------------------------------------------------
# Extra methods should fit into the above methods
def _generate_output_file(self, filename):
if args.out != sys.stdout:
args.out = "{}_Locality.csv".format(args.out.replace('.csv',''))
if os.path.dirname(args.out) != '':
os.makedirs(os.path.dirname(args.out),exist_ok=True)
if os.path.exists("{}_Locality.csv".format(args.out.replace('.csv',''))):
print(
"{}_Locality.csv exists! Skipping!".format(
args.out.replace('.csv','')
)
)
return None
def _build_camoco_objects(self):
pass
|
<commit_before><commit_msg>Add stub class for future analysis class<commit_after>import camoco as co
class Analysis(object):
'''
Perform an analysis based on CLI arguments:
set up, event loop, tear down
'''
def __init__(self):
# Init needs to just store args and other analysis level data
self.args = args
self.tag = "Analysis"
def __call__(self):
set_up()
event_loop()
tear_down()
def set_up(self):
pass
def event_loop(self):
pass
def tear_down(self):
pass
# ------------------------------------------------------------------------
# Extra methods should fit into the above methods
def _generate_output_file(self, filename):
if args.out != sys.stdout:
args.out = "{}_Locality.csv".format(args.out.replace('.csv',''))
if os.path.dirname(args.out) != '':
os.makedirs(os.path.dirname(args.out),exist_ok=True)
if os.path.exists("{}_Locality.csv".format(args.out.replace('.csv',''))):
print(
"{}_Locality.csv exists! Skipping!".format(
args.out.replace('.csv','')
)
)
return None
def _build_camoco_objects(self):
pass
|
|
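As committed, the stub has two latent bugs: __init__ stores an undefined name args, and __call__ invokes set_up()/event_loop()/tear_down() as bare names rather than as methods; _generate_output_file also relies on os and sys without importing them. A corrected sketch, changing nothing else about the design:

import os
import sys

class Analysis(object):
    '''
    Perform an analysis based on CLI arguments:
    set up, event loop, tear down
    '''
    def __init__(self, args):
        # args is now an explicit parameter instead of an undefined name.
        self.args = args
        self.tag = 'Analysis'

    def __call__(self):
        # Dispatch through self; the record called the bare names.
        self.set_up()
        self.event_loop()
        self.tear_down()

    def set_up(self):
        pass

    def event_loop(self):
        pass

    def tear_down(self):
        pass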
e83a58de8b0ac7c2d8ed001d5d4d609d2005d7d8
|
approximate_patern_count.py
|
approximate_patern_count.py
|
from hamming_distance import hamming_distance
def approximate_pattern_count(text, pattern, d):
count = 0
pattern_length = len(pattern)
for i in range(0, len(text) - pattern_length + 1):
_pattern = text[i:i + pattern_length]
if (hamming_distance(pattern, _pattern) <= d):
count += 1
return count
if __name__ == "__main__":
text = raw_input("Text: ")
pattern = raw_input("Pattern: ")
d = int(raw_input("D: "))
print approximate_pattern_count(text, pattern, d)
|
Add approximate pattern count algorithm
|
Add approximate pattern count algorithm
|
Python
|
mit
|
dennis95stumm/bioinformatics_algorithms,dennis95stumm/bioinformatics_algorithms
|
Add approximate pattern count algorithm
|
from hamming_distance import hamming_distance
def approximate_pattern_count(text, pattern, d):
count = 0
pattern_length = len(pattern)
for i in range(0, len(text) - pattern_length + 1):
_pattern = text[i:i + pattern_length]
if (hamming_distance(pattern, _pattern) <= d):
count += 1
return count
if __name__ == "__main__":
text = raw_input("Text: ")
pattern = raw_input("Pattern: ")
d = int(raw_input("D: "))
print approximate_pattern_count(text, pattern, d)
|
<commit_before><commit_msg>Add approximate pattern count algorithm<commit_after>
|
from hamming_distance import hamming_distance
def approximate_pattern_count(text, pattern, d):
count = 0
pattern_length = len(pattern)
for i in range(0, len(text) - pattern_length + 1):
_pattern = text[i:i + pattern_length]
if (hamming_distance(pattern, _pattern) <= d):
count += 1
return count
if __name__ == "__main__":
text = raw_input("Text: ")
pattern = raw_input("Pattern: ")
d = int(raw_input("D: "))
print approximate_pattern_count(text, pattern, d)
|
Add approximate pattern count algorithmfrom hamming_distance import hamming_distance
def approximate_pattern_count(text, pattern, d):
count = 0
pattern_length = len(pattern)
for i in range(0, len(text) - pattern_length + 1):
_pattern = text[i:i + pattern_length]
if (hamming_distance(pattern, _pattern) <= d):
count += 1
return count
if __name__ == "__main__":
text = raw_input("Text: ")
pattern = raw_input("Pattern: ")
d = int(raw_input("D: "))
print approximate_pattern_count(text, pattern, d)
|
<commit_before><commit_msg>Add approximate pattern count algorithm<commit_after>from hamming_distance import hamming_distance
def approximate_pattern_count(text, pattern, d):
count = 0
pattern_length = len(pattern)
for i in range(0, len(text) - pattern_length + 1):
_pattern = text[i:i + pattern_length]
if (hamming_distance(pattern, _pattern) <= d):
count += 1
return count
if __name__ == "__main__":
text = raw_input("Text: ")
pattern = raw_input("Pattern: ")
d = int(raw_input("D: "))
print approximate_pattern_count(text, pattern, d)
|
|
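The record imports hamming_distance without showing it; a minimal sketch of the conventional implementation it presumably wraps, assuming equal-length inputs as the caller guarantees:

def hamming_distance(p, q):
    # Count positions where the two equal-length strings differ.
    return sum(1 for a, b in zip(p, q) if a != b)

print(hamming_distance('GGGCCGTTGGT', 'GGACCGTTGAC'))  # -> 3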
4153237428b73c85062d1842aecc96c8ca42d06e
|
mainapp/migrations/0010_auto_20170911_1858.py
|
mainapp/migrations/0010_auto_20170911_1858.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-11 16:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0009_auto_20170911_1201'),
]
operations = [
migrations.RenameField(
model_name='meeting',
old_name='locations',
new_name='location',
),
]
|
Fix naming of location in meeting
|
Fix naming of location in meeting
|
Python
|
mit
|
meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent,meine-stadt-transparent/meine-stadt-transparent
|
Fix naming of location in meeting
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-11 16:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0009_auto_20170911_1201'),
]
operations = [
migrations.RenameField(
model_name='meeting',
old_name='locations',
new_name='location',
),
]
|
<commit_before><commit_msg>Fix naming of location in meeting<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-11 16:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0009_auto_20170911_1201'),
]
operations = [
migrations.RenameField(
model_name='meeting',
old_name='locations',
new_name='location',
),
]
|
Fix naming of location in meeting# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-11 16:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0009_auto_20170911_1201'),
]
operations = [
migrations.RenameField(
model_name='meeting',
old_name='locations',
new_name='location',
),
]
|
<commit_before><commit_msg>Fix naming of location in meeting<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-11 16:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0009_auto_20170911_1201'),
]
operations = [
migrations.RenameField(
model_name='meeting',
old_name='locations',
new_name='location',
),
]
|
|
d5950a23679753e5cfb70b4955b45dcf9298faec
|
corehq/apps/accounting/tests/test_wire_invoice.py
|
corehq/apps/accounting/tests/test_wire_invoice.py
|
from decimal import Decimal
from django.core import mail
from corehq.apps.accounting.tests.test_invoicing import BaseInvoiceTestCase
from corehq.apps.accounting import generator, utils, tasks
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.models import Invoice, WireInvoice
class TestWireInvoice(BaseInvoiceTestCase):
def setUp(self):
super(TestWireInvoice, self).setUp()
invoice_date = utils.months_from_date(self.subscription.date_start, 2)
tasks.generate_invoices(invoice_date)
invoice_date = utils.months_from_date(self.subscription.date_start, 3)
tasks.generate_invoices(invoice_date)
self.invoices = Invoice.objects.all()
self.domain_name = self.invoices[0].get_domain()
def tearDown(self):
super(TestWireInvoice, self).tearDown()
def test_factory(self):
factory = DomainWireInvoiceFactory(self.domain_name)
balance = Decimal(100)
mail.outbox = []
wi = factory.create_wire_invoice(balance)
self.assertEqual(wi.balance, balance)
self.assertEqual(wi.domain, self.domain.name)
self.assertEqual(len(mail.outbox), 1)
|
Test for wire invoice factory
|
Test for wire invoice factory
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq
|
Test for wire invoice factory
|
from decimal import Decimal
from django.core import mail
from corehq.apps.accounting.tests.test_invoicing import BaseInvoiceTestCase
from corehq.apps.accounting import generator, utils, tasks
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.models import Invoice, WireInvoice
class TestWireInvoice(BaseInvoiceTestCase):
def setUp(self):
super(TestWireInvoice, self).setUp()
invoice_date = utils.months_from_date(self.subscription.date_start, 2)
tasks.generate_invoices(invoice_date)
invoice_date = utils.months_from_date(self.subscription.date_start, 3)
tasks.generate_invoices(invoice_date)
self.invoices = Invoice.objects.all()
self.domain_name = self.invoices[0].get_domain()
def tearDown(self):
super(TestWireInvoice, self).tearDown()
def test_factory(self):
factory = DomainWireInvoiceFactory(self.domain_name)
balance = Decimal(100)
mail.outbox = []
wi = factory.create_wire_invoice(balance)
self.assertEqual(wi.balance, balance)
self.assertEqual(wi.domain, self.domain.name)
self.assertEqual(len(mail.outbox), 1)
|
<commit_before><commit_msg>Test for wire invoice factory<commit_after>
|
from decimal import Decimal
from django.core import mail
from corehq.apps.accounting.tests.test_invoicing import BaseInvoiceTestCase
from corehq.apps.accounting import generator, utils, tasks
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.models import Invoice, WireInvoice
class TestWireInvoice(BaseInvoiceTestCase):
def setUp(self):
super(TestWireInvoice, self).setUp()
invoice_date = utils.months_from_date(self.subscription.date_start, 2)
tasks.generate_invoices(invoice_date)
invoice_date = utils.months_from_date(self.subscription.date_start, 3)
tasks.generate_invoices(invoice_date)
self.invoices = Invoice.objects.all()
self.domain_name = self.invoices[0].get_domain()
def tearDown(self):
super(TestWireInvoice, self).tearDown()
def test_factory(self):
factory = DomainWireInvoiceFactory(self.domain_name)
balance = Decimal(100)
mail.outbox = []
wi = factory.create_wire_invoice(balance)
self.assertEqual(wi.balance, balance)
self.assertEqual(wi.domain, self.domain.name)
self.assertEqual(len(mail.outbox), 1)
|
Test for wire invoice factoryfrom decimal import Decimal
from django.core import mail
from corehq.apps.accounting.tests.test_invoicing import BaseInvoiceTestCase
from corehq.apps.accounting import generator, utils, tasks
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.models import Invoice, WireInvoice
class TestWireInvoice(BaseInvoiceTestCase):
def setUp(self):
super(TestWireInvoice, self).setUp()
invoice_date = utils.months_from_date(self.subscription.date_start, 2)
tasks.generate_invoices(invoice_date)
invoice_date = utils.months_from_date(self.subscription.date_start, 3)
tasks.generate_invoices(invoice_date)
self.invoices = Invoice.objects.all()
self.domain_name = self.invoices[0].get_domain()
def tearDown(self):
super(TestWireInvoice, self).tearDown()
def test_factory(self):
factory = DomainWireInvoiceFactory(self.domain_name)
balance = Decimal(100)
mail.outbox = []
wi = factory.create_wire_invoice(balance)
self.assertEqual(wi.balance, balance)
self.assertEqual(wi.domain, self.domain.name)
self.assertEqual(len(mail.outbox), 1)
|
<commit_before><commit_msg>Test for wire invoice factory<commit_after>from decimal import Decimal
from django.core import mail
from corehq.apps.accounting.tests.test_invoicing import BaseInvoiceTestCase
from corehq.apps.accounting import generator, utils, tasks
from corehq.apps.accounting.invoicing import DomainWireInvoiceFactory
from corehq.apps.accounting.models import Invoice, WireInvoice
class TestWireInvoice(BaseInvoiceTestCase):
def setUp(self):
super(TestWireInvoice, self).setUp()
invoice_date = utils.months_from_date(self.subscription.date_start, 2)
tasks.generate_invoices(invoice_date)
invoice_date = utils.months_from_date(self.subscription.date_start, 3)
tasks.generate_invoices(invoice_date)
self.invoices = Invoice.objects.all()
self.domain_name = self.invoices[0].get_domain()
def tearDown(self):
super(TestWireInvoice, self).tearDown()
def test_factory(self):
factory = DomainWireInvoiceFactory(self.domain_name)
balance = Decimal(100)
mail.outbox = []
wi = factory.create_wire_invoice(balance)
self.assertEqual(wi.balance, balance)
self.assertEqual(wi.domain, self.domain.name)
self.assertEqual(len(mail.outbox), 1)
|
|
d421a51be6b8bc0cc30228f26e3e22ea62814efa
|
bmi_ilamb/tests/test_bmi.py
|
bmi_ilamb/tests/test_bmi.py
|
from nose.tools import assert_is, assert_equal
from bmi_ilamb import BmiIlamb
def test_component_name():
component = BmiIlamb()
name = component.get_component_name()
assert_equal(name, 'ILAMB')
assert_is(component.get_component_name(), name)
def test_start_time():
component = BmiIlamb()
assert_equal(component.get_start_time(), 0.0)
def test_end_time():
component = BmiIlamb()
assert_equal(component.get_end_time(), 1.0)
def test_current_time():
component = BmiIlamb()
assert_equal(component.get_current_time(), 0.0)
def test_time_step():
component = BmiIlamb()
assert_equal(component.get_time_step(), 1.0)
def test_time_units():
component = BmiIlamb()
assert_equal(component.get_time_units(), 's')
def test_get_input_var_names():
component = BmiIlamb()
assert_equal(component.get_input_var_names(), ())
def test_get_output_var_names():
component = BmiIlamb()
assert_equal(component.get_output_var_names(), ())
# Todo: test initialize, update, update_until, finalize.
|
Add nose tests for BMI methods
|
Add nose tests for BMI methods
|
Python
|
mit
|
permamodel/bmi-ilamb
|
Add nose tests for BMI methods
|
from nose.tools import assert_is, assert_equal
from bmi_ilamb import BmiIlamb
def test_component_name():
component = BmiIlamb()
name = component.get_component_name()
assert_equal(name, 'ILAMB')
assert_is(component.get_component_name(), name)
def test_start_time():
component = BmiIlamb()
assert_equal(component.get_start_time(), 0.0)
def test_end_time():
component = BmiIlamb()
assert_equal(component.get_end_time(), 1.0)
def test_current_time():
component = BmiIlamb()
assert_equal(component.get_current_time(), 0.0)
def test_time_step():
component = BmiIlamb()
assert_equal(component.get_time_step(), 1.0)
def test_time_units():
component = BmiIlamb()
assert_equal(component.get_time_units(), 's')
def test_get_input_var_names():
component = BmiIlamb()
assert_equal(component.get_input_var_names(), ())
def test_get_output_var_names():
component = BmiIlamb()
assert_equal(component.get_output_var_names(), ())
# Todo: test initialize, update, update_until, finalize.
|
<commit_before><commit_msg>Add nose tests for BMI methods<commit_after>
|
from nose.tools import assert_is, assert_equal
from bmi_ilamb import BmiIlamb
def test_component_name():
component = BmiIlamb()
name = component.get_component_name()
assert_equal(name, 'ILAMB')
assert_is(component.get_component_name(), name)
def test_start_time():
component = BmiIlamb()
assert_equal(component.get_start_time(), 0.0)
def test_end_time():
component = BmiIlamb()
assert_equal(component.get_end_time(), 1.0)
def test_current_time():
component = BmiIlamb()
assert_equal(component.get_current_time(), 0.0)
def test_time_step():
component = BmiIlamb()
assert_equal(component.get_time_step(), 1.0)
def test_time_units():
component = BmiIlamb()
assert_equal(component.get_time_units(), 's')
def test_get_input_var_names():
component = BmiIlamb()
assert_equal(component.get_input_var_names(), ())
def test_get_output_var_names():
component = BmiIlamb()
assert_equal(component.get_output_var_names(), ())
# Todo: test initialize, update, update_until, finalize.
|
Add nose tests for BMI methodsfrom nose.tools import assert_is, assert_equal
from bmi_ilamb import BmiIlamb
def test_component_name():
component = BmiIlamb()
name = component.get_component_name()
assert_equal(name, 'ILAMB')
assert_is(component.get_component_name(), name)
def test_start_time():
component = BmiIlamb()
assert_equal(component.get_start_time(), 0.0)
def test_end_time():
component = BmiIlamb()
assert_equal(component.get_end_time(), 1.0)
def test_current_time():
component = BmiIlamb()
assert_equal(component.get_current_time(), 0.0)
def test_time_step():
component = BmiIlamb()
assert_equal(component.get_time_step(), 1.0)
def test_time_units():
component = BmiIlamb()
assert_equal(component.get_time_units(), 's')
def test_get_input_var_names():
component = BmiIlamb()
assert_equal(component.get_input_var_names(), ())
def test_get_output_var_names():
component = BmiIlamb()
assert_equal(component.get_output_var_names(), ())
# Todo: test initialize, update, update_until, finalize.
|
<commit_before><commit_msg>Add nose tests for BMI methods<commit_after>from nose.tools import assert_is, assert_equal
from bmi_ilamb import BmiIlamb
def test_component_name():
component = BmiIlamb()
name = component.get_component_name()
assert_equal(name, 'ILAMB')
assert_is(component.get_component_name(), name)
def test_start_time():
component = BmiIlamb()
assert_equal(component.get_start_time(), 0.0)
def test_end_time():
component = BmiIlamb()
assert_equal(component.get_end_time(), 1.0)
def test_current_time():
component = BmiIlamb()
assert_equal(component.get_current_time(), 0.0)
def test_time_step():
component = BmiIlamb()
assert_equal(component.get_time_step(), 1.0)
def test_time_units():
component = BmiIlamb()
assert_equal(component.get_time_units(), 's')
def test_get_input_var_names():
component = BmiIlamb()
assert_equal(component.get_input_var_names(), ())
def test_get_output_var_names():
component = BmiIlamb()
assert_equal(component.get_output_var_names(), ())
# Todo: test initialize, update, update_until, finalize.
|
|
53718f877a97fdff6e2a0862f6f6313c5ece5593
|
src/ggrc/migrations/versions/20140930092005_53bb0f4f6ec8_switch_fulltext_record_properties.py
|
src/ggrc/migrations/versions/20140930092005_53bb0f4f6ec8_switch_fulltext_record_properties.py
|
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
Switch myisam to innodb for fulltext_record_properties
|
Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.
|
Python
|
apache-2.0
|
uskudnik/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,uskudnik/ggrc-core,AleksNeStu/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,hasanalom/ggrc-core,edofic/ggrc-core,vladan-m/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,vladan-m/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core
|
Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.
|
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
<commit_before><commit_msg>Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.<commit_after>
|
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
<commit_before><commit_msg>Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.<commit_after>
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
|
f4c4ceb7eb3dcdc80c343ef53d9146a664fb6107
|
site/api/migrations/0009_load_pos_fixture_data.py
|
site/api/migrations/0009_load_pos_fixture_data.py
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.core.management import call_command
app_name = 'api'
fixture = model_name = 'PartOfSpeech'
def load_fixture(apps, schema_editor):
call_command('loaddata', fixture, app_label=app_name)
def unload_fixture(apps, schema_editor):
"Deleting all entries for this model"
    PartOfSpeech = apps.get_model(app_name, model_name)
PartOfSpeech.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
(app_name, '0008_posmention'),
]
operations = [
migrations.RunPython(load_fixture, reverse_code=unload_fixture),
]
|
Create migration to load POS fixture data
|
Create migration to load POS fixture data
|
Python
|
mit
|
LitPalimpsest/parser-api-search,LitPalimpsest/parser-api-search,LitPalimpsest/parser-api-search
|
Create migration to load POS fixture data
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.core.management import call_command
app_name = 'api'
fixture = model_name = 'PartOfSpeech'
def load_fixture(apps, schema_editor):
call_command('loaddata', fixture, app_label=app_name)
def unload_fixture(apps, schema_editor):
"Deleting all entries for this model"
    PartOfSpeech = apps.get_model(app_name, model_name)
PartOfSpeech.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
(app_name, '0008_posmention'),
]
operations = [
migrations.RunPython(load_fixture, reverse_code=unload_fixture),
]
|
<commit_before><commit_msg>Create migration to load POS fixture data<commit_after>
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.core.management import call_command
app_name = 'api'
fixture = model_name = 'PartOfSpeech'
def load_fixture(apps, schema_editor):
call_command('loaddata', fixture, app_label=app_name)
def unload_fixture(apps, schema_editor):
"Deleting all entries for this model"
    PartOfSpeech = apps.get_model(app_name, model_name)
PartOfSpeech.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
(app_name, '0008_posmention'),
]
operations = [
migrations.RunPython(load_fixture, reverse_code=unload_fixture),
]
|
Create migration to load POS fixture data# -*- coding: utf-8 -*-
from django.db import models, migrations
from django.core.management import call_command
app_name = 'api'
fixture = model_name = 'PartOfSpeech'
def load_fixture(apps, schema_editor):
call_command('loaddata', fixture, app_label=app_name)
def unload_fixture(apps, schema_editor):
"Deleting all entries for this model"
PartOfSpeech = apps.get_model(api, model_name)
PartOfSpeech.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
(app_name, '0008_posmention'),
]
operations = [
migrations.RunPython(load_fixture, reverse_code=unload_fixture),
]
|
<commit_before><commit_msg>Create migration to load POS fixture data<commit_after># -*- coding: utf-8 -*-
from django.db import models, migrations
from django.core.management import call_command
app_name = 'api'
fixture = model_name = 'PartOfSpeech'
def load_fixture(apps, schema_editor):
call_command('loaddata', fixture, app_label=app_name)
def unload_fixture(apps, schema_editor):
"Deleting all entries for this model"
PartOfSpeech = apps.get_model(api, model_name)
PartOfSpeech.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
(app_name, '0008_posmention'),
]
operations = [
migrations.RunPython(load_fixture, reverse_code=unload_fixture),
]
|
|
c32fdfa008d89cdbab7bd06529647958224a4e49
|
migrations/versions/0253_set_template_postage_.py
|
migrations/versions/0253_set_template_postage_.py
|
"""
Revision ID: 0253_set_template_postage
Revises: 0252_letter_branding_table
Create Date: 2019-01-30 16:47:08.599448
"""
from alembic import op
import sqlalchemy as sa
revision = '0253_set_template_postage'
down_revision = '0252_letter_branding_table'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(
"""UPDATE templates SET postage = services.postage
FROM services WHERE template_type = 'letter' AND service_id = services.id"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE templates SET postage = null WHERE template_type = 'letter'")
# ### end Alembic commands ###
|
Set postage for all existing templates to service default
|
Set postage for all existing templates to service default
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Set postage for all existing templates to service default
|
"""
Revision ID: 0253_set_template_postage
Revises: 0252_letter_branding_table
Create Date: 2019-01-30 16:47:08.599448
"""
from alembic import op
import sqlalchemy as sa
revision = '0253_set_template_postage'
down_revision = '0252_letter_branding_table'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(
"""UPDATE templates SET postage = services.postage
FROM services WHERE template_type = 'letter' AND service_id = services.id"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE templates SET postage = null WHERE template_type = 'letter'")
# ### end Alembic commands ###
|
<commit_before><commit_msg>Set postage for all existing templates to service default<commit_after>
|
"""
Revision ID: 0253_set_template_postage
Revises: 0252_letter_branding_table
Create Date: 2019-01-30 16:47:08.599448
"""
from alembic import op
import sqlalchemy as sa
revision = '0253_set_template_postage'
down_revision = '0252_letter_branding_table'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(
"""UPDATE templates SET postage = services.postage
FROM services WHERE template_type = 'letter' AND service_id = services.id"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE templates SET postage = null WHERE template_type = 'letter'")
# ### end Alembic commands ###
|
Set postage for all existing templates to service default"""
Revision ID: 0253_set_template_postage
Revises: 0252_letter_branding_table
Create Date: 2019-01-30 16:47:08.599448
"""
from alembic import op
import sqlalchemy as sa
revision = '0253_set_template_postage'
down_revision = '0252_letter_branding_table'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(
"""UPDATE templates SET postage = services.postage
FROM services WHERE template_type = 'letter' AND service_id = services.id"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE templates SET postage = null WHERE template_type = 'letter'")
# ### end Alembic commands ###
|
<commit_before><commit_msg>Set postage for all existing templates to service default<commit_after>"""
Revision ID: 0253_set_template_postage
Revises: 0252_letter_branding_table
Create Date: 2019-01-30 16:47:08.599448
"""
from alembic import op
import sqlalchemy as sa
revision = '0253_set_template_postage'
down_revision = '0252_letter_branding_table'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(
"""UPDATE templates SET postage = services.postage
FROM services WHERE template_type = 'letter' AND service_id = services.id"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE templates SET postage = null WHERE template_type = 'letter'")
# ### end Alembic commands ###
|