| column | dtype | stats |
|---|---|---|
| commit | string | lengths 40–40 |
| old_file | string | lengths 4–118 |
| new_file | string | lengths 4–118 |
| old_contents | string | lengths 0–2.94k |
| new_contents | string | lengths 1–4.43k |
| subject | string | lengths 15–444 |
| message | string | lengths 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | lengths 5–43.2k |
| prompt | string | lengths 17–4.58k |
| response | string | lengths 1–4.43k |
| prompt_tagged | string | lengths 58–4.62k |
| response_tagged | string | lengths 1–4.43k |
| text | string | lengths 132–7.29k |
| text_tagged | string | lengths 173–7.33k |
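A minimal sketch of how rows with this schema can be loaded and inspected, assuming the Hugging Face `datasets` library; the dataset ID `user/commit-dataset` is a placeholder, not the real repository name.

```python
from datasets import load_dataset

# Placeholder dataset ID; substitute the actual repository name.
ds = load_dataset("user/commit-dataset", split="train")

# Each row pairs commit metadata with the file contents before and after.
row = ds[0]
print(row["commit"])              # 40-character commit SHA
print(row["old_file"])            # path of the file before the change
print(row["subject"])             # first line of the commit message
print(row["new_contents"][:200])  # post-commit file contents (truncated)
```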
57e32770422981160e6de515f0a79e4075c101ef
|
server/data_updates/00006_20190201-125213_archive.py
|
server/data_updates/00006_20190201-125213_archive.py
|
# -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Creation: 2018-11-14 10:31
from superdesk.commands.data_updates import DataUpdate
from superdesk import get_resource_service
from superdesk.metadata.item import CONTENT_STATE, \
CONTENT_TYPE, ITEM_STATE, ITEM_TYPE
from eve.utils import ParsedRequest
class DataUpdate(DataUpdate):
resource = 'archive'
def forwards(self, mongodb_collection, mongodb_database):
archive_service = get_resource_service('archive')
req = ParsedRequest()
req.max_results = 50
lookup = {
ITEM_STATE: CONTENT_STATE.DRAFT,
ITEM_TYPE: CONTENT_TYPE.PICTURE
}
while True:
items = list(archive_service.get(req=req, lookup=lookup))
if not items:
break
for item in items:
archive_service.system_update(
item['_id'], {ITEM_STATE: CONTENT_STATE.PROGRESS}, item
)
def backwards(self, mongodb_collection, mongodb_database):
pass
|
Set status in-progress for existing pictures
|
chore(pictures): Set status in-progress for existing pictures
|
Python
|
agpl-3.0
|
ioanpocol/superdesk,ioanpocol/superdesk,ioanpocol/superdesk
|
chore(pictures): Set status in-progress for existing pictures
|
# -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Creation: 2018-11-14 10:31
from superdesk.commands.data_updates import DataUpdate
from superdesk import get_resource_service
from superdesk.metadata.item import CONTENT_STATE, \
CONTENT_TYPE, ITEM_STATE, ITEM_TYPE
from eve.utils import ParsedRequest
class DataUpdate(DataUpdate):
resource = 'archive'
def forwards(self, mongodb_collection, mongodb_database):
archive_service = get_resource_service('archive')
req = ParsedRequest()
req.max_results = 50
lookup = {
ITEM_STATE: CONTENT_STATE.DRAFT,
ITEM_TYPE: CONTENT_TYPE.PICTURE
}
while True:
items = list(archive_service.get(req=req, lookup=lookup))
if not items:
break
for item in items:
archive_service.system_update(
item['_id'], {ITEM_STATE: CONTENT_STATE.PROGRESS}, item
)
def backwards(self, mongodb_collection, mongodb_database):
pass
|
<commit_before><commit_msg>chore(pictures): Set status in-progress for existing pictures<commit_after>
|
# -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Creation: 2018-11-14 10:31
from superdesk.commands.data_updates import DataUpdate
from superdesk import get_resource_service
from superdesk.metadata.item import CONTENT_STATE, \
CONTENT_TYPE, ITEM_STATE, ITEM_TYPE
from eve.utils import ParsedRequest
class DataUpdate(DataUpdate):
resource = 'archive'
def forwards(self, mongodb_collection, mongodb_database):
archive_service = get_resource_service('archive')
req = ParsedRequest()
req.max_results = 50
lookup = {
ITEM_STATE: CONTENT_STATE.DRAFT,
ITEM_TYPE: CONTENT_TYPE.PICTURE
}
while True:
items = list(archive_service.get(req=req, lookup=lookup))
if not items:
break
for item in items:
archive_service.system_update(
item['_id'], {ITEM_STATE: CONTENT_STATE.PROGRESS}, item
)
def backwards(self, mongodb_collection, mongodb_database):
pass
|
chore(pictures): Set status in-progress for existing pictures# -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Creation: 2018-11-14 10:31
from superdesk.commands.data_updates import DataUpdate
from superdesk import get_resource_service
from superdesk.metadata.item import CONTENT_STATE, \
CONTENT_TYPE, ITEM_STATE, ITEM_TYPE
from eve.utils import ParsedRequest
class DataUpdate(DataUpdate):
resource = 'archive'
def forwards(self, mongodb_collection, mongodb_database):
archive_service = get_resource_service('archive')
req = ParsedRequest()
req.max_results = 50
lookup = {
ITEM_STATE: CONTENT_STATE.DRAFT,
ITEM_TYPE: CONTENT_TYPE.PICTURE
}
while True:
items = list(archive_service.get(req=req, lookup=lookup))
if not items:
break
for item in items:
archive_service.system_update(
item['_id'], {ITEM_STATE: CONTENT_STATE.PROGRESS}, item
)
def backwards(self, mongodb_collection, mongodb_database):
pass
|
<commit_before><commit_msg>chore(pictures): Set status in-progress for existing pictures<commit_after># -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Creation: 2018-11-14 10:31
from superdesk.commands.data_updates import DataUpdate
from superdesk import get_resource_service
from superdesk.metadata.item import CONTENT_STATE, \
CONTENT_TYPE, ITEM_STATE, ITEM_TYPE
from eve.utils import ParsedRequest
class DataUpdate(DataUpdate):
resource = 'archive'
def forwards(self, mongodb_collection, mongodb_database):
archive_service = get_resource_service('archive')
req = ParsedRequest()
req.max_results = 50
lookup = {
ITEM_STATE: CONTENT_STATE.DRAFT,
ITEM_TYPE: CONTENT_TYPE.PICTURE
}
while True:
items = list(archive_service.get(req=req, lookup=lookup))
if not items:
break
for item in items:
archive_service.system_update(
item['_id'], {ITEM_STATE: CONTENT_STATE.PROGRESS}, item
)
def backwards(self, mongodb_collection, mongodb_database):
pass
|
|
4501160aff2cef3f6da66862d1c7524bc26420b4
|
samples/bulk_update.py
|
samples/bulk_update.py
|
import requests
from orionsdk import SwisClient
npm_server = 'localhost'
username = 'admin'
password = ''
verify = False
if not verify:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
swis = SwisClient(npm_server, username, password)
# select the top 3 nodes from the inventory
results = swis.query("SELECT TOP 3 Caption, URI FROM Orion.Nodes")
nodes = results['results']
# build the body that will be passed to the query
body = {"uris": [], "properties": {}}
# add the URIs with a '/CustomProperties' suffix to each
for node in nodes:
body["uris"].append(node["URI"] + "/CustomProperties")
# set as many custom properties as you like
body["properties"]["City"] = "Austin"
body["properties"]["DeviceType"] = "Router"
body["properties"]["Department"] = "Billing"
# submit the request
swis.bulkupdate(body)
|
Add bulk custom property example
|
Add bulk custom property example
|
Python
|
apache-2.0
|
solarwinds/orionsdk-python
|
Add bulk custom property example
|
import requests
from orionsdk import SwisClient
npm_server = 'localhost'
username = 'admin'
password = ''
verify = False
if not verify:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
swis = SwisClient(npm_server, username, password)
# select the top 3 nodes from the inventory
results = swis.query("SELECT TOP 3 Caption, URI FROM Orion.Nodes")
nodes = results['results']
# build the body that will be passed to the query
body = {"uris": [], "properties": {}}
# add the URIs with a '/CustomProperties' suffix to each
for node in nodes:
body["uris"].append(node["URI"] + "/CustomProperties")
# set as many custom properties as you like
body["properties"]["City"] = "Austin"
body["properties"]["DeviceType"] = "Router"
body["properties"]["Department"] = "Billing"
# submit the request
swis.bulkupdate(body)
|
<commit_before><commit_msg>Add bulk custom property example<commit_after>
|
import requests
from orionsdk import SwisClient
npm_server = 'localhost'
username = 'admin'
password = ''
verify = False
if not verify:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
swis = SwisClient(npm_server, username, password)
# select the top 3 nodes from the inventory
results = swis.query("SELECT TOP 3 Caption, URI FROM Orion.Nodes")
nodes = results['results']
# build the body that will be passed to the query
body = {"uris": [], "properties": {}}
# add the URIs with a '/CustomProperties' suffix to each
for node in nodes:
body["uris"].append(node["URI"] + "/CustomProperties")
# set as many custom properties as you like
body["properties"]["City"] = "Austin"
body["properties"]["DeviceType"] = "Router"
body["properties"]["Department"] = "Billing"
# submit the request
swis.bulkupdate(body)
|
Add bulk custom property exampleimport requests
from orionsdk import SwisClient
npm_server = 'localhost'
username = 'admin'
password = ''
verify = False
if not verify:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
swis = SwisClient(npm_server, username, password)
# select the top 3 nodes from the inventory
results = swis.query("SELECT TOP 3 Caption, URI FROM Orion.Nodes")
nodes = results['results']
# build the body that will be passed to the query
body = {"uris": [], "properties": {}}
# add the URIs with a '/CustomProperties' suffix to each
for node in nodes:
body["uris"].append(node["URI"] + "/CustomProperties")
# set as many custom properties as you like
body["properties"]["City"] = "Austin"
body["properties"]["DeviceType"] = "Router"
body["properties"]["Department"] = "Billing"
# submit the request
swis.bulkupdate(body)
|
<commit_before><commit_msg>Add bulk custom property example<commit_after>import requests
from orionsdk import SwisClient
npm_server = 'localhost'
username = 'admin'
password = ''
verify = False
if not verify:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
swis = SwisClient(npm_server, username, password)
# select the top 3 nodes from the inventory
results = swis.query("SELECT TOP 3 Caption, URI FROM Orion.Nodes")
nodes = results['results']
# build the body that will be passed to the query
body = {"uris": [], "properties": {}}
# add the URIs with a '/CustomProperties' suffix to each
for node in nodes:
body["uris"].append(node["URI"] + "/CustomProperties")
# set as many custom properties as you like
body["properties"]["City"] = "Austin"
body["properties"]["DeviceType"] = "Router"
body["properties"]["Department"] = "Billing"
# submit the request
swis.bulkupdate(body)
|
|
6b0d48f87f56b9485903f7422dbb9ece4d96d329
|
python/minimum_absolute_difference_in_an_array.py
|
python/minimum_absolute_difference_in_an_array.py
|
#!/bin/python3
import sys
def minimum_absolute_difference(array):
sorted_pairs = zip(sorted(array)[:-1], sorted(array)[1:])
differences = [abs(a - b) for a, b in sorted_pairs]
return min(differences)
if __name__ == "__main__":
_ = int(input().strip())
array = list(map(int, input().strip().split(' ')))
print(minimum_absolute_difference(array))
|
Solve Minimum Absolute Difference in an Array
|
Solve Minimum Absolute Difference in an Array
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve Minimum Absolute Difference in an Array
|
#!/bin/python3
import sys
def minimum_absolute_difference(array):
sorted_pairs = zip(sorted(array)[:-1], sorted(array)[1:])
differences = [abs(a - b) for a, b in sorted_pairs]
return min(differences)
if __name__ == "__main__":
_ = int(input().strip())
array = list(map(int, input().strip().split(' ')))
print(minimum_absolute_difference(array))
|
<commit_before><commit_msg>Solve Minimum Absolute Difference in an Array<commit_after>
|
#!/bin/python3
import sys
def minimum_absolute_difference(array):
sorted_pairs = zip(sorted(array)[:-1], sorted(array)[1:])
differences = [abs(a - b) for a, b in sorted_pairs]
return min(differences)
if __name__ == "__main__":
_ = int(input().strip())
array = list(map(int, input().strip().split(' ')))
print(minimum_absolute_difference(array))
|
Solve Minimum Absolute Difference in an Array#!/bin/python3
import sys
def minimum_absolute_difference(array):
sorted_pairs = zip(sorted(array)[:-1], sorted(array)[1:])
differences = [abs(a - b) for a, b in sorted_pairs]
return min(differences)
if __name__ == "__main__":
_ = int(input().strip())
array = list(map(int, input().strip().split(' ')))
print(minimum_absolute_difference(array))
|
<commit_before><commit_msg>Solve Minimum Absolute Difference in an Array<commit_after>#!/bin/python3
import sys
def minimum_absolute_difference(array):
sorted_pairs = zip(sorted(array)[:-1], sorted(array)[1:])
differences = [abs(a - b) for a, b in sorted_pairs]
return min(differences)
if __name__ == "__main__":
_ = int(input().strip())
array = list(map(int, input().strip().split(' ')))
print(minimum_absolute_difference(array))
|
|
bdaa2fbf4147ce645a93921366953b3a93143899
|
test_AcmeTinyHelperConsole.py
|
test_AcmeTinyHelperConsole.py
|
from AcmeTinyHelperConsole import AcmeTinyHelperConsole
class TestAcmeTinyHelper:
def test_run(self, capsys):
console = AcmeTinyHelperConsole()
opts = [('--path', '/etc/letsencrypt/example.com'),
('--domains', 'example.com'),
('--acme-tiny-path', '/usr/src/acme-tiny.py'),
('--challenge-path', '/var/www/lets-encrypt-challenges')]
console.run('new', opts)
out, err = capsys.readouterr()
expected = (
'mkdir -p /etc/letsencrypt/example.com',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/account.key',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/domain.key',
'wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > /etc/letsencrypt/example.com/intermediate.pem',
'openssl req -new -sha256 -key /etc/letsencrypt/example.com/domain.key -subj "/CN=example.com" > /etc/letsencrypt/example.com/domain.csr',
'python /usr/src/acme-tiny.py --account-key /etc/letsencrypt/example.com/account.key --csr /etc/letsencrypt/example.com/domain.csr --acme-dir /var/www/lets-encrypt-challenges > /etc/letsencrypt/example.com/signed.crt',
'cat /etc/letsencrypt/example.com/signed.crt /etc/letsencrypt/example.com/intermediate.pem > /etc/letsencrypt/example.com/chained.pem'
)
for expected_line in expected:
assert expected_line in out
|
Add some basic console tests
|
Add some basic console tests
|
Python
|
mit
|
mariano-dagostino/AcmeTinyHelper
|
Add some basic console tests
|
from AcmeTinyHelperConsole import AcmeTinyHelperConsole
class TestAcmeTinyHelper:
def test_run(self, capsys):
console = AcmeTinyHelperConsole()
opts = [('--path', '/etc/letsencrypt/example.com'),
('--domains', 'example.com'),
('--acme-tiny-path', '/usr/src/acme-tiny.py'),
('--challenge-path', '/var/www/lets-encrypt-challenges')]
console.run('new', opts)
out, err = capsys.readouterr()
expected = (
'mkdir -p /etc/letsencrypt/example.com',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/account.key',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/domain.key',
'wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > /etc/letsencrypt/example.com/intermediate.pem',
'openssl req -new -sha256 -key /etc/letsencrypt/example.com/domain.key -subj "/CN=example.com" > /etc/letsencrypt/example.com/domain.csr',
'python /usr/src/acme-tiny.py --account-key /etc/letsencrypt/example.com/account.key --csr /etc/letsencrypt/example.com/domain.csr --acme-dir /var/www/lets-encrypt-challenges > /etc/letsencrypt/example.com/signed.crt',
'cat /etc/letsencrypt/example.com/signed.crt /etc/letsencrypt/example.com/intermediate.pem > /etc/letsencrypt/example.com/chained.pem'
)
for expected_line in expected:
assert expected_line in out
|
<commit_before><commit_msg>Add some basic console tests<commit_after>
|
from AcmeTinyHelperConsole import AcmeTinyHelperConsole
class TestAcmeTinyHelper:
def test_run(self, capsys):
console = AcmeTinyHelperConsole()
opts = [('--path', '/etc/letsencrypt/example.com'),
('--domains', 'example.com'),
('--acme-tiny-path', '/usr/src/acme-tiny.py'),
('--challenge-path', '/var/www/lets-encrypt-challenges')]
console.run('new', opts)
out, err = capsys.readouterr()
expected = (
'mkdir -p /etc/letsencrypt/example.com',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/account.key',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/domain.key',
'wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > /etc/letsencrypt/example.com/intermediate.pem',
'openssl req -new -sha256 -key /etc/letsencrypt/example.com/domain.key -subj "/CN=example.com" > /etc/letsencrypt/example.com/domain.csr',
'python /usr/src/acme-tiny.py --account-key /etc/letsencrypt/example.com/account.key --csr /etc/letsencrypt/example.com/domain.csr --acme-dir /var/www/lets-encrypt-challenges > /etc/letsencrypt/example.com/signed.crt',
'cat /etc/letsencrypt/example.com/signed.crt /etc/letsencrypt/example.com/intermediate.pem > /etc/letsencrypt/example.com/chained.pem'
)
for expected_line in expected:
assert expected_line in out
|
Add some basic console testsfrom AcmeTinyHelperConsole import AcmeTinyHelperConsole
class TestAcmeTinyHelper:
def test_run(self, capsys):
console = AcmeTinyHelperConsole()
opts = [('--path', '/etc/letsencrypt/example.com'),
('--domains', 'example.com'),
('--acme-tiny-path', '/usr/src/acme-tiny.py'),
('--challenge-path', '/var/www/lets-encrypt-challenges')]
console.run('new', opts)
out, err = capsys.readouterr()
expected = (
'mkdir -p /etc/letsencrypt/example.com',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/account.key',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/domain.key',
'wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > /etc/letsencrypt/example.com/intermediate.pem',
'openssl req -new -sha256 -key /etc/letsencrypt/example.com/domain.key -subj "/CN=example.com" > /etc/letsencrypt/example.com/domain.csr',
'python /usr/src/acme-tiny.py --account-key /etc/letsencrypt/example.com/account.key --csr /etc/letsencrypt/example.com/domain.csr --acme-dir /var/www/lets-encrypt-challenges > /etc/letsencrypt/example.com/signed.crt',
'cat /etc/letsencrypt/example.com/signed.crt /etc/letsencrypt/example.com/intermediate.pem > /etc/letsencrypt/example.com/chained.pem'
)
for expected_line in expected:
assert expected_line in out
|
<commit_before><commit_msg>Add some basic console tests<commit_after>from AcmeTinyHelperConsole import AcmeTinyHelperConsole
class TestAcmeTinyHelper:
def test_run(self, capsys):
console = AcmeTinyHelperConsole()
opts = [('--path', '/etc/letsencrypt/example.com'),
('--domains', 'example.com'),
('--acme-tiny-path', '/usr/src/acme-tiny.py'),
('--challenge-path', '/var/www/lets-encrypt-challenges')]
console.run('new', opts)
out, err = capsys.readouterr()
expected = (
'mkdir -p /etc/letsencrypt/example.com',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/account.key',
'openssl genrsa 4096 > /etc/letsencrypt/example.com/domain.key',
'wget -O - https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem > /etc/letsencrypt/example.com/intermediate.pem',
'openssl req -new -sha256 -key /etc/letsencrypt/example.com/domain.key -subj "/CN=example.com" > /etc/letsencrypt/example.com/domain.csr',
'python /usr/src/acme-tiny.py --account-key /etc/letsencrypt/example.com/account.key --csr /etc/letsencrypt/example.com/domain.csr --acme-dir /var/www/lets-encrypt-challenges > /etc/letsencrypt/example.com/signed.crt',
'cat /etc/letsencrypt/example.com/signed.crt /etc/letsencrypt/example.com/intermediate.pem > /etc/letsencrypt/example.com/chained.pem'
)
for expected_line in expected:
assert expected_line in out
|
|
beb7d6ab2472c173528002be9f7190346884ce56
|
radar/radar/models/fetal_anomaly_scans.py
|
radar/radar/models/fetal_anomaly_scans.py
|
from sqlalchemy import Column, Integer, Date, Index, Boolean, String
from radar.database import db
from radar.models.common import MetaModelMixin, uuid_pk_column, patient_id_column, patient_relationship
class FetalAnomalyScan(db.Model, MetaModelMixin):
__tablename__ = 'fetal_anomaly_scans'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('fetal_anomaly_scans')
dateOfScan = Column(Date, nullable=False)
gestationDays = Column(Integer, nullable=False)
oligohydramnios = Column(Boolean)
right_anomaly_details = Column(String)
right_ultrasound_details = Column(String)
left_anomaly_details = Column(String)
left_ultrasound_details = Column(String)
Index('fetal_anomaly_scans_patient_id_idx', FetalAnomalyScan.patient_id)
|
Add fetal anomaly scan model
|
Add fetal anomaly scan model
|
Python
|
agpl-3.0
|
renalreg/radar,renalreg/radar,renalreg/radar,renalreg/radar
|
Add fetal anomaly scan model
|
from sqlalchemy import Column, Integer, Date, Index, Boolean, String
from radar.database import db
from radar.models.common import MetaModelMixin, uuid_pk_column, patient_id_column, patient_relationship
class FetalAnomalyScan(db.Model, MetaModelMixin):
__tablename__ = 'fetal_anomaly_scans'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('fetal_anomaly_scans')
dateOfScan = Column(Date, nullable=False)
gestationDays = Column(Integer, nullable=False)
oligohydramnios = Column(Boolean)
right_anomaly_details = Column(String)
right_ultrasound_details = Column(String)
left_anomaly_details = Column(String)
left_ultrasound_details = Column(String)
Index('fetal_anomaly_scans_patient_id_idx', FetalAnomalyScan.patient_id)
|
<commit_before><commit_msg>Add fetal anomaly scan model<commit_after>
|
from sqlalchemy import Column, Integer, Date, Index, Boolean, String
from radar.database import db
from radar.models.common import MetaModelMixin, uuid_pk_column, patient_id_column, patient_relationship
class FetalAnomalyScan(db.Model, MetaModelMixin):
__tablename__ = 'fetal_anomaly_scans'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('fetal_anomaly_scans')
dateOfScan = Column(Date, nullable=False)
gestationDays = Column(Integer, nullable=False)
oligohydramnios = Column(Boolean)
right_anomaly_details = Column(String)
right_ultrasound_details = Column(String)
left_anomaly_details = Column(String)
left_ultrasound_details = Column(String)
Index('fetal_anomaly_scans_patient_id_idx', FetalAnomalyScan.patient_id)
|
Add fetal anomaly scan modelfrom sqlalchemy import Column, Integer, Date, Index, Boolean, String
from radar.database import db
from radar.models.common import MetaModelMixin, uuid_pk_column, patient_id_column, patient_relationship
class FetalAnomalyScan(db.Model, MetaModelMixin):
__tablename__ = 'fetal_anomaly_scans'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('fetal_anomaly_scans')
dateOfScan = Column(Date, nullable=False)
gestationDays = Column(Integer, nullable=False)
oligohydramnios = Column(Boolean)
right_anomaly_details = Column(String)
right_ultrasound_details = Column(String)
left_anomaly_details = Column(String)
left_ultrasound_details = Column(String)
Index('fetal_anomaly_scans_patient_id_idx', FetalAnomalyScan.patient_id)
|
<commit_before><commit_msg>Add fetal anomaly scan model<commit_after>from sqlalchemy import Column, Integer, Date, Index, Boolean, String
from radar.database import db
from radar.models.common import MetaModelMixin, uuid_pk_column, patient_id_column, patient_relationship
class FetalAnomalyScan(db.Model, MetaModelMixin):
__tablename__ = 'fetal_anomaly_scans'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('fetal_anomaly_scans')
dateOfScan = Column(Date, nullable=False)
gestationDays = Column(Integer, nullable=False)
oligohydramnios = Column(Boolean)
right_anomaly_details = Column(String)
right_ultrasound_details = Column(String)
left_anomaly_details = Column(String)
left_ultrasound_details = Column(String)
Index('fetal_anomaly_scans_patient_id_idx', FetalAnomalyScan.patient_id)
|
|
9131040b8115bffc3e852c3507d128a2060f463d
|
astropy_helpers/tests/test_ah_bootstrap.py
|
astropy_helpers/tests/test_ah_bootstrap.py
|
import os
from setuptools.sandbox import run_setup
from . import run_cmd
TEST_SETUP_PY = """\
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
for k in list(sys.modules):
if k == 'astropy_helpers' or k.startswith('astropy_helpers.'):
del sys.modules[k]
import ah_bootstrap
ah_bootstrap.use_astropy_helpers({args})
import astropy_helpers
print(os.path.abspath(astropy_helpers.__file__))
"""
def test_bootstrap_from_submodule(tmpdir, capsys):
"""
Tests importing astropy_helpers from a submodule in a git repository.
This tests actually performing a fresh clone of the repository without
the submodule initialized, and that importing astropy_helpers in that
context works transparently after calling
`ah_bootstrap.use_astropy_helpers`.
"""
orig_repo = tmpdir.mkdir('orig')
old_cwd = os.getcwd()
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
try:
os.chdir(str(orig_repo))
run_cmd('git', ['init'])
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=''))
run_cmd('git', ['add', 'setup.py'])
# Add our own clone of the astropy_helpers repo as a submodule named
# astropy_helpers
run_cmd('git', ['submodule', 'add', os.path.abspath(old_cwd),
'astropy_helpers'])
run_cmd('git', ['commit', '-m', 'test repository'])
os.chdir(str(tmpdir))
# Creates a clone of our test repo in the directory 'clone'
run_cmd('git', ['clone', 'orig', 'clone'])
os.chdir('clone')
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.strip()
# Ensure that the astropy_helpers used by the setup.py is the one that
# was imported from git submodule
assert path == str(tmpdir.join('clone', 'astropy_helpers',
'astropy_helpers', '__init__.py'))
finally:
os.chdir(old_cwd)
|
Add the first test that actually tests ah_bootstrap.use_astropy_helpers directly. Will be adding more tests soon following the same general pattern.
|
Add the first test that actually tests ah_bootstrap.use_astropy_helpers directly. Will be adding more tests soon following the same general pattern.
|
Python
|
bsd-3-clause
|
bsipocz/astropy-helpers,larrybradley/astropy-helpers,bsipocz/astropy-helpers,larrybradley/astropy-helpers,dpshelio/astropy-helpers,embray/astropy_helpers,Cadair/astropy-helpers,embray/astropy_helpers,Cadair/astropy-helpers,dpshelio/astropy-helpers,astropy/astropy-helpers,bsipocz/astropy-helpers,embray/astropy_helpers,embray/astropy_helpers,larrybradley/astropy-helpers,astropy/astropy-helpers
|
Add the first test that actually tests ah_bootstrap.use_astropy_helpers directly. Will be adding more tests soon following the same general pattern.
|
import os
from setuptools.sandbox import run_setup
from . import run_cmd
TEST_SETUP_PY = """\
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
for k in list(sys.modules):
if k == 'astropy_helpers' or k.startswith('astropy_helpers.'):
del sys.modules[k]
import ah_bootstrap
ah_bootstrap.use_astropy_helpers({args})
import astropy_helpers
print(os.path.abspath(astropy_helpers.__file__))
"""
def test_bootstrap_from_submodule(tmpdir, capsys):
"""
Tests importing astropy_helpers from a submodule in a git repository.
This tests actually performing a fresh clone of the repository without
the submodule initialized, and that importing astropy_helpers in that
context works transparently after calling
`ah_bootstrap.use_astropy_helpers`.
"""
orig_repo = tmpdir.mkdir('orig')
old_cwd = os.getcwd()
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
try:
os.chdir(str(orig_repo))
run_cmd('git', ['init'])
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=''))
run_cmd('git', ['add', 'setup.py'])
# Add our own clone of the astropy_helpers repo as a submodule named
# astropy_helpers
run_cmd('git', ['submodule', 'add', os.path.abspath(old_cwd),
'astropy_helpers'])
run_cmd('git', ['commit', '-m', 'test repository'])
os.chdir(str(tmpdir))
# Creates a clone of our test repo in the directory 'clone'
run_cmd('git', ['clone', 'orig', 'clone'])
os.chdir('clone')
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.strip()
# Ensure that the astropy_helpers used by the setup.py is the one that
# was imported from git submodule
assert path == str(tmpdir.join('clone', 'astropy_helpers',
'astropy_helpers', '__init__.py'))
finally:
os.chdir(old_cwd)
|
<commit_before><commit_msg>Add the first test that actually tests ah_bootstrap.use_astropy_helpers directly. Will be adding more tests soon following the same general pattern.<commit_after>
|
import os
from setuptools.sandbox import run_setup
from . import run_cmd
TEST_SETUP_PY = """\
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
for k in list(sys.modules):
if k == 'astropy_helpers' or k.startswith('astropy_helpers.'):
del sys.modules[k]
import ah_bootstrap
ah_bootstrap.use_astropy_helpers({args})
import astropy_helpers
print(os.path.abspath(astropy_helpers.__file__))
"""
def test_bootstrap_from_submodule(tmpdir, capsys):
"""
Tests importing astropy_helpers from a submodule in a git repository.
This tests actually performing a fresh clone of the repository without
the submodule initialized, and that importing astropy_helpers in that
context works transparently after calling
`ah_bootstrap.use_astropy_helpers`.
"""
orig_repo = tmpdir.mkdir('orig')
old_cwd = os.getcwd()
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
try:
os.chdir(str(orig_repo))
run_cmd('git', ['init'])
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=''))
run_cmd('git', ['add', 'setup.py'])
# Add our own clone of the astropy_helpers repo as a submodule named
# astropy_helpers
run_cmd('git', ['submodule', 'add', os.path.abspath(old_cwd),
'astropy_helpers'])
run_cmd('git', ['commit', '-m', 'test repository'])
os.chdir(str(tmpdir))
# Creates a clone of our test repo in the directory 'clone'
run_cmd('git', ['clone', 'orig', 'clone'])
os.chdir('clone')
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.strip()
# Ensure that the astropy_helpers used by the setup.py is the one that
# was imported from git submodule
assert path == str(tmpdir.join('clone', 'astropy_helpers',
'astropy_helpers', '__init__.py'))
finally:
os.chdir(old_cwd)
|
Add the first test that actually tests ah_bootstrap.use_astropy_helpers directly. Will be adding more tests soon following the same general pattern.import os
from setuptools.sandbox import run_setup
from . import run_cmd
TEST_SETUP_PY = """\
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
for k in list(sys.modules):
if k == 'astropy_helpers' or k.startswith('astropy_helpers.'):
del sys.modules[k]
import ah_bootstrap
ah_bootstrap.use_astropy_helpers({args})
import astropy_helpers
print(os.path.abspath(astropy_helpers.__file__))
"""
def test_bootstrap_from_submodule(tmpdir, capsys):
"""
Tests importing astropy_helpers from a submodule in a git repository.
This tests actually performing a fresh clone of the repository without
the submodule initialized, and that importing astropy_helpers in that
context works transparently after calling
`ah_bootstrap.use_astropy_helpers`.
"""
orig_repo = tmpdir.mkdir('orig')
old_cwd = os.getcwd()
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
try:
os.chdir(str(orig_repo))
run_cmd('git', ['init'])
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=''))
run_cmd('git', ['add', 'setup.py'])
# Add our own clone of the astropy_helpers repo as a submodule named
# astropy_helpers
run_cmd('git', ['submodule', 'add', os.path.abspath(old_cwd),
'astropy_helpers'])
run_cmd('git', ['commit', '-m', 'test repository'])
os.chdir(str(tmpdir))
# Creates a clone of our test repo in the directory 'clone'
run_cmd('git', ['clone', 'orig', 'clone'])
os.chdir('clone')
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.strip()
# Ensure that the astropy_helpers used by the setup.py is the one that
# was imported from git submodule
assert path == str(tmpdir.join('clone', 'astropy_helpers',
'astropy_helpers', '__init__.py'))
finally:
os.chdir(old_cwd)
|
<commit_before><commit_msg>Add the first test that actually tests ah_bootstrap.use_astropy_helpers directly. Will be adding more tests soon following the same general pattern.<commit_after>import os
from setuptools.sandbox import run_setup
from . import run_cmd
TEST_SETUP_PY = """\
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
for k in list(sys.modules):
if k == 'astropy_helpers' or k.startswith('astropy_helpers.'):
del sys.modules[k]
import ah_bootstrap
ah_bootstrap.use_astropy_helpers({args})
import astropy_helpers
print(os.path.abspath(astropy_helpers.__file__))
"""
def test_bootstrap_from_submodule(tmpdir, capsys):
"""
Tests importing astropy_helpers from a submodule in a git repository.
This tests actually performing a fresh clone of the repository without
the submodule initialized, and that importing astropy_helpers in that
context works transparently after calling
`ah_bootstrap.use_astropy_helpers`.
"""
orig_repo = tmpdir.mkdir('orig')
old_cwd = os.getcwd()
# Ensure ah_bootstrap is imported from the local directory
import ah_bootstrap
try:
os.chdir(str(orig_repo))
run_cmd('git', ['init'])
# Write a test setup.py that uses ah_bootstrap; it also ensures that
# any previous reference to astropy_helpers is first wiped from
# sys.modules
orig_repo.join('setup.py').write(TEST_SETUP_PY.format(args=''))
run_cmd('git', ['add', 'setup.py'])
# Add our own clone of the astropy_helpers repo as a submodule named
# astropy_helpers
run_cmd('git', ['submodule', 'add', os.path.abspath(old_cwd),
'astropy_helpers'])
run_cmd('git', ['commit', '-m', 'test repository'])
os.chdir(str(tmpdir))
# Creates a clone of our test repo in the directory 'clone'
run_cmd('git', ['clone', 'orig', 'clone'])
os.chdir('clone')
run_setup('setup.py', [])
stdout, stderr = capsys.readouterr()
path = stdout.strip()
# Ensure that the astropy_helpers used by the setup.py is the one that
# was imported from git submodule
assert path == str(tmpdir.join('clone', 'astropy_helpers',
'astropy_helpers', '__init__.py'))
finally:
os.chdir(old_cwd)
|
|
7be3c1c7d1a881683ff8f0a27b6d6dae0a093b20
|
main.py
|
main.py
|
import sys
if __name__ == '__main__':
print 'Hello world'
print 'Your current version is', sys.version
print 'Your current version should be 2.7.6'
print "Let's get started, shall we?"
|
Revert "Revert "Added version printing""
|
Revert "Revert "Added version printing""
This reverts commit 7f392958bc8d4327e0e9d6dc169eee26ea80c279.
|
Python
|
bsd-3-clause
|
rkawauchi/IHK,rkawauchi/IHK
|
Revert "Revert "Added version printing""
This reverts commit 7f392958bc8d4327e0e9d6dc169eee26ea80c279.
|
import sys
if __name__ == '__main__':
print 'Hello world'
print 'Your current version is', sys.version
print 'Your current version should be 2.7.6'
print "Let's get started, shall we?"
|
<commit_before><commit_msg>Revert "Revert "Added version printing""
This reverts commit 7f392958bc8d4327e0e9d6dc169eee26ea80c279.<commit_after>
|
import sys
if __name__ == '__main__':
print 'Hello world'
print 'Your current version is', sys.version
print 'Your current version should be 2.7.6'
print "Let's get started, shall we?"
|
Revert "Revert "Added version printing""
This reverts commit 7f392958bc8d4327e0e9d6dc169eee26ea80c279.import sys
if __name__ == '__main__':
print 'Hello world'
print 'Your current version is', sys.version
print 'Your current version should be 2.7.6'
print "Let's get started, shall we?"
|
<commit_before><commit_msg>Revert "Revert "Added version printing""
This reverts commit 7f392958bc8d4327e0e9d6dc169eee26ea80c279.<commit_after>import sys
if __name__ == '__main__':
print 'Hello world'
print 'Your current version is', sys.version
print 'Your current version should be 2.7.6'
print "Let's get started, shall we?"
|
|
920e20ea689e62e86f2adbf905db168063be00db
|
main.py
|
main.py
|
import urllib
import yaml
from datetime import datetime
from icalendar import Calendar
# Load configuration data
with open('config.yaml', 'r') as f:
config = yaml.load(f)
calendar = config['calendar']
topic_format = config['irc']['topic']
date_format = config['date_format']
def next_event(ical_url):
raw_ics = urllib.urlopen(ical_url).read()
gcal = Calendar.from_ical(raw_ics)
events = []
for component in gcal.walk():
if component.name != 'VEVENT':
continue
events.append(component);
def compare(a, b):
return cmp(a.get('dtstart').dt, b.get('dtstart').dt);
sorted_events = sorted(events, compare)
return sorted_events[0]
def chatroom_topic(event):
start = event.get('dtstart').dt
topic = topic_format.format(
datetime=start.strftime(date_format),
location=event.get('location')
)
return topic
print chatroom_topic(next_event(calendar))
|
Create a script for printing the chatroom topic with the next event.
|
Create a script for printing the chatroom topic with the next event.
|
Python
|
mit
|
thisgeek/topiCal
|
Create a script for printing the chatroom topic with the next event.
|
import urllib
import yaml
from datetime import datetime
from icalendar import Calendar
# Load configuration data
with open('config.yaml', 'r') as f:
config = yaml.load(f)
calendar = config['calendar']
topic_format = config['irc']['topic']
date_format = config['date_format']
def next_event(ical_url):
raw_ics = urllib.urlopen(ical_url).read()
gcal = Calendar.from_ical(raw_ics)
events = []
for component in gcal.walk():
if component.name != 'VEVENT':
continue
events.append(component);
def compare(a, b):
return cmp(a.get('dtstart').dt, b.get('dtstart').dt);
sorted_events = sorted(events, compare)
return sorted_events[0]
def chatroom_topic(event):
start = event.get('dtstart').dt
topic = topic_format.format(
datetime=start.strftime(date_format),
location=event.get('location')
)
return topic
print chatroom_topic(next_event(calendar))
|
<commit_before><commit_msg>Create a script for printing the chatroom topic with the next event.<commit_after>
|
import urllib
import yaml
from datetime import datetime
from icalendar import Calendar
# Load configuration data
with open('config.yaml', 'r') as f:
config = yaml.load(f)
calendar = config['calendar']
topic_format = config['irc']['topic']
date_format = config['date_format']
def next_event(ical_url):
raw_ics = urllib.urlopen(ical_url).read()
gcal = Calendar.from_ical(raw_ics)
events = []
for component in gcal.walk():
if component.name != 'VEVENT':
continue
events.append(component);
def compare(a, b):
return cmp(a.get('dtstart').dt, b.get('dtstart').dt);
sorted_events = sorted(events, compare)
return sorted_events[0]
def chatroom_topic(event):
start = event.get('dtstart').dt
topic = topic_format.format(
datetime=start.strftime(date_format),
location=event.get('location')
)
return topic
print chatroom_topic(next_event(calendar))
|
Create a script for printing the chatroom topic with the next event.import urllib
import yaml
from datetime import datetime
from icalendar import Calendar
# Load configuration data
with open('config.yaml', 'r') as f:
config = yaml.load(f)
calendar = config['calendar']
topic_format = config['irc']['topic']
date_format = config['date_format']
def next_event(ical_url):
raw_ics = urllib.urlopen(ical_url).read()
gcal = Calendar.from_ical(raw_ics)
events = []
for component in gcal.walk():
if component.name != 'VEVENT':
continue
events.append(component);
def compare(a, b):
return cmp(a.get('dtstart').dt, b.get('dtstart').dt);
sorted_events = sorted(events, compare)
return sorted_events[0]
def chatroom_topic(event):
start = event.get('dtstart').dt
topic = topic_format.format(
datetime=start.strftime(date_format),
location=event.get('location')
)
return topic
print chatroom_topic(next_event(calendar))
|
<commit_before><commit_msg>Create a script for printing the chatroom topic with the next event.<commit_after>import urllib
import yaml
from datetime import datetime
from icalendar import Calendar
# Load configuration data
with open('config.yaml', 'r') as f:
config = yaml.load(f)
calendar = config['calendar']
topic_format = config['irc']['topic']
date_format = config['date_format']
def next_event(ical_url):
raw_ics = urllib.urlopen(ical_url).read()
gcal = Calendar.from_ical(raw_ics)
events = []
for component in gcal.walk():
if component.name != 'VEVENT':
continue
events.append(component);
def compare(a, b):
return cmp(a.get('dtstart').dt, b.get('dtstart').dt);
sorted_events = sorted(events, compare)
return sorted_events[0]
def chatroom_topic(event):
start = event.get('dtstart').dt
topic = topic_format.format(
datetime=start.strftime(date_format),
location=event.get('location')
)
return topic
print chatroom_topic(next_event(calendar))
|
|
18a166e0831cccd0a08f859a3533ed01d810c4ee
|
binarycalcs.py
|
binarycalcs.py
|
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.constants import G, M_sun, au
from astropy.units.core import UnitConversionError
def keplerian_binary(givenquant):
'''Return equivalency for Keplerian binary orbit.
Parameters
----------
givenquant : `~astropy.units.Quantity`
`astropy.units.Quantity` associated with the parameter of the orbit
that is fixed for this conversion (e.g. to convert between period and
semimajor axis, this should be a mass quantity).
'''
# Finding a pythonic way to cycle through the three potential choices
# for givenquant has been difficult. This seems to follow the rule of EAFP
# best. First I will assume that givenquant is a mass, then a semimajor
# axis, then a period.
try:
fixedmass = givenquant.to(u.solMass)
except UnitConversionError:
try:
fixedsemimajor = givenquant.to(u.AU)
except UnitConversionError:
try:
fixedperiod = givenquant.to(u.year).value
except UnitConversionError:
# If it's neither a mass, length, or year, then the wrong
# quantity was given.
raise ValueError(
"The fixed quantity must be either a mass, time interval, "
"or length.")
else:
# givenquant is a time
fromunit = u.solMass
tounit = u.AU
def fromfunction(M):
return (M * fixedperiod**2)**(1/3)
def tofunction(a):
return a**3 / fixedperiod**2
else:
# givenquant is a length
fromunit = u.solMass
tounit = u.year
def fromfunction(M):
return (fixedsemimajor**3 / M)**(1/2)
def tofunction(P):
return fixedsemimajor**3 / P**2
else:
# givenquant is a mass
fromunit = u.year
tounit = u.AU
def fromfunction(P):
return (P**2 * fixedmass)**(1/3)
def tofunction(a):
return (a**3 / fixedmass)**(1/2)
equiv = [
(fromunit, tounit, fromfunction, tofunction)]
return equiv
def calc_velocity_of_binary(masses, period, mass_ratio):
'''Returns the orbital velocity of a binary specified by mass and period.
The masses should be the total mass of the system and the period should be
the orbital period of the system.
'''
vel = ((2 * np.pi * G * masses / period)**(1/3) * mass_ratio /
(1 + mass_ratio))
try:
return vel.to(u.km/u.s)
except u.UnitConversionError as e:
raise TypeError("Arguments should be Astropy Quantities with "
"appropriate units")
|
Convert between period, semimajor axis, and total mass for Keplerian orbit.
|
Convert between period, semimajor axis, and total mass for Keplerian orbit.
For cases where a quick and easy conversion between period and semimajor axis
is needed for some sort of binary system, this function will be able to do the
conversion relatively quickly by taking one aspect to be fixed, and doing the
rest of the conversions through the Astropy Quantity framework.
What needs to be added now is testing guidelines.
|
Python
|
bsd-3-clause
|
cactaur/astropy-utils
|
Convert between period, semimajor axis, and total mass for Keplerian orbit.
For cases where a quick and easy conversion between period and semimajor axis
is needed for some sort of binary system, this function will be able to do the
conversion relatively quickly by taking one aspect to be fixed, and doing the
rest of the conversions through the Astropy Quantity framework.
What needs to be added now is testing guidelines.
|
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.constants import G, M_sun, au
from astropy.units.core import UnitConversionError
def keplerian_binary(givenquant):
'''Return equivalency for Keplerian binary orbit.
Parameters
----------
givenquant : `~astropy.units.Quantity`
`astropy.units.Quantity` associated with the parameter of the orbit
that is fixed for this conversion (e.g. to convert between period and
semimajor axis, this should be a mass quantity).
'''
# Finding a pythonic way to cycle through the three potential choices
# for givenquant has been difficult. This seems to follow the rule of EAFP
# best. First I will assume that givenquant is a mass, then a semimajor
# axis, then a period.
try:
fixedmass = givenquant.to(u.solMass)
except UnitConversionError:
try:
fixedsemimajor = givenquant.to(u.AU)
except UnitConversionError:
try:
fixedperiod = givenquant.to(u.year).value
except UnitConversionError:
# If it's neither a mass, length, or year, then the wrong
# quantity was given.
raise ValueError(
"The fixed quantity must be either a mass, time interval, "
"or length.")
else:
# givenquant is a time
fromunit = u.solMass
tounit = u.AU
def fromfunction(M):
return (M * fixedperiod**2)**(1/3)
def tofunction(a):
return a**3 / fixedperiod**2
else:
# givenquant is a length
fromunit = u.solMass
tounit = u.year
def fromfunction(M):
return (fixedsemimajor**3 / M)**(1/2)
def tofunction(P):
return fixedsemimajor**3 / P**2
else:
# givenquant is a mass
fromunit = u.year
tounit = u.AU
def fromfunction(P):
return (P**2 * fixedmass)**(1/3)
def tofunction(a):
return (a**3 / fixedmass)**(1/2)
equiv = [
(fromunit, tounit, fromfunction, tofunction)]
return equiv
def calc_velocity_of_binary(masses, period, mass_ratio):
'''Returns the orbital velocity of a binary specified by mass and period.
The masses should be the total mass of the system and the period should be
the orbital period of the system.
'''
vel = ((2 * np.pi * G * masses / period)**(1/3) * mass_ratio /
(1 + mass_ratio))
try:
return vel.to(u.km/u.s)
except u.UnitConversionError as e:
raise TypeError("Arguments should be Astropy Quantities with "
"appropriate units")
|
<commit_before><commit_msg>Convert between period, semimajor axis, and total mass for Keplerian orbit.
For cases where a quick and easy conversion between period and semimajor axis
is needed for some sort of binary system, this function will be able to do the
conversion relatively quickly by taking one aspect to be fixed, and doing the
rest of the conversions through the Astropy Quantity framework.
What needs to be added now is testing guidelines.<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.constants import G, M_sun, au
from astropy.units.core import UnitConversionError
def keplerian_binary(givenquant):
'''Return equivalency for Keplerian binary orbit.
Parameters
----------
givenquant : `~astropy.units.Quantity`
`astropy.units.Quantity` associated with the parameter of the orbit
that is fixed for this conversion (e.g. to convert between period and
semimajor axis, this should be a mass quantity).
'''
# Finding a pythonic way to cycle through the three potential choices
# for givenquant has been difficult. This seems to follow the rule of EAFP
# best. First I will assume that givenquant is a mass, then a semimajor
# axis, then a period.
try:
fixedmass = givenquant.to(u.solMass)
except UnitConversionError:
try:
fixedsemimajor = givenquant.to(u.AU)
except UnitConversionError:
try:
fixedperiod = givenquant.to(u.year).value
except UnitConversionError:
# If it's neither a mass, length, or year, then the wrong
# quantity was given.
raise ValueError(
"The fixed quantity must be either a mass, time interval, "
"or length.")
else:
# givenquant is a time
fromunit = u.solMass
tounit = u.AU
def fromfunction(M):
return (M * fixedperiod**2)**(1/3)
def tofunction(a):
return a**3 / fixedperiod**2
else:
# givenquant is a length
fromunit = u.solMass
tounit = u.year
def fromfunction(M):
return (fixedsemimajor**3 / M)**(1/2)
def tofunction(P):
return fixedsemimajor**3 / P**2
else:
# givenquant is a mass
fromunit = u.year
tounit = u.AU
def fromfunction(P):
return (P**2 * fixedmass)**(1/3)
def tofunction(a):
return (a**3 / fixedmass)**(1/2)
equiv = [
(fromunit, tounit, fromfunction, tofunction)]
return equiv
def calc_velocity_of_binary(masses, period, mass_ratio):
'''Returns the orbital velocity of a binary specified by mass and period.
The masses should be the total mass of the system and the period should be
the orbital period of the system.
'''
vel = ((2 * np.pi * G * masses / period)**(1/3) * mass_ratio /
(1 + mass_ratio))
try:
return vel.to(u.km/u.s)
except u.UnitConversionError as e:
raise TypeError("Arguments should be Astropy Quantities with "
"appropriate units")
|
Convert between period, semimajor axis, and total mass for Keplerian orbit.
For cases where a quick and easy conversion between period and semimajor axis
is needed for some sort of binary system, this function will be able to do the
conversion relatively quickly by taking one aspect to be fixed, and doing the
rest of the conversions through the Astropy Quantity framework.
What needs to be added now is testing guidelines.import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.constants import G, M_sun, au
from astropy.units.core import UnitConversionError
def keplerian_binary(givenquant):
'''Return equivalency for Keplerian binary orbit.
Parameters
----------
givenquant : `~astropy.units.Quantity`
`astropy.units.Quantity` associated with the parameter of the orbit
that is fixed for this conversion (e.g. to convert between period and
semimajor axis, this should be a mass quantity).
'''
# Finding a pythonic way to cycle through the three potential choices
# for givenquant has been difficult. This seems to follow the rule of EAFP
# best. First I will assume that givenquant is a mass, then a semimajor
# axis, then a period.
try:
fixedmass = givenquant.to(u.solMass)
except UnitConversionError:
try:
fixedsemimajor = givenquant.to(u.AU)
except UnitConversionError:
try:
fixedperiod = givenquant.to(u.year).value
except UnitConversionError:
# If it's neither a mass, length, or year, then the wrong
# quantity was given.
raise ValueError(
"The fixed quantity must be either a mass, time interval, "
"or length.")
else:
# givenquant is a time
fromunit = u.solMass
tounit = u.AU
def fromfunction(M):
return (M * fixedperiod**2)**(1/3)
def tofunction(a):
return a**3 / fixedperiod**2
else:
# givenquant is a length
fromunit = u.solMass
tounit = u.year
def fromfunction(M):
return (fixedsemimajor**3 / M)**(1/2)
def tofunction(P):
return fixedsemimajor**3 / P**2
else:
# givenquant is a mass
fromunit = u.year
tounit = u.AU
def fromfunction(P):
return (P**2 * fixedmass)**(1/3)
def tofunction(a):
return (a**3 / fixedmass)**(1/2)
equiv = [
(fromunit, tounit, fromfunction, tofunction)]
return equiv
def calc_velocity_of_binary(masses, period, mass_ratio):
'''Returns the orbital velocity of a binary specified by mass and period.
The masses should be the total mass of the system and the period should be
the orbital period of the system.
'''
vel = ((2 * np.pi * G * masses / period)**(1/3) * mass_ratio /
(1 + mass_ratio))
try:
return vel.to(u.km/u.s)
except u.UnitConversionError as e:
raise TypeError("Arguments should be Astropy Quantities with "
"appropriate units")
|
<commit_before><commit_msg>Convert between period, semimajor axis, and total mass for Keplerian orbit.
For cases where a quick and easy conversion between period and semimajor axis
is needed for some sort of binary system, this function will be able to do the
conversion relatively quickly by taking one aspect to be fixed, and doing the
rest of the conversions through the Astropy Quantity framework.
What needs to be added now is testing guidelines.<commit_after>import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.constants import G, M_sun, au
from astropy.units.core import UnitConversionError
def keplerian_binary(givenquant):
'''Return equivalency for Keplerian binary orbit.
Parameters
----------
givenquant : `~astropy.units.Quantity`
`astropy.units.Quantity` associated with the parameter of the orbit
that is fixed for this conversion (e.g. to convert between period and
semimajor axis, this should be a mass quantity).
'''
# Finding a pythonic way to cycle through the three potential choices
# for givenquant has been difficult. This seems to follow the rule of EAFP
# best. First I will assume that givenquant is a mass, then a semimajor
# axis, then a period.
try:
fixedmass = givenquant.to(u.solMass)
except UnitConversionError:
try:
fixedsemimajor = givenquant.to(u.AU)
except UnitConversionError:
try:
fixedperiod = givenquant.to(u.year).value
except UnitConversionError:
# If it's neither a mass, length, or year, then the wrong
# quantity was given.
raise ValueError(
"The fixed quantity must be either a mass, time interval, "
"or length.")
else:
# givenquant is a time
fromunit = u.solMass
tounit = u.AU
def fromfunction(M):
return (M * fixedperiod**2)**(1/3)
def tofunction(a):
return a**3 / fixedperiod**2
else:
# givenquant is a length
fromunit = u.solMass
tounit = u.year
def fromfunction(M):
return (fixedsemimajor**3 / M)**(1/2)
def tofunction(P):
return fixedsemimajor**3 / P**2
else:
# givenquant is a mass
fromunit = u.year
tounit = u.AU
def fromfunction(P):
return (P**2 * fixedmass)**(1/3)
def tofunction(a):
return (a**3 / fixedmass)**(1/2)
equiv = [
(fromunit, tounit, fromfunction, tofunction)]
return equiv
def calc_velocity_of_binary(masses, period, mass_ratio):
'''Returns the orbital velocity of a binary specified by mass and period.
The masses should be the total mass of the system and the period should be
the orbital period of the system.
'''
vel = ((2 * np.pi * G * masses / period)**(1/3) * mass_ratio /
(1 + mass_ratio))
try:
return vel.to(u.km/u.s)
except u.UnitConversionError as e:
raise TypeError("Arguments should be Astropy Quantities with "
"appropriate units")
|
|
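As a quick sanity check of the `binarycalcs` row above, a hedged usage sketch of `calc_velocity_of_binary` with illustrative inputs (a 2-solar-mass binary with a 10-year period and mass ratio 0.5); the numbers are made up for the example.

```python
import astropy.units as u

# Assumes the file above is importable as the module `binarycalcs`.
from binarycalcs import calc_velocity_of_binary

masses = 2.0 * u.solMass   # total mass of the system
period = 10.0 * u.year     # orbital period
mass_ratio = 0.5           # secondary-to-primary mass ratio

# Returns the component's orbital velocity converted to km/s.
print(calc_velocity_of_binary(masses, period, mass_ratio))
```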
960b95dc666753f59eff1b449124490fbec10184
|
kivy/_version.py
|
kivy/_version.py
|
# This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
# if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += 'rc1'
_kivy_git_hash = ''
_kivy_build_date = ''
|
# This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
# if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += '.dev0'
_kivy_git_hash = ''
_kivy_build_date = ''
|
Revert to 2.1.0.dev0 for test release.
|
Revert to 2.1.0.dev0 for test release.
|
Python
|
mit
|
akshayaurora/kivy,kivy/kivy,kivy/kivy,akshayaurora/kivy,kivy/kivy,akshayaurora/kivy
|
# This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
# if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += 'rc1'
_kivy_git_hash = ''
_kivy_build_date = ''
Revert to 2.1.0.dev0 for test release.
|
# This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
    # if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += '.dev0'
_kivy_git_hash = ''
_kivy_build_date = ''
|
<commit_before># This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
    # if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += 'rc1'
_kivy_git_hash = ''
_kivy_build_date = ''
<commit_msg>Revert to 2.1.0.dev0 for test release.<commit_after>
|
# This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
    # if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += '.dev0'
_kivy_git_hash = ''
_kivy_build_date = ''
|
# This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
    # if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += 'rc1'
_kivy_git_hash = ''
_kivy_build_date = ''
Revert to 2.1.0.dev0 for test release.# This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
    # if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += '.dev0'
_kivy_git_hash = ''
_kivy_build_date = ''
|
<commit_before># This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
# if it's a rcx release, it's not proceeded by a period. If it is a
# devx release, it must start with a period
__version__ += 'rc1'
_kivy_git_hash = ''
_kivy_build_date = ''
<commit_msg>Revert to 2.1.0.dev0 for test release.<commit_after># This file is imported from __init__.py and exec'd from setup.py
MAJOR = 2
MINOR = 1
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
    # if it's an rcx release, it's not preceded by a period. If it is a
# devx release, it must start with a period
__version__ += '.dev0'
_kivy_git_hash = ''
_kivy_build_date = ''
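For reference, the suffix logic above assembles the following strings (easy to verify interactively):
print('%d.%d.%d' % (2, 1, 0) + 'rc1')    # 2.1.0rc1  (before this change)
print('%d.%d.%d' % (2, 1, 0) + '.dev0')  # 2.1.0.dev0 (after this change)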
|
47e6c52f0c2bf058c5d099dd2993192e0978e172
|
tests/cpydiff/core_function_moduleattr.py
|
tests/cpydiff/core_function_moduleattr.py
|
"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
Add test and workaround for function.__module__ attr.
|
tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org>
|
Python
|
mit
|
bvernoux/micropython,adafruit/circuitpython,bvernoux/micropython,adafruit/circuitpython,adafruit/circuitpython,henriknelson/micropython,henriknelson/micropython,bvernoux/micropython,henriknelson/micropython,bvernoux/micropython,adafruit/circuitpython,henriknelson/micropython,adafruit/circuitpython,bvernoux/micropython,adafruit/circuitpython,henriknelson/micropython
|
tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org>
|
"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
<commit_before><commit_msg>tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org><commit_after>
|
"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org>"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
<commit_before><commit_msg>tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org><commit_after>"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
|
753c7cad0fa93a7472f3210705be45f0e3917f6c
|
tests/test_utils.py
|
tests/test_utils.py
|
from cartolafc.util import json_default
from datetime import datetime
def test_json_default():
date = datetime(
year=2019,
month=10,
day=10,
hour=0,
minute=0,
second=0,
microsecond=0,
)
result = json_default(date)
assert isinstance(result, dict)
|
Add test for json_default
|
Add test for json_default
|
Python
|
mit
|
vicenteneto/python-cartolafc
|
Add test for json_default
|
from cartolafc.util import json_default
from datetime import datetime
def test_json_default():
date = datetime(
year=2019,
month=10,
day=10,
hour=0,
minute=0,
second=0,
microsecond=0,
)
result = json_default(date)
assert isinstance(result, dict)
|
<commit_before><commit_msg>Add test to json default<commit_after>
|
from cartolafc.util import json_default
from datetime import datetime
def test_json_default():
date = datetime(
year=2019,
month=10,
day=10,
hour=0,
minute=0,
second=0,
microsecond=0,
)
result = json_default(date)
assert isinstance(result, dict)
|
Add test for json_defaultfrom cartolafc.util import json_default
from datetime import datetime
def test_json_default():
date = datetime(
year=2019,
month=10,
day=10,
hour=0,
minute=0,
second=0,
microsecond=0,
)
result = json_default(date)
assert isinstance(result, dict)
|
<commit_before><commit_msg>Add test to json default<commit_after>from cartolafc.util import json_default
from datetime import datetime
def test_json_default():
date = datetime(
year=2019,
month=10,
day=10,
hour=0,
minute=0,
second=0,
microsecond=0,
)
result = json_default(date)
assert isinstance(result, dict)
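The test only pins down that a datetime serializes to a dict; a json_default along those lines might look like this (hypothetical sketch, not cartolafc's actual implementation):
from datetime import datetime

def json_default_sketch(obj):
    # Hypothetical: tag datetimes so they survive a round trip through JSON.
    if isinstance(obj, datetime):
        return {'__datetime__': obj.isoformat()}
    raise TypeError('not JSON serializable: %r' % (obj,))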
|
|
24f5c0371ca59c9d7f0d86b6635ea263d456ca2f
|
alembic/versions/534f7c1fb55f_create_address_colum.py
|
alembic/versions/534f7c1fb55f_create_address_colum.py
|
"""Create address column in person table
Revision ID: 534f7c1fb55f
Revises: 13d42d50c79a
Create Date: 2013-05-12 14:33:19.490150
"""
# revision identifiers, used by Alembic.
revision = '534f7c1fb55f'
down_revision = '13d42d50c79a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.add_column('person', sa.Column('address_id', postgresql.ARRAY(sa.String(length=16)), nullable=True))
op.add_column('person', sa.Column('address', postgresql.ARRAY(sa.Unicode(length=20)), nullable=True))
op.drop_column('person', u'addr_city')
op.drop_column('person', u'addr_county')
op.drop_column('person', u'addr_detail')
def downgrade():
op.add_column('person', sa.Column(u'addr_detail', sa.VARCHAR(length=80), nullable=True))
op.add_column('person', sa.Column(u'addr_county', sa.VARCHAR(length=20), nullable=True))
op.add_column('person', sa.Column(u'addr_city', sa.VARCHAR(length=20), nullable=True))
op.drop_column('person', 'address')
op.drop_column('person', 'address_id')
|
Create address column on person table
|
Create address column on person table
|
Python
|
apache-2.0
|
teampopong/pokr.kr,teampopong/pokr.kr,teampopong/pokr.kr,teampopong/pokr.kr
|
Create address column on person table
|
"""Create address column in person table
Revision ID: 534f7c1fb55f
Revises: 13d42d50c79a
Create Date: 2013-05-12 14:33:19.490150
"""
# revision identifiers, used by Alembic.
revision = '534f7c1fb55f'
down_revision = '13d42d50c79a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.add_column('person', sa.Column('address_id', postgresql.ARRAY(sa.String(length=16)), nullable=True))
op.add_column('person', sa.Column('address', postgresql.ARRAY(sa.Unicode(length=20)), nullable=True))
op.drop_column('person', u'addr_city')
op.drop_column('person', u'addr_county')
op.drop_column('person', u'addr_detail')
def downgrade():
op.add_column('person', sa.Column(u'addr_detail', sa.VARCHAR(length=80), nullable=True))
op.add_column('person', sa.Column(u'addr_county', sa.VARCHAR(length=20), nullable=True))
op.add_column('person', sa.Column(u'addr_city', sa.VARCHAR(length=20), nullable=True))
op.drop_column('person', 'address')
op.drop_column('person', 'address_id')
|
<commit_before><commit_msg>Create address column on person table<commit_after>
|
"""Create address column in person table
Revision ID: 534f7c1fb55f
Revises: 13d42d50c79a
Create Date: 2013-05-12 14:33:19.490150
"""
# revision identifiers, used by Alembic.
revision = '534f7c1fb55f'
down_revision = '13d42d50c79a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.add_column('person', sa.Column('address_id', postgresql.ARRAY(sa.String(length=16)), nullable=True))
op.add_column('person', sa.Column('address', postgresql.ARRAY(sa.Unicode(length=20)), nullable=True))
op.drop_column('person', u'addr_city')
op.drop_column('person', u'addr_county')
op.drop_column('person', u'addr_detail')
def downgrade():
op.add_column('person', sa.Column(u'addr_detail', sa.VARCHAR(length=80), nullable=True))
op.add_column('person', sa.Column(u'addr_county', sa.VARCHAR(length=20), nullable=True))
op.add_column('person', sa.Column(u'addr_city', sa.VARCHAR(length=20), nullable=True))
op.drop_column('person', 'address')
op.drop_column('person', 'address_id')
|
Create address column on person table"""Create address column in person table
Revision ID: 534f7c1fb55f
Revises: 13d42d50c79a
Create Date: 2013-05-12 14:33:19.490150
"""
# revision identifiers, used by Alembic.
revision = '534f7c1fb55f'
down_revision = '13d42d50c79a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.add_column('person', sa.Column('address_id', postgresql.ARRAY(sa.String(length=16)), nullable=True))
op.add_column('person', sa.Column('address', postgresql.ARRAY(sa.Unicode(length=20)), nullable=True))
op.drop_column('person', u'addr_city')
op.drop_column('person', u'addr_county')
op.drop_column('person', u'addr_detail')
def downgrade():
op.add_column('person', sa.Column(u'addr_detail', sa.VARCHAR(length=80), nullable=True))
op.add_column('person', sa.Column(u'addr_county', sa.VARCHAR(length=20), nullable=True))
op.add_column('person', sa.Column(u'addr_city', sa.VARCHAR(length=20), nullable=True))
op.drop_column('person', 'address')
op.drop_column('person', 'address_id')
|
<commit_before><commit_msg>Create address column on person table<commit_after>"""Create address column in person table
Revision ID: 534f7c1fb55f
Revises: 13d42d50c79a
Create Date: 2013-05-12 14:33:19.490150
"""
# revision identifiers, used by Alembic.
revision = '534f7c1fb55f'
down_revision = '13d42d50c79a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.add_column('person', sa.Column('address_id', postgresql.ARRAY(sa.String(length=16)), nullable=True))
op.add_column('person', sa.Column('address', postgresql.ARRAY(sa.Unicode(length=20)), nullable=True))
op.drop_column('person', u'addr_city')
op.drop_column('person', u'addr_county')
op.drop_column('person', u'addr_detail')
def downgrade():
op.add_column('person', sa.Column(u'addr_detail', sa.VARCHAR(length=80), nullable=True))
op.add_column('person', sa.Column(u'addr_county', sa.VARCHAR(length=20), nullable=True))
op.add_column('person', sa.Column(u'addr_city', sa.VARCHAR(length=20), nullable=True))
op.drop_column('person', 'address')
op.drop_column('person', 'address_id')
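To apply or roll back this revision programmatically (standard Alembic API; the ini path is an assumption about the project layout):
from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')
command.upgrade(cfg, '534f7c1fb55f')    # runs upgrade() above
command.downgrade(cfg, '13d42d50c79a')  # reverts to the prior revision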
|
|
1f103880e2c6652dcfb999158b74aa7c12a1b536
|
set1/challenge-3.py
|
set1/challenge-3.py
|
from __future__ import division
import base64
import collections
import string
expected_frequency = {
'e': .12702,
    't': .09056,
    'a': .08167,
    'o': .07507,
    'i': .06966,
    'n': .06749,
    's': .06327,
    'h': .06094,
    'r': .05987,
    'd': .04253,
    'l': .04025,
    'c': .02782,
    'u': .02758,
    'm': .02406,
    'w': .02360,
    'f': .02228,
    'g': .02015,
    'y': .01974,
    'p': .01929,
    'b': .01492,
    'v': .00978,
    'k': .00772,
    'j': .00153,
    'x': .00150,
    'q': .00095,
    'z': .00074
}
def decrypt_message(hex_string):
decoded = base64.b16decode(hex_string, True)
xor_text_map = {
char: single_byte_xor(decoded, char) for char in string.lowercase
}
scores = {
char: score_text(text) for char, text in xor_text_map.items()
}
    best_result = max(scores, key=scores.get)
print xor_text_map[best_result]
def single_byte_xor(string, character):
return ''.join([chr(ord(a) ^ ord(character)) for a in string])
def score_text(text):
counter = collections.Counter(text.lower())
total = sum(counter.values())
return sum([
expected_frequency[char] - (count / total)
if char in expected_frequency else 0
for char, count in counter.items()
])
if __name__ == '__main__':
encrypted = raw_input("> ")
decrypt_message(encrypted)
|
Add solution to challenge 3.
|
Add solution to challenge 3.
|
Python
|
mit
|
ericnorris/cryptopals-solutions
|
Add solution to challenge 3.
|
from __future__ import division
import base64
import collections
import string
expected_frequency = {
'e': .12702,
    't': .09056,
    'a': .08167,
    'o': .07507,
    'i': .06966,
    'n': .06749,
    's': .06327,
    'h': .06094,
    'r': .05987,
    'd': .04253,
    'l': .04025,
    'c': .02782,
    'u': .02758,
    'm': .02406,
    'w': .02360,
    'f': .02228,
    'g': .02015,
    'y': .01974,
    'p': .01929,
    'b': .01492,
    'v': .00978,
    'k': .00772,
    'j': .00153,
    'x': .00150,
    'q': .00095,
    'z': .00074
}
def decrypt_message(hex_string):
decoded = base64.b16decode(hex_string, True)
xor_text_map = {
char: single_byte_xor(decoded, char) for char in string.lowercase
}
scores = {
char: score_text(text) for char, text in xor_text_map.items()
}
    best_result = max(scores, key=scores.get)
print xor_text_map[best_result]
def single_byte_xor(string, character):
return ''.join([chr(ord(a) ^ ord(character)) for a in string])
def score_text(text):
counter = collections.Counter(text.lower())
total = sum(counter.values())
return sum([
expected_frequency[char] - (count / total)
if char in expected_frequency else 0
for char, count in counter.items()
])
if __name__ == '__main__':
encrypted = raw_input("> ")
decrypt_message(encrypted)
|
<commit_before><commit_msg>Add solution to challenge 3.<commit_after>
|
from __future__ import division
import base64
import collections
import string
expected_frequency = {
'e': .12702,
    't': .09056,
    'a': .08167,
    'o': .07507,
    'i': .06966,
    'n': .06749,
    's': .06327,
    'h': .06094,
    'r': .05987,
    'd': .04253,
    'l': .04025,
    'c': .02782,
    'u': .02758,
    'm': .02406,
    'w': .02360,
    'f': .02228,
    'g': .02015,
    'y': .01974,
    'p': .01929,
    'b': .01492,
    'v': .00978,
    'k': .00772,
    'j': .00153,
    'x': .00150,
    'q': .00095,
    'z': .00074
}
def decrypt_message(hex_string):
decoded = base64.b16decode(hex_string, True)
xor_text_map = {
char: single_byte_xor(decoded, char) for char in string.lowercase
}
scores = {
char: score_text(text) for char, text in xor_text_map.items()
}
    best_result = max(scores, key=scores.get)
print xor_text_map[best_result]
def single_byte_xor(string, character):
return ''.join([chr(ord(a) ^ ord(character)) for a in string])
def score_text(text):
counter = collections.Counter(text.lower())
total = sum(counter.values())
return sum([
expected_frequency[char] - (count / total)
if char in expected_frequency else 0
for char, count in counter.items()
])
if __name__ == '__main__':
encrypted = raw_input("> ")
decrypt_message(encrypted)
|
Add solution to challenge 3.from __future__ import division
import base64
import collections
import string
expected_frequency = {
'e': .12702,
    't': .09056,
    'a': .08167,
    'o': .07507,
    'i': .06966,
    'n': .06749,
    's': .06327,
    'h': .06094,
    'r': .05987,
    'd': .04253,
    'l': .04025,
    'c': .02782,
    'u': .02758,
    'm': .02406,
    'w': .02360,
    'f': .02228,
    'g': .02015,
    'y': .01974,
    'p': .01929,
    'b': .01492,
    'v': .00978,
    'k': .00772,
    'j': .00153,
    'x': .00150,
    'q': .00095,
    'z': .00074
}
def decrypt_message(hex_string):
decoded = base64.b16decode(hex_string, True)
xor_text_map = {
char: single_byte_xor(decoded, char) for char in string.lowercase
}
scores = {
char: score_text(text) for char, text in xor_text_map.items()
}
    best_result = max(scores, key=scores.get)
print xor_text_map[best_result]
def single_byte_xor(string, character):
return ''.join([chr(ord(a) ^ ord(character)) for a in string])
def score_text(text):
counter = collections.Counter(text.lower())
total = sum(counter.values())
return sum([
expected_frequency[char] - (count / total)
if char in expected_frequency else 0
for char, count in counter.items()
])
if __name__ == '__main__':
encrypted = raw_input("> ")
decrypt_message(encrypted)
|
<commit_before><commit_msg>Add solution to challenge 3.<commit_after>from __future__ import division
import base64
import collections
import string
expected_frequency = {
'e': .12702,
    't': .09056,
    'a': .08167,
    'o': .07507,
    'i': .06966,
    'n': .06749,
    's': .06327,
    'h': .06094,
    'r': .05987,
    'd': .04253,
    'l': .04025,
    'c': .02782,
    'u': .02758,
    'm': .02406,
    'w': .02360,
    'f': .02228,
    'g': .02015,
    'y': .01974,
    'p': .01929,
    'b': .01492,
    'v': .00978,
    'k': .00772,
    'j': .00153,
    'x': .00150,
    'q': .00095,
    'z': .00074
}
def decrypt_message(hex_string):
decoded = base64.b16decode(hex_string, True)
xor_text_map = {
char: single_byte_xor(decoded, char) for char in string.lowercase
}
scores = {
char: score_text(text) for char, text in xor_text_map.items()
}
    best_result = max(scores, key=scores.get)
print xor_text_map[best_result]
def single_byte_xor(string, character):
return ''.join([chr(ord(a) ^ ord(character)) for a in string])
def score_text(text):
counter = collections.Counter(text.lower())
total = sum(counter.values())
return sum([
expected_frequency[char] - (count / total)
if char in expected_frequency else 0
for char, count in counter.items()
])
if __name__ == '__main__':
encrypted = raw_input("> ")
decrypt_message(encrypted)
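A self-contained smoke test for the routine above (hypothetical demo plaintext; the key must be a lowercase letter, since only string.lowercase is searched):
import base64
demo = single_byte_xor('the quick brown fox jumps over the lazy dog', 'k')
decrypt_message(base64.b16encode(demo))  # should print the recovered plaintext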
|
|
75a4434a88a57c39df41dafbb798f0e6814d763c
|
interview-questions/conversion.py
|
interview-questions/conversion.py
|
'''
Task: Convert a non-negative integer to a hex value for printing
Copyright 2017, Dave Cuthbert. License MIT
'''
def int_to_hex(number):
"""
Check 0
>>> int_to_hex(0)
'0'
Value less than 10
>>> int_to_hex(9)
'9'
Value requiring letter digits
>>> int_to_hex(15)
'F'
Boundary (Uses 10s place)
>>> int_to_hex(16)
'10'
Boundary (Uses 10s, and 1s places)
>>> int_to_hex(17)
'11'
    Value spanning multiple sixteens
>>> int_to_hex(129)
'81'
Boundary (Uses 100s, 10s, and 1s places)
>>> int_to_hex(301)
'12D'
"""
hex_string = ''
hex_digits = [0,1,2,3,4,5,6,7,8,9,'A','B','C','D','E','F']
if number == 0:
hex_string += str(0)
else:
while number > 0:
digit = number % 16
hex_string += str(hex_digits[digit])
            number //= 16
return hex_string[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Print an int as a hex value
|
Print an int as a hex value
|
Python
|
mit
|
daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various
|
Print an int as a hex value
|
'''
Task: Convert a non-negative integer to a hex value for printing
Copyright 2017, Dave Cuthbert. License MIT
'''
def int_to_hex(number):
"""
Check 0
>>> int_to_hex(0)
'0'
Value less than 10
>>> int_to_hex(9)
'9'
Value requiring letter digits
>>> int_to_hex(15)
'F'
Boundary (Uses 10s place)
>>> int_to_hex(16)
'10'
Boundary (Uses 10s, and 1s places)
>>> int_to_hex(17)
'11'
    Value spanning multiple sixteens
>>> int_to_hex(129)
'81'
Boundary (Uses 100s, 10s, and 1s places)
>>> int_to_hex(301)
'12D'
"""
hex_string = ''
hex_digits = [0,1,2,3,4,5,6,7,8,9,'A','B','C','D','E','F']
if number == 0:
hex_string += str(0)
else:
while number > 0:
digit = number % 16
hex_string += str(hex_digits[digit])
            number //= 16
return hex_string[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
|
<commit_before><commit_msg>Print an int as a hex value<commit_after>
|
'''
Task: Convert a non-negative integer to a hex value for printing
Copyright 2017, Dave Cuthbert. License MIT
'''
def int_to_hex(number):
"""
Check 0
>>> int_to_hex(0)
'0'
Value less than 10
>>> int_to_hex(9)
'9'
Value requiring letter digits
>>> int_to_hex(15)
'F'
Boundary (Uses 10s place)
>>> int_to_hex(16)
'10'
Boundary (Uses 10s, and 1s places)
>>> int_to_hex(17)
'11'
    Value spanning multiple sixteens
>>> int_to_hex(129)
'81'
Boundary (Uses 100s, 10s, and 1s places)
>>> int_to_hex(301)
'12D'
"""
hex_string = ''
hex_digits = [0,1,2,3,4,5,6,7,8,9,'A','B','C','D','E','F']
if number == 0:
hex_string += str(0)
else:
while number > 0:
digit = number % 16
hex_string += str(hex_digits[digit])
            number //= 16
return hex_string[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Print an int as a hex value'''
Task: Convert a non-negative integer to a hex value for printing
Copyright 2017, Dave Cuthbert. License MIT
'''
def int_to_hex(number):
"""
Check 0
>>> int_to_hex(0)
'0'
Value less than 10
>>> int_to_hex(9)
'9'
Value requiring letter digits
>>> int_to_hex(15)
'F'
Boundary (Uses 10s place)
>>> int_to_hex(16)
'10'
Boundary (Uses 10s, and 1s places)
>>> int_to_hex(17)
'11'
    Value spanning multiple sixteens
>>> int_to_hex(129)
'81'
Boundary (Uses 100s, 10s, and 1s places)
>>> int_to_hex(301)
'12D'
"""
hex_string = ''
hex_digits = [0,1,2,3,4,5,6,7,8,9,'A','B','C','D','E','F']
if number == 0:
hex_string += str(0)
else:
while number > 0:
digit = number % 16
hex_string += str(hex_digits[digit])
            number //= 16
return hex_string[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
|
<commit_before><commit_msg>Print an int as a hex value<commit_after>'''
Task: Convert a non-negative integer to a hex value for printing
Copyright 2017, Dave Cuthbert. License MIT
'''
def int_to_hex(number):
"""
Check 0
>>> int_to_hex(0)
'0'
Value less than 10
>>> int_to_hex(9)
'9'
Value requiring letter digits
>>> int_to_hex(15)
'F'
Boundary (Uses 10s place)
>>> int_to_hex(16)
'10'
Boundary (Uses 10s, and 1s places)
>>> int_to_hex(17)
'11'
    Value spanning multiple sixteens
>>> int_to_hex(129)
'81'
Boundary (Uses 100s, 10s, and 1s places)
>>> int_to_hex(301)
'12D'
"""
hex_string = ''
hex_digits = [0,1,2,3,4,5,6,7,8,9,'A','B','C','D','E','F']
if number == 0:
hex_string += str(0)
else:
while number > 0:
digit = number % 16
hex_string += str(hex_digits[digit])
            number //= 16
return hex_string[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
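A worked trace of the repeated-division loop for the 301 doctest above:
# 301 % 16 == 13 -> 'D', then 301 // 16 == 18
#  18 % 16 ==  2 -> '2', then  18 // 16 ==  1
#   1 % 16 ==  1 -> '1', then   1 // 16 ==  0
# Digits accumulate as 'D21'; the final [::-1] reversal yields '12D'.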
|
|
e9bb81fced105d41aed1a6069e31b41891cea41c
|
tests/test_web_service.py
|
tests/test_web_service.py
|
# -*- coding: UTF-8 -*-
from expects import *
import xml.dom.minidom
from sii.resource import SII
from dicttoxml import dicttoxml
class Period():
def __init__(self, name):
self.name = name
class Partner():
def __init__(self, name, nif):
self.name = name
self.nif = nif
class InvoiceLineTaxes():
def __init__(self, name, base_imponible):
self.name = name
self.base = base_imponible
class Invoice():
def __init__(self, number, type, partner, amount_total, period_id, date_invoice, tax_line_ids):
self.number = number
self.type = type
self.partner_id = partner
self.period_id = period_id
self.amount_total = amount_total
self.date_invoice = date_invoice
self.tax_line = tax_line_ids
with description("El XML Generado"):
with before.all:
print 'Generando obj'
period = Period(name='03/2016')
tax_line = [
InvoiceLineTaxes('IVA 21%', base_imponible=12.34),
InvoiceLineTaxes('IBI 15%', base_imponible=56.78)
]
partner = Partner(name='Francisco García', nif='12345678T')
invoice = Invoice(
number='F012345', type='out_invoice', partner=partner,
amount_total=15, period_id=period, date_invoice='2016-03-25',
tax_line_ids=tax_line
)
dict_to_xml = SII.generate_object(invoice)
dict_to_xml = dicttoxml(dict_to_xml, root=False, attr_type=False)
xml_pretty = xml.dom.minidom.parseString(dict_to_xml)
pretty_xml_as_string = xml_pretty.toprettyxml()
print 'El XML generado es:\n', pretty_xml_as_string
|
Add first version of tests file
|
Add first version of tests file
|
Python
|
mit
|
gisce/sii
|
Add first version of tests file
|
# -*- coding: UTF-8 -*-
from expects import *
import xml.dom.minidom
from sii.resource import SII
from dicttoxml import dicttoxml
class Period():
def __init__(self, name):
self.name = name
class Partner():
def __init__(self, name, nif):
self.name = name
self.nif = nif
class InvoiceLineTaxes():
def __init__(self, name, base_imponible):
self.name = name
self.base = base_imponible
class Invoice():
def __init__(self, number, type, partner, amount_total, period_id, date_invoice, tax_line_ids):
self.number = number
self.type = type
self.partner_id = partner
self.period_id = period_id
self.amount_total = amount_total
self.date_invoice = date_invoice
self.tax_line = tax_line_ids
with description("El XML Generado"):
with before.all:
print 'Generando obj'
period = Period(name='03/2016')
tax_line = [
InvoiceLineTaxes('IVA 21%', base_imponible=12.34),
InvoiceLineTaxes('IBI 15%', base_imponible=56.78)
]
partner = Partner(name='Francisco García', nif='12345678T')
invoice = Invoice(
number='F012345', type='out_invoice', partner=partner,
amount_total=15, period_id=period, date_invoice='2016-03-25',
tax_line_ids=tax_line
)
dict_to_xml = SII.generate_object(invoice)
dict_to_xml = dicttoxml(dict_to_xml, root=False, attr_type=False)
xml_pretty = xml.dom.minidom.parseString(dict_to_xml)
pretty_xml_as_string = xml_pretty.toprettyxml()
print 'El XML generado es:\n', pretty_xml_as_string
|
<commit_before><commit_msg>Add first version of tests file<commit_after>
|
# -*- coding: UTF-8 -*-
from expects import *
import xml.dom.minidom
from sii.resource import SII
from dicttoxml import dicttoxml
class Period():
def __init__(self, name):
self.name = name
class Partner():
def __init__(self, name, nif):
self.name = name
self.nif = nif
class InvoiceLineTaxes():
def __init__(self, name, base_imponible):
self.name = name
self.base = base_imponible
class Invoice():
def __init__(self, number, type, partner, amount_total, period_id, date_invoice, tax_line_ids):
self.number = number
self.type = type
self.partner_id = partner
self.period_id = period_id
self.amount_total = amount_total
self.date_invoice = date_invoice
self.tax_line = tax_line_ids
with description("El XML Generado"):
with before.all:
print 'Generando obj'
period = Period(name='03/2016')
tax_line = [
InvoiceLineTaxes('IVA 21%', base_imponible=12.34),
InvoiceLineTaxes('IBI 15%', base_imponible=56.78)
]
partner = Partner(name='Francisco García', nif='12345678T')
invoice = Invoice(
number='F012345', type='out_invoice', partner=partner,
amount_total=15, period_id=period, date_invoice='2016-03-25',
tax_line_ids=tax_line
)
dict_to_xml = SII.generate_object(invoice)
dict_to_xml = dicttoxml(dict_to_xml, root=False, attr_type=False)
xml_pretty = xml.dom.minidom.parseString(dict_to_xml)
pretty_xml_as_string = xml_pretty.toprettyxml()
print 'El XML generado es:\n', pretty_xml_as_string
|
Add first version of tests file# -*- coding: UTF-8 -*-
from expects import *
import xml.dom.minidom
from sii.resource import SII
from dicttoxml import dicttoxml
class Period():
def __init__(self, name):
self.name = name
class Partner():
def __init__(self, name, nif):
self.name = name
self.nif = nif
class InvoiceLineTaxes():
def __init__(self, name, base_imponible):
self.name = name
self.base = base_imponible
class Invoice():
def __init__(self, number, type, partner, amount_total, period_id, date_invoice, tax_line_ids):
self.number = number
self.type = type
self.partner_id = partner
self.period_id = period_id
self.amount_total = amount_total
self.date_invoice = date_invoice
self.tax_line = tax_line_ids
with description("El XML Generado"):
with before.all:
print 'Generando obj'
period = Period(name='03/2016')
tax_line = [
InvoiceLineTaxes('IVA 21%', base_imponible=12.34),
InvoiceLineTaxes('IBI 15%', base_imponible=56.78)
]
partner = Partner(name='Francisco García', nif='12345678T')
invoice = Invoice(
number='F012345', type='out_invoice', partner=partner,
amount_total=15, period_id=period, date_invoice='2016-03-25',
tax_line_ids=tax_line
)
dict_to_xml = SII.generate_object(invoice)
dict_to_xml = dicttoxml(dict_to_xml, root=False, attr_type=False)
xml_pretty = xml.dom.minidom.parseString(dict_to_xml)
pretty_xml_as_string = xml_pretty.toprettyxml()
print 'El XML generado es:\n', pretty_xml_as_string
|
<commit_before><commit_msg>Add first version of tests file<commit_after># -*- coding: UTF-8 -*-
from expects import *
import xml.dom.minidom
from sii.resource import SII
from dicttoxml import dicttoxml
class Period():
def __init__(self, name):
self.name = name
class Partner():
def __init__(self, name, nif):
self.name = name
self.nif = nif
class InvoiceLineTaxes():
def __init__(self, name, base_imponible):
self.name = name
self.base = base_imponible
class Invoice():
def __init__(self, number, type, partner, amount_total, period_id, date_invoice, tax_line_ids):
self.number = number
self.type = type
self.partner_id = partner
self.period_id = period_id
self.amount_total = amount_total
self.date_invoice = date_invoice
self.tax_line = tax_line_ids
with description("El XML Generado"):
with before.all:
print 'Generando obj'
period = Period(name='03/2016')
tax_line = [
InvoiceLineTaxes('IVA 21%', base_imponible=12.34),
InvoiceLineTaxes('IBI 15%', base_imponible=56.78)
]
partner = Partner(name='Francisco García', nif='12345678T')
invoice = Invoice(
number='F012345', type='out_invoice', partner=partner,
amount_total=15, period_id=period, date_invoice='2016-03-25',
tax_line_ids=tax_line
)
dict_to_xml = SII.generate_object(invoice)
dict_to_xml = dicttoxml(dict_to_xml, root=False, attr_type=False)
xml_pretty = xml.dom.minidom.parseString(dict_to_xml)
pretty_xml_as_string = xml_pretty.toprettyxml()
print 'El XML generado es:\n', pretty_xml_as_string
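For context on the dicttoxml call above (root and attr_type are real dicttoxml options; the one-key dict is a made-up illustration):
from dicttoxml import dicttoxml
print dicttoxml({'Periodo': '03/2016'}, root=False, attr_type=False)
# <Periodo>03/2016</Periodo>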
|
|
4243b04eb6568f8fccab31509557bd7b7c6783a5
|
logistic-regression-performance.py
|
logistic-regression-performance.py
|
# IPython log file
# Run this in the NewEM data folder
from gala import classify
import numpy as np  # preloaded in a pylab session; imported here so the log runs standalone
X, y = classify.load_training_data_from_disk('training-data-0.h5',
names=['data', 'labels'])
train_idxs = np.random.randint(0, X.shape[0], size=10_000)
y = y[:, 0]
Xtr, ytr = X[train_idxs], y[train_idxs]
test_idxs = np.random.randint(0, X.shape[0], size=1000)
test_idxs = np.setdiff1d(test_idxs, train_idxs)
Xts, yts = X[test_idxs], y[test_idxs]
rf = classify.default_random_forest()
# get_ipython().magic('timeit -n 1 -r 1 rf.fit(Xtr, ytr)')
lg = classify.get_classifier('logist')
# get_ipython().magic('timeit -n 1 -r 1 lg.fit(Xtr, ytr)')
# 20x faster training
lgacc = 1 - np.sum(lg.predict(Xts) != yts) / len(yts)
# 73%
rfacc = 1 - np.sum(rf.predict(Xts) != yts) / len(yts)
# 79.2%
# get_ipython().magic('timeit -r 1 -n 1 lg.predict(Xts)')
# get_ipython().magic('timeit -r 1 -n 1 rf.predict(Xts)')
# 20x faster prediction
# get_ipython().magic('timeit rf.predict(Xts[0:1])')
# get_ipython().magic('timeit lg.predict(Xts[0:1])')
# 30x faster single line prediction
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
Xtrn = s.fit_transform(Xtr)
from sklearn import pipeline
lg = classify.get_classifier('logist')
lg.fit(Xtrn, ytr)
lg2 = pipeline.make_pipeline(s, lg)
lgacc2 = 1 - np.sum(lg2.predict(Xts) != yts) / len(yts)
# 79.6% (!!!)
lg3 = pipeline.make_pipeline(StandardScaler(),
classify.get_classifier('logist'))
lg3 = lg3.fit(Xtr, ytr)
lgacc3 = 1 - np.sum(lg3.predict(Xts) != yts) / len(yts)
# 79.6%
|
Add logistic regression experiment; matching RF performance
|
Add logistic regression experiment; matching RF performance
|
Python
|
bsd-3-clause
|
jni/useful-histories
|
Add logistic regression experiment; matching RF performance
|
# IPython log file
# Run this in the NewEM data folder
from gala import classify
import numpy as np  # preloaded in a pylab session; imported here so the log runs standalone
X, y = classify.load_training_data_from_disk('training-data-0.h5',
names=['data', 'labels'])
train_idxs = np.random.randint(0, X.shape[0], size=10_000)
y = y[:, 0]
Xtr, ytr = X[train_idxs], y[train_idxs]
test_idxs = np.random.randint(0, X.shape[0], size=1000)
test_idxs = np.setdiff1d(test_idxs, train_idxs)
Xts, yts = X[test_idxs], y[test_idxs]
rf = classify.default_random_forest()
# get_ipython().magic('timeit -n 1 -r 1 rf.fit(Xtr, ytr)')
lg = classify.get_classifier('logist')
# get_ipython().magic('timeit -n 1 -r 1 lg.fit(Xtr, ytr)')
# 20x faster training
lgacc = 1 - np.sum(lg.predict(Xts) != yts) / len(yts)
# 73%
rfacc = 1 - np.sum(rf.predict(Xts) != yts) / len(yts)
# 79.2%
# get_ipython().magic('timeit -r 1 -n 1 lg.predict(Xts)')
# get_ipython().magic('timeit -r 1 -n 1 rf.predict(Xts)')
# 20x faster prediction
# get_ipython().magic('timeit rf.predict(Xts[0:1])')
# get_ipython().magic('timeit lg.predict(Xts[0:1])')
# 30x faster single line prediction
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
Xtrn = s.fit_transform(Xtr)
from sklearn import pipeline
lg = classify.get_classifier('logist')
lg.fit(Xtrn, ytr)
lg2 = pipeline.make_pipeline(s, lg)
lgacc2 = 1 - np.sum(lg2.predict(Xts) != yts) / len(yts)
# 79.6% (!!!)
lg3 = pipeline.make_pipeline(StandardScaler(),
classify.get_classifier('logist'))
lg3 = lg3.fit(Xtr, ytr)
lgacc3 = 1 - np.sum(lg3.predict(Xts) != yts) / len(yts)
# 79.6%
|
<commit_before><commit_msg>Add logistic regression experiment; matching RF performance<commit_after>
|
# IPython log file
# Run this in the NewEM data folder
from gala import classify
import numpy as np  # preloaded in a pylab session; imported here so the log runs standalone
X, y = classify.load_training_data_from_disk('training-data-0.h5',
names=['data', 'labels'])
train_idxs = np.random.randint(0, X.shape[0], size=10_000)
y = y[:, 0]
Xtr, ytr = X[train_idxs], y[train_idxs]
test_idxs = np.random.randint(0, X.shape[0], size=1000)
test_idxs = np.setdiff1d(test_idxs, train_idxs)
Xts, yts = X[test_idxs], y[test_idxs]
rf = classify.default_random_forest()
# get_ipython().magic('timeit -n 1 -r 1 rf.fit(Xtr, ytr)')
lg = classify.get_classifier('logist')
# get_ipython().magic('timeit -n 1 -r 1 lg.fit(Xtr, ytr)')
# 20x faster training
lgacc = 1 - np.sum(lg.predict(Xts) != yts) / len(yts)
# 73%
rfacc = 1 - np.sum(rf.predict(Xts) != yts) / len(yts)
# 79.2%
# get_ipython().magic('timeit -r 1 -n 1 lg.predict(Xts)')
# get_ipython().magic('timeit -r 1 -n 1 rf.predict(Xts)')
# 20x faster prediction
# get_ipython().magic('timeit rf.predict(Xts[0:1])')
# get_ipython().magic('timeit lg.predict(Xts[0:1])')
# 30x faster single line prediction
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
Xtrn = s.fit_transform(Xtr)
from sklearn import pipeline
lg = classify.get_classifier('logist')
lg.fit(Xtrn, ytr)
lg2 = pipeline.make_pipeline(s, lg)
lgacc2 = 1 - np.sum(lg2.predict(Xts) != yts) / len(yts)
# 79.6% (!!!)
lg3 = pipeline.make_pipeline(StandardScaler(),
classify.get_classifier('logist'))
lg3 = lg3.fit(Xtr, ytr)
lgacc3 = 1 - np.sum(lg3.predict(Xts) != yts) / len(yts)
# 79.6%
|
Add logistic regression experiment; matching RF performance# IPython log file
# Run this in the NewEM data folder
from gala import classify
import numpy as np  # preloaded in a pylab session; imported here so the log runs standalone
X, y = classify.load_training_data_from_disk('training-data-0.h5',
names=['data', 'labels'])
train_idxs = np.random.randint(0, X.shape[0], size=10_000)
y = y[:, 0]
Xtr, ytr = X[train_idxs], y[train_idxs]
test_idxs = np.random.randint(0, X.shape[0], size=1000)
test_idxs = np.setdiff1d(test_idxs, train_idxs)
Xts, yts = X[test_idxs], y[test_idxs]
rf = classify.default_random_forest()
# get_ipython().magic('timeit -n 1 -r 1 rf.fit(Xtr, ytr)')
lg = classify.get_classifier('logist')
# get_ipython().magic('timeit -n 1 -r 1 lg.fit(Xtr, ytr)')
# 20x faster training
lgacc = 1 - np.sum(lg.predict(Xts) != yts) / len(yts)
# 73%
rfacc = 1 - np.sum(rf.predict(Xts) != yts) / len(yts)
# 79.2%
# get_ipython().magic('timeit -r 1 -n 1 lg.predict(Xts)')
# get_ipython().magic('timeit -r 1 -n 1 rf.predict(Xts)')
# 20x faster prediction
# get_ipython().magic('timeit rf.predict(Xts[0:1])')
# get_ipython().magic('timeit lg.predict(Xts[0:1])')
# 30x faster single line prediction
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
Xtrn = s.fit_transform(Xtr)
from sklearn import pipeline
lg = classify.get_classifier('logist')
lg.fit(Xtrn, ytr)
lg2 = pipeline.make_pipeline(s, lg)
lgacc2 = 1 - np.sum(lg2.predict(Xts) != yts) / len(yts)
# 79.6% (!!!)
lg3 = pipeline.make_pipeline(StandardScaler(),
classify.get_classifier('logist'))
lg3 = lg3.fit(Xtr, ytr)
lgacc3 = 1 - np.sum(lg3.predict(Xts) != yts) / len(yts)
# 79.6%
|
<commit_before><commit_msg>Add logistic regression experiment; matching RF performance<commit_after># IPython log file
# Run this in the NewEM data folder
from gala import classify
import numpy as np  # preloaded in a pylab session; imported here so the log runs standalone
X, y = classify.load_training_data_from_disk('training-data-0.h5',
names=['data', 'labels'])
train_idxs = np.random.randint(0, X.shape[0], size=10_000)
y = y[:, 0]
Xtr, ytr = X[train_idxs], y[train_idxs]
test_idxs = np.random.randint(0, X.shape[0], size=1000)
test_idxs = np.setdiff1d(test_idxs, train_idxs)
Xts, yts = X[test_idxs], y[test_idxs]
rf = classify.default_random_forest()
# get_ipython().magic('timeit -n 1 -r 1 rf.fit(Xtr, ytr)')
lg = classify.get_classifier('logist')
# get_ipython().magic('timeit -n 1 -r 1 lg.fit(Xtr, ytr)')
# 20x faster training
lgacc = 1 - np.sum(lg.predict(Xts) != yts) / len(yts)
# 73%
rfacc = 1 - np.sum(rf.predict(Xts) != yts) / len(yts)
# 79.2%
# get_ipython().magic('timeit -r 1 -n 1 lg.predict(Xts)')
# get_ipython().magic('timeit -r 1 -n 1 rf.predict(Xts)')
# 20x faster prediction
# get_ipython().magic('timeit rf.predict(Xts[0:1])')
# get_ipython().magic('timeit lg.predict(Xts[0:1])')
# 30x faster single line prediction
from sklearn.preprocessing import StandardScaler
s = StandardScaler()
Xtrn = s.fit_transform(Xtr)
from sklearn import pipeline
lg = classify.get_classifier('logist')
lg.fit(Xtrn, ytr)
lg2 = pipeline.make_pipeline(s, lg)
lgacc2 = 1 - np.sum(lg2.predict(Xts) != yts) / len(yts)
# 79.6% (!!!)
lg3 = pipeline.make_pipeline(StandardScaler(),
classify.get_classifier('logist'))
lg3 = lg3.fit(Xtr, ytr)
lgacc3 = 1 - np.sum(lg3.predict(Xts) != yts) / len(yts)
# 79.6%
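The takeaway from the log as one compact sketch (assuming gala's 'logist' classifier wraps sklearn's LogisticRegression, which the pipeline usage above suggests):
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

clf = make_pipeline(StandardScaler(), LogisticRegression())
clf.fit(Xtr, ytr)
print(clf.score(Xts, yts))  # ~0.796 in the log -- on par with the RF, at ~20x the speed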
|
|
3de26996fcd56b0371e73583d880f96c0f55b906
|
clusterCompanies.py
|
clusterCompanies.py
|
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
def clusterCompanies():
###############################################################################
# Retrieve the data from Internet
# Choose a time period
d1 = datetime.datetime(2013, 1, 1)
d2 = datetime.datetime(2015, 1, 1)
# symbol definitions
symbol_dict = {
'MSFT': 'Microsoft',
'AAPL': 'Apple',
'AMZN': 'Amazon',
'IBM': 'IBM',
'YHOO': 'Yahoo',
'SNDK': 'Sandisk',
'EA': 'Electronic Arts',
'XRX': 'Xerox',
'SNE': 'Sony',
'FB': 'Facebook',
'GOOGL': 'Google',
'LNKD': 'LinkedIn',
'PCRFY': 'Panasonic'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
#file = open("ClusterList.txt", "w")
stockList = []
for i in range(n_labels + 1):
stockList.append(symbols[labels == i])
np.savetxt('ClusterList.txt',stockList, delimiter = ' ', fmt="%s")
#txtF.write('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
return stockList
clusterCompanies()
|
Add function to return list of stocks
|
Add function to return list of stocks
|
Python
|
mit
|
dankolbman/MarketCents
|
Add function to return list of stocks
|
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
def clusterCompanies():
###############################################################################
# Retrieve the data from Internet
# Choose a time period
d1 = datetime.datetime(2013, 1, 1)
d2 = datetime.datetime(2015, 1, 1)
# symbol definitions
symbol_dict = {
'MSFT': 'Microsoft',
'AAPL': 'Apple',
'AMZN': 'Amazon',
'IBM': 'IBM',
'YHOO': 'Yahoo',
'SNDK': 'Sandisk',
'EA': 'Electronic Arts',
'XRX': 'Xerox',
'SNE': 'Sony',
'FB': 'Facebook',
'GOOGL': 'Google',
'LNKD': 'LinkedIn',
'PCRFY': 'Panasonic'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
#file = open("ClusterList.txt", "w")
stockList = []
for i in range(n_labels + 1):
stockList.append(symbols[labels == i])
np.savetxt('ClusterList.txt',stockList, delimiter = ' ', fmt="%s")
#txtF.write('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
return stockList
clusterCompanies()
|
<commit_before><commit_msg>Add function to return list of stocks<commit_after>
|
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
def clusterCompanies():
###############################################################################
# Retrieve the data from Internet
# Choose a time period
d1 = datetime.datetime(2013, 1, 1)
d2 = datetime.datetime(2015, 1, 1)
# symbol definitions
symbol_dict = {
'MSFT': 'Microsoft',
'AAPL': 'Apple',
'AMZN': 'Amazon',
'IBM': 'IBM',
'YHOO': 'Yahoo',
'SNDK': 'Sandisk',
'EA': 'Electronic Arts',
'XRX': 'Xerox',
'SNE': 'Sony',
'FB': 'Facebook',
'GOOGL': 'Google',
'LNKD': 'LinkedIn',
'PCRFY': 'Panasonic'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
#file = open("ClusterList.txt", "w")
stockList = []
for i in range(n_labels + 1):
stockList.append(symbols[labels == i])
np.savetxt('ClusterList.txt',stockList, delimiter = ' ', fmt="%s")
#txtF.write('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
return stockList
clusterCompanies()
|
Add function to return list of stocksimport datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
def clusterCompanies():
###############################################################################
# Retrieve the data from Internet
# Choose a time period
d1 = datetime.datetime(2013, 1, 1)
d2 = datetime.datetime(2015, 1, 1)
# symbol definitions
symbol_dict = {
'MSFT': 'Microsoft',
'AAPL': 'Apple',
'AMZN': 'Amazon',
'IBM': 'IBM',
'YHOO': 'Yahoo',
'SNDK': 'Sandisk',
'EA': 'Electronic Arts',
'XRX': 'Xerox',
'SNE': 'Sony',
'FB': 'Facebook',
'GOOGL': 'Google',
'LNKD': 'LinkedIn',
'PCRFY': 'Panasonic'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
#file = open("ClusterList.txt", "w")
stockList = []
for i in range(n_labels + 1):
stockList.append(symbols[labels == i])
np.savetxt('ClusterList.txt',stockList, delimiter = ' ', fmt="%s")
#txtF.write('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
return stockList
clusterCompanies()
|
<commit_before><commit_msg>Add function to return list of stocks<commit_after>import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
def clusterCompanies():
###############################################################################
# Retrieve the data from Internet
# Choose a time period
d1 = datetime.datetime(2013, 1, 1)
d2 = datetime.datetime(2015, 1, 1)
# symbol definitions
symbol_dict = {
'MSFT': 'Microsoft',
'AAPL': 'Apple',
'AMZN': 'Amazon',
'IBM': 'IBM',
'YHOO': 'Yahoo',
'SNDK': 'Sandisk',
'EA': 'Electronic Arts',
'XRX': 'Xerox',
'SNE': 'Sony',
'FB': 'Facebook',
'GOOGL': 'Google',
'LNKD': 'LinkedIn',
'PCRFY': 'Panasonic'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
#file = open("ClusterList.txt", "w")
stockList = []
for i in range(n_labels + 1):
stockList.append(symbols[labels == i])
np.savetxt('ClusterList.txt',stockList, delimiter = ' ', fmt="%s")
#txtF.write('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
return stockList
clusterCompanies()
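The label-to-group step above, in isolation (toy symbols and labels for illustration):
import numpy as np
symbols = np.array(['MSFT', 'AAPL', 'GOOGL', 'FB'])
labels = np.array([0, 0, 1, 1])
groups = [symbols[labels == i] for i in range(labels.max() + 1)]
# [array(['MSFT', 'AAPL'], ...), array(['GOOGL', 'FB'], ...)]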
|
|
f84e0b83eb1abf84104ba3de7c845be147ffabe5
|
tests/units/test_release.py
|
tests/units/test_release.py
|
import pytest
from magnate import release
def test_release_info():
assert hasattr(release, 'AUTHOR')
assert hasattr(release, 'MAINTAINER')
assert hasattr(release, 'PROGRAM_NAME')
assert hasattr(release, 'COPYRIGHT_YEAR')
assert hasattr(release, 'LICENSE')
def test_version_correct():
assert '.'.join(release.__version_info__) == release.__version__
|
Add simple tests that the release information exists
|
Add simple tests that the release information exists
|
Python
|
agpl-3.0
|
abadger/stellarmagnate
|
Add simple tests that the release information exists
|
import pytest
from magnate import release
def test_release_info():
assert hasattr(release, 'AUTHOR')
assert hasattr(release, 'MAINTAINER')
assert hasattr(release, 'PROGRAM_NAME')
assert hasattr(release, 'COPYRIGHT_YEAR')
assert hasattr(release, 'LICENSE')
def test_version_correct():
assert '.'.join(release.__version_info__) == release.__version__
|
<commit_before><commit_msg>Add simple tests that the release information exists<commit_after>
|
import pytest
from magnate import release
def test_release_info():
assert hasattr(release, 'AUTHOR')
assert hasattr(release, 'MAINTAINER')
assert hasattr(release, 'PROGRAM_NAME')
assert hasattr(release, 'COPYRIGHT_YEAR')
assert hasattr(release, 'LICENSE')
def test_version_correct():
assert '.'.join(release.__version_info__) == release.__version__
|
Add simple tests that the release information existsimport pytest
from magnate import release
def test_release_info():
assert hasattr(release, 'AUTHOR')
assert hasattr(release, 'MAINTAINER')
assert hasattr(release, 'PROGRAM_NAME')
assert hasattr(release, 'COPYRIGHT_YEAR')
assert hasattr(release, 'LICENSE')
def test_version_correct():
assert '.'.join(release.__version_info__) == release.__version__
|
<commit_before><commit_msg>Add simple tests that the release information exists<commit_after>import pytest
from magnate import release
def test_release_info():
assert hasattr(release, 'AUTHOR')
assert hasattr(release, 'MAINTAINER')
assert hasattr(release, 'PROGRAM_NAME')
assert hasattr(release, 'COPYRIGHT_YEAR')
assert hasattr(release, 'LICENSE')
def test_version_correct():
assert '.'.join(release.__version_info__) == release.__version__
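The consistency check presumes __version_info__ is a tuple of strings; a release module satisfying these tests might define (hypothetical values):
AUTHOR = MAINTAINER = 'Example Author'
PROGRAM_NAME, COPYRIGHT_YEAR, LICENSE = 'magnate', '2017', 'AGPL-3.0+'
__version_info__ = ('0', '1', '0')
__version__ = '.'.join(__version_info__)  # '0.1.0'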
|
|
38c8901aafdfedc2263bd5fd1cce6bf322525f38
|
samples/plasma.py
|
samples/plasma.py
|
from asciimatics.renderers import Plasma
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.effects import Print
from asciimatics.exceptions import ResizeScreenError
import sys
def demo(screen):
scenes = []
effects = [
Print(screen,
Plasma(screen.height, screen.width, screen.colours),
0,
speed=1,
transparent=False),
]
scenes.append(Scene(effects, -1))
screen.play(scenes, stop_on_resize=True)
if __name__ == "__main__":
while True:
try:
Screen.wrapper(demo)
sys.exit(0)
except ResizeScreenError:
pass
|
Add sample for Plasma effect.
|
Add sample for Plasma effect.
|
Python
|
apache-2.0
|
peterbrittain/asciimatics,peterbrittain/asciimatics
|
Add sample for Plasma effect.
|
from asciimatics.renderers import Plasma
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.effects import Print
from asciimatics.exceptions import ResizeScreenError
import sys
def demo(screen):
scenes = []
effects = [
Print(screen,
Plasma(screen.height, screen.width, screen.colours),
0,
speed=1,
transparent=False),
]
scenes.append(Scene(effects, -1))
screen.play(scenes, stop_on_resize=True)
if __name__ == "__main__":
while True:
try:
Screen.wrapper(demo)
sys.exit(0)
except ResizeScreenError:
pass
|
<commit_before><commit_msg>Add sample for Plasma effect.<commit_after>
|
from asciimatics.renderers import Plasma
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.effects import Print
from asciimatics.exceptions import ResizeScreenError
import sys
def demo(screen):
scenes = []
effects = [
Print(screen,
Plasma(screen.height, screen.width, screen.colours),
0,
speed=1,
transparent=False),
]
scenes.append(Scene(effects, -1))
screen.play(scenes, stop_on_resize=True)
if __name__ == "__main__":
while True:
try:
Screen.wrapper(demo)
sys.exit(0)
except ResizeScreenError:
pass
|
Add sample for Plasma effect.from asciimatics.renderers import Plasma
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.effects import Print
from asciimatics.exceptions import ResizeScreenError
import sys
def demo(screen):
scenes = []
effects = [
Print(screen,
Plasma(screen.height, screen.width, screen.colours),
0,
speed=1,
transparent=False),
]
scenes.append(Scene(effects, -1))
screen.play(scenes, stop_on_resize=True)
if __name__ == "__main__":
while True:
try:
Screen.wrapper(demo)
sys.exit(0)
except ResizeScreenError:
pass
|
<commit_before><commit_msg>Add sample for Plasma effect.<commit_after>from asciimatics.renderers import Plasma
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.effects import Print
from asciimatics.exceptions import ResizeScreenError
import sys
def demo(screen):
scenes = []
effects = [
Print(screen,
Plasma(screen.height, screen.width, screen.colours),
0,
speed=1,
transparent=False),
]
scenes.append(Scene(effects, -1))
screen.play(scenes, stop_on_resize=True)
if __name__ == "__main__":
while True:
try:
Screen.wrapper(demo)
sys.exit(0)
except ResizeScreenError:
pass
|
|
e1be51d2a7bf9bd0b6244c8c4d10557c068fad6a
|
src/01_pre_processing/cutter.py
|
src/01_pre_processing/cutter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Utility functions for spatial processing."""
import click
import fiona
import logging
import numpy as np
import numpy.ma as ma
import os
import rasterio
import sys
def cookie_cut(infile, clipfile, field, outfile, verbose=False):
""
# Read in the clipfile and get all the values in the field
with fiona.open(clipfile, 'r') as clip_src:
# Check that the provided field exists
field_names = list(clip_src.meta['schema']['properties'].keys())
if field not in field_names:
raise ValueError("Field name {} not found".format(field))
# Get the field values.
field_values = [item['properties'][field] for item in clip_src]
# Check if there are multiple same values
unique_field_values = list(set(field_values))
unique_field_values.sort()
if len(field_values) != len(unique_field_values):
click.echo(click.style("WARNING: {} contains multiple same values. Only one instance is retained.".format(field),
fg='orange'))
n_field_values = len(unique_field_values)
click.echo(click.style('Clipping {} into {} parts'.format(infile, n_field_values),
fg='green'))
i = 1
for field_value in unique_field_values:
click.echo(click.style('[{}/{}] Processing {} '.format(i, n_field_values, field_value),
fg='green'))
        i += 1
    return True
@click.command()
@click.option('-v', '--verbose', is_flag=True)
@click.argument('infile', nargs=1, type=click.Path(exists=True))
@click.argument('clipfile', nargs=1, type=click.Path(exists=True))
@click.option('-f', '--field')
@click.argument('outfile', nargs=1)
def cli(infile, clipfile, field, outfile, verbose):
""" Command-line interface."""
if field == "":
click.echo(click.style('ERROR: field name must be provided!',
fg='red'))
sys.exit(-1)
success = cookie_cut(infile, clipfile, field, outfile, verbose=verbose)
if success:
click.echo(click.style('Done!', fg='green'))
else:
click.echo(click.style('Clipping failed', fg='red'))
if __name__ == '__main__':
cli()
|
Add script for cookie cutting
|
Add script for cookie cutting
|
Python
|
mit
|
VUEG/bdes_to,VUEG/bdes_to,VUEG/bdes_to
|
Add script for cookie cutting
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Utility functions for spatial processing."""
import click
import fiona
import logging
import numpy as np
import numpy.ma as ma
import os
import rasterio
import sys
def cookie_cut(infile, clipfile, field, outfile, verbose=False):
""
# Read in the clipfile and get all the values in the field
with fiona.open(clipfile, 'r') as clip_src:
# Check that the provided field exists
field_names = list(clip_src.meta['schema']['properties'].keys())
if field not in field_names:
raise ValueError("Field name {} not found".format(field))
# Get the field values.
field_values = [item['properties'][field] for item in clip_src]
# Check if there are multiple same values
unique_field_values = list(set(field_values))
unique_field_values.sort()
if len(field_values) != len(unique_field_values):
click.echo(click.style("WARNING: {} contains multiple same values. Only one instance is retained.".format(field),
                                  fg='yellow'))
n_field_values = len(unique_field_values)
click.echo(click.style('Clipping {} into {} parts'.format(infile, n_field_values),
fg='green'))
i = 1
for field_value in unique_field_values:
click.echo(click.style('[{}/{}] Processing {} '.format(i, n_field_values, field_value),
fg='green'))
        i += 1
    return True
@click.command()
@click.option('-v', '--verbose', is_flag=True)
@click.argument('infile', nargs=1, type=click.Path(exists=True))
@click.argument('clipfile', nargs=1, type=click.Path(exists=True))
@click.option('-f', '--field')
@click.argument('outfile', nargs=1)
def cli(infile, clipfile, field, outfile, verbose):
""" Command-line interface."""
if field == "":
click.echo(click.style('ERROR: field name must be provided!',
fg='red'))
sys.exit(-1)
success = cookie_cut(infile, clipfile, field, outfile, verbose=verbose)
if success:
click.echo(click.style('Done!', fg='green'))
else:
click.echo(click.style('Clipping failed', fg='red'))
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add script for cookie cutting<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Utility functions for spatial processing."""
import click
import fiona
import logging
import numpy as np
import numpy.ma as ma
import os
import rasterio
import sys
def cookie_cut(infile, clipfile, field, outfile, verbose=False):
""
# Read in the clipfile and get all the values in the field
with fiona.open(clipfile, 'r') as clip_src:
# Check that the provided field exists
field_names = list(clip_src.meta['schema']['properties'].keys())
if field not in field_names:
raise ValueError("Field name {} not found".format(field))
# Get the field values.
field_values = [item['properties'][field] for item in clip_src]
# Check if there are multiple same values
unique_field_values = list(set(field_values))
unique_field_values.sort()
if len(field_values) != len(unique_field_values):
click.echo(click.style("WARNING: {} contains multiple same values. Only one instance is retained.".format(field),
                                  fg='yellow'))
n_field_values = len(unique_field_values)
click.echo(click.style('Clipping {} into {} parts'.format(infile, n_field_values),
fg='green'))
i = 1
for field_value in unique_field_values:
click.echo(click.style('[{}/{}] Processing {} '.format(i, n_field_values, field_value),
fg='green'))
        i += 1
    return True
@click.command()
@click.option('-v', '--verbose', is_flag=True)
@click.argument('infile', nargs=1, type=click.Path(exists=True))
@click.argument('clipfile', nargs=1, type=click.Path(exists=True))
@click.option('-f', '--field')
@click.argument('outfile', nargs=1)
def cli(infile, clipfile, field, outfile, verbose):
""" Command-line interface."""
if field == "":
click.echo(click.style('ERROR: field name must be provided!',
fg='red'))
sys.exit(-1)
success = cookie_cut(infile, clipfile, field, outfile, verbose=verbose)
if success:
click.echo(click.style('Done!', fg='green'))
else:
click.echo(click.style('Clipping failed', fg='red'))
if __name__ == '__main__':
cli()
|
Add script for cookie cutting#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Utility functions for spatial processing."""
import click
import fiona
import logging
import numpy as np
import numpy.ma as ma
import os
import rasterio
import sys
def cookie_cut(infile, clipfile, field, outfile, verbose=False):
""
# Read in the clipfile and get all the values in the field
with fiona.open(clipfile, 'r') as clip_src:
# Check that the provided field exists
field_names = list(clip_src.meta['schema']['properties'].keys())
if field not in field_names:
raise ValueError("Field name {} not found".format(field))
# Get the field values.
field_values = [item['properties'][field] for item in clip_src]
# Check if there are multiple same values
unique_field_values = list(set(field_values))
unique_field_values.sort()
if len(field_values) != len(unique_field_values):
click.echo(click.style("WARNING: {} contains multiple same values. Only one instance is retained.".format(field),
                                  fg='yellow'))
n_field_values = len(unique_field_values)
click.echo(click.style('Clipping {} into {} parts'.format(infile, n_field_values),
fg='green'))
i = 1
for field_value in unique_field_values:
click.echo(click.style('[{}/{}] Processing {} '.format(i, n_field_values, field_value),
fg='green'))
        i += 1
    return True
@click.command()
@click.option('-v', '--verbose', is_flag=True)
@click.argument('infile', nargs=1, type=click.Path(exists=True))
@click.argument('clipfile', nargs=1, type=click.Path(exists=True))
@click.option('-f', '--field')
@click.argument('outfile', nargs=1)
def cli(infile, clipfile, field, outfile, verbose):
""" Command-line interface."""
if field == "":
click.echo(click.style('ERROR: field name must be provided!',
fg='red'))
sys.exit(-1)
success = cookie_cut(infile, clipfile, field, outfile, verbose=verbose)
if success:
click.echo(click.style('Done!', fg='green'))
else:
click.echo(click.style('Clipping failed', fg='red'))
if __name__ == '__main__':
cli()
|
<commit_before><commit_msg>Add script for cookie cutting<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Utility functions for spatial processing."""
import click
import fiona
import logging
import numpy as np
import numpy.ma as ma
import os
import rasterio
import sys
def cookie_cut(infile, clipfile, field, outfile, verbose=False):
""
# Read in the clipfile and get all the values in the field
with fiona.open(clipfile, 'r') as clip_src:
# Check that the provided field exists
field_names = list(clip_src.meta['schema']['properties'].keys())
if field not in field_names:
raise ValueError("Field name {} not found".format(field))
# Get the field values.
field_values = [item['properties'][field] for item in clip_src]
# Check if there are multiple same values
unique_field_values = list(set(field_values))
unique_field_values.sort()
if len(field_values) != len(unique_field_values):
click.echo(click.style("WARNING: {} contains multiple same values. Only one instance is retained.".format(field),
                                  fg='yellow'))
n_field_values = len(unique_field_values)
click.echo(click.style('Clipping {} into {} parts'.format(infile, n_field_values),
fg='green'))
i = 1
for field_value in unique_field_values:
click.echo(click.style('[{}/{}] Processing {} '.format(i, n_field_values, field_value),
fg='green'))
        i += 1
    return True
@click.command()
@click.option('-v', '--verbose', is_flag=True)
@click.argument('infile', nargs=1, type=click.Path(exists=True))
@click.argument('clipfile', nargs=1, type=click.Path(exists=True))
@click.option('-f', '--field')
@click.argument('outfile', nargs=1)
def cli(infile, clipfile, field, outfile, verbose):
""" Command-line interface."""
if field == "":
click.echo(click.style('ERROR: field name must be provided!',
fg='red'))
sys.exit(-1)
success = cookie_cut(infile, clipfile, field, outfile, verbose=verbose)
if success:
click.echo(click.style('Done!', fg='green'))
else:
click.echo(click.style('Clipping failed', fg='red'))
if __name__ == '__main__':
cli()
|
|
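Note: in this commit, cookie_cut only iterates over field values and echoes progress; the clipping step itself is not implemented yet. A minimal, hedged sketch of what each iteration might do with rasterio.mask (the helper name clip_one, the geometry-gathering expression, and the output naming are illustrative assumptions, not part of the original script):
import rasterio
import rasterio.mask
def clip_one(infile, geometries, outfile):
    # geometries: GeoJSON-like dicts for one field value, e.g.
    # [f['geometry'] for f in clip_src if f['properties'][field] == field_value]
    with rasterio.open(infile) as src:
        out_image, out_transform = rasterio.mask.mask(src, geometries, crop=True)
        meta = src.meta.copy()
        meta.update(height=out_image.shape[1], width=out_image.shape[2],
                    transform=out_transform)
    with rasterio.open(outfile, 'w', **meta) as dst:
        dst.write(out_image)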
87aecd71db9ec9872374f6f6807cd3ee43b95a83
|
examples/translations/portuguese_test_1.py
|
examples/translations/portuguese_test_1.py
|
# Portuguese Language Test - Python 3 Only!
from seleniumbase.translate.portuguese import CasoDeTeste
class MinhaClasseDeTeste(CasoDeTeste):
def test_exemplo_1(self):
self.abrir_url("https://pt.wikipedia.org/wiki/")
self.verificar_texto("Wikipédia")
self.verificar_elemento('[title="Visitar a página principal"]')
self.atualizar_texto("#searchInput", "Rio de Janeiro")
self.clique("#searchButton")
self.verificar_texto("Rio de Janeiro", "#firstHeading")
self.verificar_elemento('img[alt*="edifícios"]')
self.atualizar_texto("#searchInput", "São Paulo")
self.clique("#searchButton")
self.verificar_texto("São Paulo", "#firstHeading")
self.verificar_elemento('img[src*="Monumento"]')
self.voltar()
self.verificar_verdade("Janeiro" in self.obter_url_atual())
self.avançar() # noqa
self.verificar_verdade("Paulo" in self.obter_url_atual())
|
Add an example test in Portuguese
|
Add an example test in Portuguese
|
Python
|
mit
|
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase
|
Add an example test in Portuguese
|
# Portuguese Language Test - Python 3 Only!
from seleniumbase.translate.portuguese import CasoDeTeste
class MinhaClasseDeTeste(CasoDeTeste):
def test_exemplo_1(self):
self.abrir_url("https://pt.wikipedia.org/wiki/")
self.verificar_texto("Wikipédia")
self.verificar_elemento('[title="Visitar a página principal"]')
self.atualizar_texto("#searchInput", "Rio de Janeiro")
self.clique("#searchButton")
self.verificar_texto("Rio de Janeiro", "#firstHeading")
self.verificar_elemento('img[alt*="edifícios"]')
self.atualizar_texto("#searchInput", "São Paulo")
self.clique("#searchButton")
self.verificar_texto("São Paulo", "#firstHeading")
self.verificar_elemento('img[src*="Monumento"]')
self.voltar()
self.verificar_verdade("Janeiro" in self.obter_url_atual())
self.avançar() # noqa
self.verificar_verdade("Paulo" in self.obter_url_atual())
|
<commit_before><commit_msg>Add an example test in Portuguese<commit_after>
|
# Portuguese Language Test - Python 3 Only!
from seleniumbase.translate.portuguese import CasoDeTeste
class MinhaClasseDeTeste(CasoDeTeste):
def test_exemplo_1(self):
self.abrir_url("https://pt.wikipedia.org/wiki/")
self.verificar_texto("Wikipédia")
self.verificar_elemento('[title="Visitar a página principal"]')
self.atualizar_texto("#searchInput", "Rio de Janeiro")
self.clique("#searchButton")
self.verificar_texto("Rio de Janeiro", "#firstHeading")
self.verificar_elemento('img[alt*="edifícios"]')
self.atualizar_texto("#searchInput", "São Paulo")
self.clique("#searchButton")
self.verificar_texto("São Paulo", "#firstHeading")
self.verificar_elemento('img[src*="Monumento"]')
self.voltar()
self.verificar_verdade("Janeiro" in self.obter_url_atual())
self.avançar() # noqa
self.verificar_verdade("Paulo" in self.obter_url_atual())
|
Add an example test in Portuguese# Portuguese Language Test - Python 3 Only!
from seleniumbase.translate.portuguese import CasoDeTeste
class MinhaClasseDeTeste(CasoDeTeste):
def test_exemplo_1(self):
self.abrir_url("https://pt.wikipedia.org/wiki/")
self.verificar_texto("Wikipédia")
self.verificar_elemento('[title="Visitar a página principal"]')
self.atualizar_texto("#searchInput", "Rio de Janeiro")
self.clique("#searchButton")
self.verificar_texto("Rio de Janeiro", "#firstHeading")
self.verificar_elemento('img[alt*="edifícios"]')
self.atualizar_texto("#searchInput", "São Paulo")
self.clique("#searchButton")
self.verificar_texto("São Paulo", "#firstHeading")
self.verificar_elemento('img[src*="Monumento"]')
self.voltar()
self.verificar_verdade("Janeiro" in self.obter_url_atual())
self.avançar() # noqa
self.verificar_verdade("Paulo" in self.obter_url_atual())
|
<commit_before><commit_msg>Add an example test in Portuguese<commit_after># Portuguese Language Test - Python 3 Only!
from seleniumbase.translate.portuguese import CasoDeTeste
class MinhaClasseDeTeste(CasoDeTeste):
def test_exemplo_1(self):
self.abrir_url("https://pt.wikipedia.org/wiki/")
self.verificar_texto("Wikipédia")
self.verificar_elemento('[title="Visitar a página principal"]')
self.atualizar_texto("#searchInput", "Rio de Janeiro")
self.clique("#searchButton")
self.verificar_texto("Rio de Janeiro", "#firstHeading")
self.verificar_elemento('img[alt*="edifícios"]')
self.atualizar_texto("#searchInput", "São Paulo")
self.clique("#searchButton")
self.verificar_texto("São Paulo", "#firstHeading")
self.verificar_elemento('img[src*="Monumento"]')
self.voltar()
self.verificar_verdade("Janeiro" in self.obter_url_atual())
self.avançar() # noqa
self.verificar_verdade("Paulo" in self.obter_url_atual())
|
|
b26bb24d09691189d2d079a052c99a2c29bf51d5
|
myDevices/test/systeminfo_test.py
|
myDevices/test/systeminfo_test.py
|
import unittest
from myDevices.os.systeminfo import SystemInfo
from myDevices.utils.logger import setInfo, info
class SystemInfoTest(unittest.TestCase):
def setUp(self):
setInfo()
system_info = SystemInfo()
self.info = system_info.getSystemInformation()
def testCpuInfo(self):
cpu_info = self.info['Cpu']
info(cpu_info)
self.assertEqual(set(cpu_info.keys()), set(('loadavg', 'usage', 'temperature')))
self.assertEqual(set(cpu_info['loadavg'].keys()), set(('one', 'five', 'ten')))
self.assertGreaterEqual(set(cpu_info['usage'].keys()), set(('user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'total')))
# def testCpuLoadInfo(self):
# cpu_load_info = self.info['CpuLoad']
# info(cpu_load_info)
# self.assertGreaterEqual(set(cpu_load_info.keys()), set(('cpu',)))
# def testMemoryInfo(self):
# memory_info = self.info['Memory']
# info(memory_info)
# self.assertEqual(set(memory_info.keys()), set(('total', 'free', 'used', 'buffers', 'cached', 'processes', 'swap')))
# self.assertEqual(set(memory_info['swap'].keys()), set(('total', 'free', 'used')))
# def testUptimeInfo(self):
# uptime_info = self.info['Uptime']
# info(uptime_info)
# self.assertEqual(set(uptime_info.keys()), set(('uptime',)))
# def testStorageInfo(self):
# storage_info = self.info['Storage']
# info(storage_info)
# self.assertEqual(set(storage_info.keys()), set(('list',)))
# for item in storage_info['list']:
# self.assertLessEqual(set(('device', 'filesystem', 'mount')), set(item.keys()))
# def testNetworkInfo(self):
# network_info = self.info['Network']
# info(network_info)
# self.assertGreaterEqual(set(network_info.keys()), set(('list',)))
# for key, value in network_info['list'].items():
# self.assertTrue(value)
# self.assertLessEqual(set(value.keys()), set(('ip', 'ipv6', 'mac')))
# self.assertIsNotNone(network_info['list']['eth0']['ip']['address'])
if __name__ == '__main__':
unittest.main()
|
Add system info test script.
|
Add system info test script.
|
Python
|
mit
|
myDevicesIoT/Cayenne-Agent,myDevicesIoT/Cayenne-Agent
|
Add system info test script.
|
import unittest
from myDevices.os.systeminfo import SystemInfo
from myDevices.utils.logger import setInfo, info
class SystemInfoTest(unittest.TestCase):
def setUp(self):
setInfo()
system_info = SystemInfo()
self.info = system_info.getSystemInformation()
def testCpuInfo(self):
cpu_info = self.info['Cpu']
info(cpu_info)
self.assertEqual(set(cpu_info.keys()), set(('loadavg', 'usage', 'temperature')))
self.assertEqual(set(cpu_info['loadavg'].keys()), set(('one', 'five', 'ten')))
self.assertGreaterEqual(set(cpu_info['usage'].keys()), set(('user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'total')))
# def testCpuLoadInfo(self):
# cpu_load_info = self.info['CpuLoad']
# info(cpu_load_info)
# self.assertGreaterEqual(set(cpu_load_info.keys()), set(('cpu',)))
# def testMemoryInfo(self):
# memory_info = self.info['Memory']
# info(memory_info)
# self.assertEqual(set(memory_info.keys()), set(('total', 'free', 'used', 'buffers', 'cached', 'processes', 'swap')))
# self.assertEqual(set(memory_info['swap'].keys()), set(('total', 'free', 'used')))
# def testUptimeInfo(self):
# uptime_info = self.info['Uptime']
# info(uptime_info)
# self.assertEqual(set(uptime_info.keys()), set(('uptime',)))
# def testStorageInfo(self):
# storage_info = self.info['Storage']
# info(storage_info)
# self.assertEqual(set(storage_info.keys()), set(('list',)))
# for item in storage_info['list']:
# self.assertLessEqual(set(('device', 'filesystem', 'mount')), set(item.keys()))
# def testNetworkInfo(self):
# network_info = self.info['Network']
# info(network_info)
# self.assertGreaterEqual(set(network_info.keys()), set(('list',)))
# for key, value in network_info['list'].items():
# self.assertTrue(value)
# self.assertLessEqual(set(value.keys()), set(('ip', 'ipv6', 'mac')))
# self.assertIsNotNone(network_info['list']['eth0']['ip']['address'])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add system info test script.<commit_after>
|
import unittest
from myDevices.os.systeminfo import SystemInfo
from myDevices.utils.logger import setInfo, info
class SystemInfoTest(unittest.TestCase):
def setUp(self):
setInfo()
system_info = SystemInfo()
self.info = system_info.getSystemInformation()
def testCpuInfo(self):
cpu_info = self.info['Cpu']
info(cpu_info)
self.assertEqual(set(cpu_info.keys()), set(('loadavg', 'usage', 'temperature')))
self.assertEqual(set(cpu_info['loadavg'].keys()), set(('one', 'five', 'ten')))
self.assertGreaterEqual(set(cpu_info['usage'].keys()), set(('user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'total')))
# def testCpuLoadInfo(self):
# cpu_load_info = self.info['CpuLoad']
# info(cpu_load_info)
# self.assertGreaterEqual(set(cpu_load_info.keys()), set(('cpu',)))
# def testMemoryInfo(self):
# memory_info = self.info['Memory']
# info(memory_info)
# self.assertEqual(set(memory_info.keys()), set(('total', 'free', 'used', 'buffers', 'cached', 'processes', 'swap')))
# self.assertEqual(set(memory_info['swap'].keys()), set(('total', 'free', 'used')))
# def testUptimeInfo(self):
# uptime_info = self.info['Uptime']
# info(uptime_info)
# self.assertEqual(set(uptime_info.keys()), set(('uptime',)))
# def testStorageInfo(self):
# storage_info = self.info['Storage']
# info(storage_info)
# self.assertEqual(set(storage_info.keys()), set(('list',)))
# for item in storage_info['list']:
# self.assertLessEqual(set(('device', 'filesystem', 'mount')), set(item.keys()))
# def testNetworkInfo(self):
# network_info = self.info['Network']
# info(network_info)
# self.assertGreaterEqual(set(network_info.keys()), set(('list',)))
# for key, value in network_info['list'].items():
# self.assertTrue(value)
# self.assertLessEqual(set(value.keys()), set(('ip', 'ipv6', 'mac')))
# self.assertIsNotNone(network_info['list']['eth0']['ip']['address'])
if __name__ == '__main__':
unittest.main()
|
Add system info test script.import unittest
from myDevices.os.systeminfo import SystemInfo
from myDevices.utils.logger import setInfo, info
class SystemInfoTest(unittest.TestCase):
def setUp(self):
setInfo()
system_info = SystemInfo()
self.info = system_info.getSystemInformation()
def testCpuInfo(self):
cpu_info = self.info['Cpu']
info(cpu_info)
self.assertEqual(set(cpu_info.keys()), set(('loadavg', 'usage', 'temperature')))
self.assertEqual(set(cpu_info['loadavg'].keys()), set(('one', 'five', 'ten')))
self.assertGreaterEqual(set(cpu_info['usage'].keys()), set(('user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'total')))
# def testCpuLoadInfo(self):
# cpu_load_info = self.info['CpuLoad']
# info(cpu_load_info)
# self.assertGreaterEqual(set(cpu_load_info.keys()), set(('cpu',)))
# def testMemoryInfo(self):
# memory_info = self.info['Memory']
# info(memory_info)
# self.assertEqual(set(memory_info.keys()), set(('total', 'free', 'used', 'buffers', 'cached', 'processes', 'swap')))
# self.assertEqual(set(memory_info['swap'].keys()), set(('total', 'free', 'used')))
# def testUptimeInfo(self):
# uptime_info = self.info['Uptime']
# info(uptime_info)
# self.assertEqual(set(uptime_info.keys()), set(('uptime',)))
# def testStorageInfo(self):
# storage_info = self.info['Storage']
# info(storage_info)
# self.assertEqual(set(storage_info.keys()), set(('list',)))
# for item in storage_info['list']:
# self.assertLessEqual(set(('device', 'filesystem', 'mount')), set(item.keys()))
# def testNetworkInfo(self):
# network_info = self.info['Network']
# info(network_info)
# self.assertGreaterEqual(set(network_info.keys()), set(('list',)))
# for key, value in network_info['list'].items():
# self.assertTrue(value)
# self.assertLessEqual(set(value.keys()), set(('ip', 'ipv6', 'mac')))
# self.assertIsNotNone(network_info['list']['eth0']['ip']['address'])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add system info test script.<commit_after>import unittest
from myDevices.os.systeminfo import SystemInfo
from myDevices.utils.logger import setInfo, info
class SystemInfoTest(unittest.TestCase):
def setUp(self):
setInfo()
system_info = SystemInfo()
self.info = system_info.getSystemInformation()
def testCpuInfo(self):
cpu_info = self.info['Cpu']
info(cpu_info)
self.assertEqual(set(cpu_info.keys()), set(('loadavg', 'usage', 'temperature')))
self.assertEqual(set(cpu_info['loadavg'].keys()), set(('one', 'five', 'ten')))
self.assertGreaterEqual(set(cpu_info['usage'].keys()), set(('user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq', 'total')))
# def testCpuLoadInfo(self):
# cpu_load_info = self.info['CpuLoad']
# info(cpu_load_info)
# self.assertGreaterEqual(set(cpu_load_info.keys()), set(('cpu',)))
# def testMemoryInfo(self):
# memory_info = self.info['Memory']
# info(memory_info)
# self.assertEqual(set(memory_info.keys()), set(('total', 'free', 'used', 'buffers', 'cached', 'processes', 'swap')))
# self.assertEqual(set(memory_info['swap'].keys()), set(('total', 'free', 'used')))
# def testUptimeInfo(self):
# uptime_info = self.info['Uptime']
# info(uptime_info)
# self.assertEqual(set(uptime_info.keys()), set(('uptime',)))
# def testStorageInfo(self):
# storage_info = self.info['Storage']
# info(storage_info)
# self.assertEqual(set(storage_info.keys()), set(('list',)))
# for item in storage_info['list']:
# self.assertLessEqual(set(('device', 'filesystem', 'mount')), set(item.keys()))
# def testNetworkInfo(self):
# network_info = self.info['Network']
# info(network_info)
# self.assertGreaterEqual(set(network_info.keys()), set(('list',)))
# for key, value in network_info['list'].items():
# self.assertTrue(value)
# self.assertLessEqual(set(value.keys()), set(('ip', 'ipv6', 'mac')))
# self.assertIsNotNone(network_info['list']['eth0']['ip']['address'])
if __name__ == '__main__':
unittest.main()
|
|
984fa4639d73132799589963fe33ed2f213ca9ed
|
numba/tests/test_nopython_math.py
|
numba/tests/test_nopython_math.py
|
import math
import numpy as np
import unittest
#import logging; logging.getLogger().setLevel(1)
from numba import *
def test_exp(a):
return math.exp(a)
def test_sqrt(a):
return math.sqrt(a)
def test_log(a):
return math.log(a)
class TestNoPythonMath(unittest.TestCase):
def test_sqrt(self):
self._template(test_sqrt, np.sqrt)
def test_exp(self):
self._template(test_exp, np.exp)
def test_log(self):
self._template(test_log, np.log)
def _template(self, func, npfunc):
func_jitted = jit(argtypes=[f4], restype=f4, nopython=True)(func)
A = np.array(np.random.random(10), dtype=np.float32)
B = np.vectorize(func_jitted)(A)
self.assertTrue(np.allclose(B, npfunc(A)))
if __name__ == '__main__':
unittest.main()
|
Add test for math functions in nopython context.
|
Add test for math functions in nopython context.
|
Python
|
bsd-2-clause
|
numba/numba,pombredanne/numba,IntelLabs/numba,stonebig/numba,pombredanne/numba,gdementen/numba,IntelLabs/numba,cpcloud/numba,seibert/numba,jriehl/numba,stonebig/numba,GaZ3ll3/numba,stuartarchibald/numba,shiquanwang/numba,gmarkall/numba,ssarangi/numba,numba/numba,gmarkall/numba,stefanseefeld/numba,IntelLabs/numba,jriehl/numba,cpcloud/numba,pitrou/numba,jriehl/numba,GaZ3ll3/numba,pitrou/numba,cpcloud/numba,stuartarchibald/numba,pombredanne/numba,stefanseefeld/numba,sklam/numba,gdementen/numba,ssarangi/numba,gmarkall/numba,pombredanne/numba,stefanseefeld/numba,pitrou/numba,jriehl/numba,ssarangi/numba,stuartarchibald/numba,GaZ3ll3/numba,seibert/numba,stuartarchibald/numba,ssarangi/numba,GaZ3ll3/numba,jriehl/numba,sklam/numba,ssarangi/numba,cpcloud/numba,seibert/numba,stonebig/numba,stefanseefeld/numba,GaZ3ll3/numba,numba/numba,numba/numba,stonebig/numba,gmarkall/numba,seibert/numba,stuartarchibald/numba,shiquanwang/numba,shiquanwang/numba,sklam/numba,seibert/numba,cpcloud/numba,sklam/numba,pitrou/numba,pombredanne/numba,gdementen/numba,gdementen/numba,IntelLabs/numba,sklam/numba,stonebig/numba,numba/numba,gdementen/numba,IntelLabs/numba,gmarkall/numba,stefanseefeld/numba,pitrou/numba
|
Add test for math functions in nopython context.
|
import math
import numpy as np
import unittest
#import logging; logging.getLogger().setLevel(1)
from numba import *
def test_exp(a):
return math.exp(a)
def test_sqrt(a):
return math.sqrt(a)
def test_log(a):
return math.log(a)
class TestNoPythonMath(unittest.TestCase):
def test_sqrt(self):
self._template(test_sqrt, np.sqrt)
def test_exp(self):
self._template(test_exp, np.exp)
def test_log(self):
self._template(test_log, np.log)
def _template(self, func, npfunc):
func_jitted = jit(argtypes=[f4], restype=f4, nopython=True)(func)
A = np.array(np.random.random(10), dtype=np.float32)
B = np.vectorize(func_jitted)(A)
self.assertTrue(np.allclose(B, npfunc(A)))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for math functions in nopython context.<commit_after>
|
import math
import numpy as np
import unittest
#import logging; logging.getLogger().setLevel(1)
from numba import *
def test_exp(a):
return math.exp(a)
def test_sqrt(a):
return math.sqrt(a)
def test_log(a):
return math.log(a)
class TestNoPythonMath(unittest.TestCase):
def test_sqrt(self):
self._template(test_sqrt, np.sqrt)
def test_exp(self):
self._template(test_exp, np.exp)
def test_log(self):
self._template(test_log, np.log)
def _template(self, func, npfunc):
func_jitted = jit(argtypes=[f4], restype=f4, nopython=True)(func)
A = np.array(np.random.random(10), dtype=np.float32)
B = np.vectorize(func_jitted)(A)
self.assertTrue(np.allclose(B, npfunc(A)))
if __name__ == '__main__':
unittest.main()
|
Add test for math functions in nopython context.import math
import numpy as np
import unittest
#import logging; logging.getLogger().setLevel(1)
from numba import *
def test_exp(a):
return math.exp(a)
def test_sqrt(a):
return math.sqrt(a)
def test_log(a):
return math.log(a)
class TestNoPythonMath(unittest.TestCase):
def test_sqrt(self):
self._template(test_sqrt, np.sqrt)
def test_exp(self):
self._template(test_exp, np.exp)
def test_log(self):
self._template(test_log, np.log)
def _template(self, func, npfunc):
func_jitted = jit(argtypes=[f4], restype=f4, nopython=True)(func)
A = np.array(np.random.random(10), dtype=np.float32)
B = np.vectorize(func_jitted)(A)
self.assertTrue(np.allclose(B, npfunc(A)))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for math functions in nopython context.<commit_after>import math
import numpy as np
import unittest
#import logging; logging.getLogger().setLevel(1)
from numba import *
def test_exp(a):
return math.exp(a)
def test_sqrt(a):
return math.sqrt(a)
def test_log(a):
return math.log(a)
class TestNoPythonMath(unittest.TestCase):
def test_sqrt(self):
self._template(test_sqrt, np.sqrt)
def test_exp(self):
self._template(test_exp, np.exp)
def test_log(self):
self._template(test_log, np.log)
def _template(self, func, npfunc):
func_jitted = jit(argtypes=[f4], restype=f4, nopython=True)(func)
A = np.array(np.random.random(10), dtype=np.float32)
B = np.vectorize(func_jitted)(A)
self.assertTrue(np.allclose(B, npfunc(A)))
if __name__ == '__main__':
unittest.main()
|
|
59a0255babaef772ad72e36c688e72b3385b0db4
|
test/pointfree_test.py
|
test/pointfree_test.py
|
import os, sys, unittest
from pointfree import *
class CurryingTest(unittest.TestCase):
def testCurrying1(self):
@curryable
def add(a, b):
return a + b
@curryable
def mult(a, b):
return a * b
add1 = add(1)
mult2 = mult(2)
self.assertEqual(add1(3), 4)
self.assertEqual(mult2(4), 8)
if __name__ == '__main__':
unittest.main()
|
Add simple unit test module
|
Add simple unit test module
|
Python
|
apache-2.0
|
markshroyer/pointfree,markshroyer/pointfree
|
Add simple unit test module
|
import os, sys, unittest
from pointfree import *
class CurryingTest(unittest.TestCase):
def testCurrying1(self):
@curryable
def add(a, b):
return a + b
@curryable
def mult(a, b):
return a * b
add1 = add(1)
mult2 = mult(2)
self.assertEqual(add1(3), 4)
self.assertEqual(mult2(4), 8)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add simple unit test module<commit_after>
|
import os, sys, unittest
from pointfree import *
class CurryingTest(unittest.TestCase):
def testCurrying1(self):
@curryable
def add(a, b):
return a + b
@curryable
def mult(a, b):
return a * b
add1 = add(1)
mult2 = mult(2)
self.assertEqual(add1(3), 4)
self.assertEqual(mult2(4), 8)
if __name__ == '__main__':
unittest.main()
|
Add simple unit test moduleimport os, sys, unittest
from pointfree import *
class CurryingTest(unittest.TestCase):
def testCurrying1(self):
@curryable
def add(a, b):
return a + b
@curryable
def mult(a, b):
return a * b
add1 = add(1)
mult2 = mult(2)
self.assertEqual(add1(3), 4)
self.assertEqual(mult2(4), 8)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add simple unit test module<commit_after>import os, sys, unittest
from pointfree import *
class CurryingTest(unittest.TestCase):
def testCurrying1(self):
@curryable
def add(a, b):
return a + b
@curryable
def mult(a, b):
return a * b
add1 = add(1)
mult2 = mult(2)
self.assertEqual(add1(3), 4)
self.assertEqual(mult2(4), 8)
if __name__ == '__main__':
unittest.main()
|
|
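The CurryingTest above relies on pointfree's @curryable decorator. As a rough illustration of the mechanism only (this generic sketch is not pointfree's actual implementation, and it handles positional arguments only):
import functools
def curryable(fn):
    @functools.wraps(fn)
    def wrapper(*args):
        # Call through once enough positional arguments have accumulated;
        # otherwise return a partial application that keeps accumulating.
        if len(args) >= fn.__code__.co_argcount:
            return fn(*args)
        return functools.partial(wrapper, *args)
    return wrapper
# Usage mirrors the test: curryable(lambda a, b: a + b)(1)(3) == 4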
64d4e5939fbfa325b57149a73c4bf12c54b6f2bf
|
tests/maptools_test.py
|
tests/maptools_test.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest as ut
import stripeline.maptools as mt
import numpy as np
class TestMaptools(ut.TestCase):
def test_condmatr(self):
matr = np.zeros((2, 9), dtype='float64', order='F')
mt.update_condmatr(numpix=2,
pixidx=[0, 0, 0],
angle=np.array([0.0, 1. / 8., 1. / 4.]) * np.pi,
m=matr)
mt.update_condmatr(numpix=2,
pixidx=[1, 1, 1],
angle=np.array([0.0, 1. / 12., 1. / 8.]) * np.pi,
m=matr)
expected = np.array([
[3., 1.7071067811865475, 1.7071067811865475,
1.7071067811865475, 1.5, 0.5,
1.7071067811865475, 0.5, 1.5],
[3., 2.5731321849709863, 1.2071067811865475,
2.5731321849709863, 2.25, 0.9330127018922193,
1.2071067811865475, 0.9330127018922193, 0.75]
])
self.assertTrue(np.allclose(expected, matr))
|
Add a test for update_condmatr
|
Add a test for update_condmatr
|
Python
|
mit
|
ziotom78/stripeline,ziotom78/stripeline
|
Add a test for update_condmatr
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest as ut
import stripeline.maptools as mt
import numpy as np
class TestMaptools(ut.TestCase):
def test_condmatr(self):
matr = np.zeros((2, 9), dtype='float64', order='F')
mt.update_condmatr(numpix=2,
pixidx=[0, 0, 0],
angle=np.array([0.0, 1. / 8., 1. / 4.]) * np.pi,
m=matr)
mt.update_condmatr(numpix=2,
pixidx=[1, 1, 1],
angle=np.array([0.0, 1. / 12., 1. / 8.]) * np.pi,
m=matr)
expected = np.array([
[3., 1.7071067811865475, 1.7071067811865475,
1.7071067811865475, 1.5, 0.5,
1.7071067811865475, 0.5, 1.5],
[3., 2.5731321849709863, 1.2071067811865475,
2.5731321849709863, 2.25, 0.9330127018922193,
1.2071067811865475, 0.9330127018922193, 0.75]
])
self.assertTrue(np.allclose(expected, matr))
|
<commit_before><commit_msg>Add a test for update_condmatr<commit_after>
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest as ut
import stripeline.maptools as mt
import numpy as np
class TestMaptools(ut.TestCase):
def test_condmatr(self):
matr = np.zeros((2, 9), dtype='float64', order='F')
mt.update_condmatr(numpix=2,
pixidx=[0, 0, 0],
angle=np.array([0.0, 1. / 8., 1. / 4.]) * np.pi,
m=matr)
mt.update_condmatr(numpix=2,
pixidx=[1, 1, 1],
angle=np.array([0.0, 1. / 12., 1. / 8.]) * np.pi,
m=matr)
expected = np.array([
[3., 1.7071067811865475, 1.7071067811865475,
1.7071067811865475, 1.5, 0.5,
1.7071067811865475, 0.5, 1.5],
[3., 2.5731321849709863, 1.2071067811865475,
2.5731321849709863, 2.25, 0.9330127018922193,
1.2071067811865475, 0.9330127018922193, 0.75]
])
self.assertTrue(np.allclose(expected, matr))
|
Add a test for update_condmatr#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest as ut
import stripeline.maptools as mt
import numpy as np
class TestMaptools(ut.TestCase):
def test_condmatr(self):
matr = np.zeros((2, 9), dtype='float64', order='F')
mt.update_condmatr(numpix=2,
pixidx=[0, 0, 0],
angle=np.array([0.0, 1. / 8., 1. / 4.]) * np.pi,
m=matr)
mt.update_condmatr(numpix=2,
pixidx=[1, 1, 1],
angle=np.array([0.0, 1. / 12., 1. / 8.]) * np.pi,
m=matr)
expected = np.array([
[3., 1.7071067811865475, 1.7071067811865475,
1.7071067811865475, 1.5, 0.5,
1.7071067811865475, 0.5, 1.5],
[3., 2.5731321849709863, 1.2071067811865475,
2.5731321849709863, 2.25, 0.9330127018922193,
1.2071067811865475, 0.9330127018922193, 0.75]
])
self.assertTrue(np.allclose(expected, matr))
|
<commit_before><commit_msg>Add a test for update_condmatr<commit_after>#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest as ut
import stripeline.maptools as mt
import numpy as np
class TestMaptools(ut.TestCase):
def test_condmatr(self):
matr = np.zeros((2, 9), dtype='float64', order='F')
mt.update_condmatr(numpix=2,
pixidx=[0, 0, 0],
angle=np.array([0.0, 1. / 8., 1. / 4.]) * np.pi,
m=matr)
mt.update_condmatr(numpix=2,
pixidx=[1, 1, 1],
angle=np.array([0.0, 1. / 12., 1. / 8.]) * np.pi,
m=matr)
expected = np.array([
[3., 1.7071067811865475, 1.7071067811865475,
1.7071067811865475, 1.5, 0.5,
1.7071067811865475, 0.5, 1.5],
[3., 2.5731321849709863, 1.2071067811865475,
2.5731321849709863, 2.25, 0.9330127018922193,
1.2071067811865475, 0.9330127018922193, 0.75]
])
self.assertTrue(np.allclose(expected, matr))
|
|
94cc15125fa4d8990de50b7c3c39effe6a5e5d93
|
tools/dev/wc-format.py
|
tools/dev/wc-format.py
|
#!/usr/bin/env python
import os
import sqlite3
import sys
# helper
def usage():
sys.stderr.write("USAGE: %s [PATH]\n" + \
"\n" + \
"Prints to stdout the format of the working copy at PATH.\n")
# parse argv
wc = (sys.argv[1:] + ['.'])[0]
# main()
entries = os.path.join(wc, '.svn', 'entries')
wc_db = os.path.join(wc, '.svn', 'wc.db')
if os.path.exists(entries):
formatno = int(open(entries).readline())
elif os.path.exists(wc_db):
formatno = sqlite3.connect(wc_db).execute('pragma user_version;').fetchone()[0]
else:
usage()
sys.exit(1)
# 1.0.x -> 1.3.x: format 4
# 1.4.x: format 8
# 1.5.x: format 9
# 1.6.x: format 10
# 1.7.x: format XXX
print("%s: %d" % (wc, formatno))
|
Add a helper script, ported to Python.
|
Add a helper script, ported to Python.
* tools/dev/wc-format.py: New.
Prints the working copy format of a given directory.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@995260 13f79535-47bb-0310-9956-ffa450edef68
|
Python
|
apache-2.0
|
YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,YueLinHo/Subversion,wbond/subversion,YueLinHo/Subversion,wbond/subversion,wbond/subversion,wbond/subversion,wbond/subversion,YueLinHo/Subversion,wbond/subversion,YueLinHo/Subversion,wbond/subversion
|
Add a helper script, ported to Python.
* tools/dev/wc-format.py: New.
Prints the working copy format of a given directory.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@995260 13f79535-47bb-0310-9956-ffa450edef68
|
#!/usr/bin/env python
import os
import sqlite3
import sys
# helper
def usage():
sys.stderr.write("USAGE: %s [PATH]\n" + \
"\n" + \
"Prints to stdout the format of the working copy at PATH.\n")
# parse argv
wc = (sys.argv[1:] + ['.'])[0]
# main()
entries = os.path.join(wc, '.svn', 'entries')
wc_db = os.path.join(wc, '.svn', 'wc.db')
if os.path.exists(entries):
formatno = int(open(entries).readline())
elif os.path.exists(wc_db):
formatno = sqlite3.connect(wc_db).execute('pragma user_version;').fetchone()[0]
else:
usage()
sys.exit(1)
# 1.0.x -> 1.3.x: format 4
# 1.4.x: format 8
# 1.5.x: format 9
# 1.6.x: format 10
# 1.7.x: format XXX
print("%s: %d" % (wc, formatno))
|
<commit_before><commit_msg>Add a helper script, ported to Python.
* tools/dev/wc-format.py: New.
Prints the working copy format of a given directory.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@995260 13f79535-47bb-0310-9956-ffa450edef68<commit_after>
|
#!/usr/bin/env python
import os
import sqlite3
import sys
# helper
def usage():
sys.stderr.write("USAGE: %s [PATH]\n" + \
"\n" + \
"Prints to stdout the format of the working copy at PATH.\n")
# parse argv
wc = (sys.argv[1:] + ['.'])[0]
# main()
entries = os.path.join(wc, '.svn', 'entries')
wc_db = os.path.join(wc, '.svn', 'wc.db')
if os.path.exists(entries):
formatno = int(open(entries).readline())
elif os.path.exists(wc_db):
formatno = sqlite3.connect(wc_db).execute('pragma user_version;').fetchone()[0]
else:
usage()
sys.exit(1)
# 1.0.x -> 1.3.x: format 4
# 1.4.x: format 8
# 1.5.x: format 9
# 1.6.x: format 10
# 1.7.x: format XXX
print("%s: %d" % (wc, formatno))
|
Add a helper script, ported to Python.
* tools/dev/wc-format.py: New.
Prints the working copy format of a given directory.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@995260 13f79535-47bb-0310-9956-ffa450edef68#!/usr/bin/env python
import os
import sqlite3
import sys
# helper
def usage():
sys.stderr.write("USAGE: %s [PATH]\n" + \
"\n" + \
"Prints to stdout the format of the working copy at PATH.\n")
# parse argv
wc = (sys.argv[1:] + ['.'])[0]
# main()
entries = os.path.join(wc, '.svn', 'entries')
wc_db = os.path.join(wc, '.svn', 'wc.db')
if os.path.exists(entries):
formatno = int(open(entries).readline())
elif os.path.exists(wc_db):
formatno = sqlite3.connect(wc_db).execute('pragma user_version;').fetchone()[0]
else:
usage()
sys.exit(1)
# 1.0.x -> 1.3.x: format 4
# 1.4.x: format 8
# 1.5.x: format 9
# 1.6.x: format 10
# 1.7.x: format XXX
print("%s: %d" % (wc, formatno))
|
<commit_before><commit_msg>Add a helper script, ported to Python.
* tools/dev/wc-format.py: New.
Prints the working copy format of a given directory.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@995260 13f79535-47bb-0310-9956-ffa450edef68<commit_after>#!/usr/bin/env python
import os
import sqlite3
import sys
# helper
def usage():
sys.stderr.write("USAGE: %s [PATH]\n" + \
"\n" + \
"Prints to stdout the format of the working copy at PATH.\n")
# parse argv
wc = (sys.argv[1:] + ['.'])[0]
# main()
entries = os.path.join(wc, '.svn', 'entries')
wc_db = os.path.join(wc, '.svn', 'wc.db')
if os.path.exists(entries):
formatno = int(open(entries).readline())
elif os.path.exists(wc_db):
formatno = sqlite3.connect(wc_db).execute('pragma user_version;').fetchone()[0]
else:
usage()
sys.exit(1)
# 1.0.x -> 1.3.x: format 4
# 1.4.x: format 8
# 1.5.x: format 9
# 1.6.x: format 10
# 1.7.x: format XXX
print("%s: %d" % (wc, formatno))
|
|
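For quick reference, the release comments in wc-format.py can be folded into a lookup. This helper is illustrative and not part of the script; the name describe_format and the 'unknown' fallback are assumptions:
FORMAT_TO_RELEASE = {4: '1.0.x-1.3.x', 8: '1.4.x', 9: '1.5.x', 10: '1.6.x'}
def describe_format(formatno):
    # Mapping copied from the comment table in the script; the 1.7.x format
    # number was still undecided ("XXX") when it was written.
    return FORMAT_TO_RELEASE.get(formatno, 'unknown')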
c9b3178a9ac222d0536b0500452ae6a56672fba7
|
documents/tests/document_test.py
|
documents/tests/document_test.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from documents.models import Document, Page
from users.models import User
import pytest
import mock
from documents.models import process_document
pytestmark = pytest.mark.django_db
def create_doc(name):
user = User.objects.create(netid='test_user')
doc = Document.objects.create(name=name, user=user)
return doc
def add_pages(doc):
for i in range(5):
doc.page_set.add(Page(numero=i))
def test_repr():
doc = create_doc("Coucou")
assert repr(doc).decode('utf-8') == '<Document: Coucou>'
def test_repr_with_accents():
doc = create_doc("Lés accênts c'est cool")
assert repr(doc).decode('utf-8') == "<Document: Lés accênts c'est cool>"
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess_done(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = "DONE"
add_pages(doc)
with pytest.raises(Exception):
doc.reprocess()
assert mock_add_to_queue.called == 0
assert doc.page_set.count() != 0
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = 'ERROR'
add_pages(doc)
doc.reprocess()
assert mock_add_to_queue.called == 1
assert doc.page_set.count() == 0
@mock.patch.object(process_document, 'delay')
def test_add_to_queue(mock_process_document):
doc = create_doc("Coucou")
doc.state = 'ANYTHING'
doc.add_to_queue()
assert doc.state == "IN_QUEUE"
assert mock_process_document.called == 1
|
Add tests for documents models
|
Add tests for documents models
|
Python
|
agpl-3.0
|
UrLab/beta402,UrLab/DocHub,UrLab/beta402,UrLab/DocHub,UrLab/beta402,UrLab/DocHub,UrLab/DocHub
|
Add tests for documents models
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from documents.models import Document, Page
from users.models import User
import pytest
import mock
from documents.models import process_document
pytestmark = pytest.mark.django_db
def create_doc(name):
user = User.objects.create(netid='test_user')
doc = Document.objects.create(name=name, user=user)
return doc
def add_pages(doc):
for i in range(5):
doc.page_set.add(Page(numero=i))
def test_repr():
doc = create_doc("Coucou")
assert repr(doc).decode('utf-8') == '<Document: Coucou>'
def test_repr_with_accents():
doc = create_doc("Lés accênts c'est cool")
assert repr(doc).decode('utf-8') == "<Document: Lés accênts c'est cool>"
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess_done(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = "DONE"
add_pages(doc)
with pytest.raises(Exception):
doc.reprocess()
assert mock_add_to_queue.called == 0
assert doc.page_set.count() != 0
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = 'ERROR'
add_pages(doc)
doc.reprocess()
assert mock_add_to_queue.called == 1
assert doc.page_set.count() == 0
@mock.patch.object(process_document, 'delay')
def test_add_to_queue(mock_process_document):
doc = create_doc("Coucou")
doc.state = 'ANYTHING'
doc.add_to_queue()
assert doc.state == "IN_QUEUE"
assert mock_process_document.called == 1
|
<commit_before><commit_msg>Add tests for documents models<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from documents.models import Document, Page
from users.models import User
import pytest
import mock
from documents.models import process_document
pytestmark = pytest.mark.django_db
def create_doc(name):
user = User.objects.create(netid='test_user')
doc = Document.objects.create(name=name, user=user)
return doc
def add_pages(doc):
for i in range(5):
doc.page_set.add(Page(numero=i))
def test_repr():
doc = create_doc("Coucou")
assert repr(doc).decode('utf-8') == '<Document: Coucou>'
def test_repr_with_accents():
doc = create_doc("Lés accênts c'est cool")
assert repr(doc).decode('utf-8') == "<Document: Lés accênts c'est cool>"
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess_done(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = "DONE"
add_pages(doc)
with pytest.raises(Exception):
doc.reprocess()
assert mock_add_to_queue.called == 0
assert doc.page_set.count() != 0
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = 'ERROR'
add_pages(doc)
doc.reprocess()
assert mock_add_to_queue.called == 1
assert doc.page_set.count() == 0
@mock.patch.object(process_document, 'delay')
def test_add_to_queue(mock_process_document):
doc = create_doc("Coucou")
doc.state = 'ANYTHING'
doc.add_to_queue()
assert doc.state == "IN_QUEUE"
assert mock_process_document.called == 1
|
Add tests for documents models# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from documents.models import Document, Page
from users.models import User
import pytest
import mock
from documents.models import process_document
pytestmark = pytest.mark.django_db
def create_doc(name):
user = User.objects.create(netid='test_user')
doc = Document.objects.create(name=name, user=user)
return doc
def add_pages(doc):
for i in range(5):
doc.page_set.add(Page(numero=i))
def test_repr():
doc = create_doc("Coucou")
assert repr(doc).decode('utf-8') == '<Document: Coucou>'
def test_repr_with_accents():
doc = create_doc("Lés accênts c'est cool")
assert repr(doc).decode('utf-8') == "<Document: Lés accênts c'est cool>"
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess_done(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = "DONE"
add_pages(doc)
with pytest.raises(Exception):
doc.reprocess()
assert mock_add_to_queue.called == 0
assert doc.page_set.count() != 0
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = 'ERROR'
add_pages(doc)
doc.reprocess()
assert mock_add_to_queue.called == 1
assert doc.page_set.count() == 0
@mock.patch.object(process_document, 'delay')
def test_add_to_queue(mock_process_document):
doc = create_doc("Coucou")
doc.state = 'ANYTHING'
doc.add_to_queue()
assert doc.state == "IN_QUEUE"
assert mock_process_document.called == 1
|
<commit_before><commit_msg>Add tests for documents models<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from documents.models import Document, Page
from users.models import User
import pytest
import mock
from documents.models import process_document
pytestmark = pytest.mark.django_db
def create_doc(name):
user = User.objects.create(netid='test_user')
doc = Document.objects.create(name=name, user=user)
return doc
def add_pages(doc):
for i in range(5):
doc.page_set.add(Page(numero=i))
def test_repr():
doc = create_doc("Coucou")
assert repr(doc).decode('utf-8') == '<Document: Coucou>'
def test_repr_with_accents():
doc = create_doc("Lés accênts c'est cool")
assert repr(doc).decode('utf-8') == "<Document: Lés accênts c'est cool>"
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess_done(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = "DONE"
add_pages(doc)
with pytest.raises(Exception):
doc.reprocess()
assert mock_add_to_queue.called == 0
assert doc.page_set.count() != 0
@mock.patch.object(Document, 'add_to_queue')
def test_reprocess(mock_add_to_queue):
doc = create_doc("Coucou")
doc.state = 'ERROR'
add_pages(doc)
doc.reprocess()
assert mock_add_to_queue.called == 1
assert doc.page_set.count() == 0
@mock.patch.object(process_document, 'delay')
def test_add_to_queue(mock_process_document):
doc = create_doc("Coucou")
doc.state = 'ANYTHING'
doc.add_to_queue()
assert doc.state == "IN_QUEUE"
assert mock_process_document.called == 1
|
|
aa06ca147741d8f931acaf95ffe482b4abb11ac4
|
tests/base/io.py
|
tests/base/io.py
|
import steel
import io
import unittest
class SeekIO(io.BytesIO):
"""
A variation of BytesIO that keeps track of all .seek() activity.
This can test whether files are accessed as efficiently as possible.
"""
def __init__(self, *args, **kwargs):
super(SeekIO, self).__init__(*args, **kwargs)
self.seeks = []
def seek(self, offset, *args, **kwargs):
# Log it for later
self.seeks.append(offset)
# *args and **kwargs probably aren't necessary,
# but it's a good idea anyway, just in case.
return super(SeekIO, self).seek(offset, *args, **kwargs)
class SeekTests(unittest.TestCase):
def setUp(self):
self.data = b'abc'
class Test(steel.Structure):
a = steel.Bytes(size=1) # offset 0
b = steel.Bytes(size=1) # offset 1
c = steel.Bytes(size=1) # offset 2
self.Structure = Test
def test_sequential_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.a, b'a')
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(file.seeks, [0, 1, 2])
def test_random_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(obj.a, b'a')
self.assertEqual(file.seeks, [1, 2, 0])
|
Add some tests to make sure we're seeking efficiently
|
Add some tests to make sure we're seeking efficiently
|
Python
|
bsd-3-clause
|
gulopine/steel-experiment
|
Add some tests to make sure we're seeking efficiently
|
import steel
import io
import unittest
class SeekIO(io.BytesIO):
"""
A variation of BytesIO that keeps track of all .seek() activity.
This can test whether files are accessed as efficiently as possible.
"""
def __init__(self, *args, **kwargs):
super(SeekIO, self).__init__(*args, **kwargs)
self.seeks = []
def seek(self, offset, *args, **kwargs):
# Log it for later
self.seeks.append(offset)
# *args and **kwargs probably aren't necessary,
# but it's a good idea anyway, just in case.
return super(SeekIO, self).seek(offset, *args, **kwargs)
class SeekTests(unittest.TestCase):
def setUp(self):
self.data = b'abc'
class Test(steel.Structure):
a = steel.Bytes(size=1) # offset 0
b = steel.Bytes(size=1) # offset 1
c = steel.Bytes(size=1) # offset 2
self.Structure = Test
def test_sequential_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.a, b'a')
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(file.seeks, [0, 1, 2])
def test_random_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(obj.a, b'a')
self.assertEqual(file.seeks, [1, 2, 0])
|
<commit_before><commit_msg>Add some tests to make sure we're seeking efficiently<commit_after>
|
import steel
import io
import unittest
class SeekIO(io.BytesIO):
"""
A variation of BytesIO that keeps track of all .seek() activity.
This can test whether files are accessed as efficiently as possible.
"""
def __init__(self, *args, **kwargs):
super(SeekIO, self).__init__(*args, **kwargs)
self.seeks = []
def seek(self, offset, *args, **kwargs):
# Log it for later
self.seeks.append(offset)
# *args and **kwargs probably aren't necessary,
# but it's a good idea anyway, just in case.
return super(SeekIO, self).seek(offset, *args, **kwargs)
class SeekTests(unittest.TestCase):
def setUp(self):
self.data = b'abc'
class Test(steel.Structure):
a = steel.Bytes(size=1) # offset 0
b = steel.Bytes(size=1) # offset 1
c = steel.Bytes(size=1) # offset 2
self.Structure = Test
def test_sequential_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.a, b'a')
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(file.seeks, [0, 1, 2])
def test_random_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(obj.a, b'a')
self.assertEqual(file.seeks, [1, 2, 0])
|
Add some tests to make sure we're seeking efficientlyimport steel
import io
import unittest
class SeekIO(io.BytesIO):
"""
A variation of BytesIO that keeps track of all .seek() activity.
This can test whether files are accessed as efficiently as possible.
"""
def __init__(self, *args, **kwargs):
super(SeekIO, self).__init__(*args, **kwargs)
self.seeks = []
def seek(self, offset, *args, **kwargs):
# Log it for later
self.seeks.append(offset)
# *args and **kwargs probably aren't necessary,
# but it's a good idea anyway, just in case.
return super(SeekIO, self).seek(offset, *args, **kwargs)
class SeekTests(unittest.TestCase):
def setUp(self):
self.data = b'abc'
class Test(steel.Structure):
a = steel.Bytes(size=1) # offset 0
b = steel.Bytes(size=1) # offset 1
c = steel.Bytes(size=1) # offset 2
self.Structure = Test
def test_sequential_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.a, b'a')
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(file.seeks, [0, 1, 2])
def test_random_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(obj.a, b'a')
self.assertEqual(file.seeks, [1, 2, 0])
|
<commit_before><commit_msg>Add some tests to make sure we're seeking efficiently<commit_after>import steel
import io
import unittest
class SeekIO(io.BytesIO):
"""
A variation of BytesIO that keeps track of all .seek() activity.
This can test whether files are accessed as efficiently as possible.
"""
def __init__(self, *args, **kwargs):
super(SeekIO, self).__init__(*args, **kwargs)
self.seeks = []
def seek(self, offset, *args, **kwargs):
# Log it for later
self.seeks.append(offset)
# *args and **kwargs probably aren't necessary,
# but it's a good idea anyway, just in case.
return super(SeekIO, self).seek(offset, *args, **kwargs)
class SeekTests(unittest.TestCase):
def setUp(self):
self.data = b'abc'
class Test(steel.Structure):
a = steel.Bytes(size=1) # offset 0
b = steel.Bytes(size=1) # offset 1
c = steel.Bytes(size=1) # offset 2
self.Structure = Test
def test_sequential_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.a, b'a')
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(file.seeks, [0, 1, 2])
def test_random_access(self):
file = SeekIO(self.data)
obj = self.Structure(file)
self.assertEqual(obj.b, b'b')
self.assertEqual(obj.c, b'c')
self.assertEqual(obj.a, b'a')
self.assertEqual(file.seeks, [1, 2, 0])
|
|
30c25249f7c76dd20422a73832fd8f26ee3a9f1d
|
tests/test_bip32_vector.py
|
tests/test_bip32_vector.py
|
import json
from unittest import TestCase
from bitmerchant.network import BitcoinMainNet
from bitmerchant.wallet import Wallet
class TestBIP32(TestCase):
def _test_wallet(self, wallet, data):
self.assertEqual(
wallet.serialize_b58(private=True), data['private_key'])
self.assertEqual(
wallet.serialize_b58(private=False), data['public_key'])
self.assertEqual(wallet.export_to_wif(), data['wif'])
self.assertEqual(wallet.chain_code, data['chain_code'])
fingerprint = data['fingerprint']
if not fingerprint.startswith(b'0x'):
fingerprint = b'0x' + fingerprint
self.assertEqual(wallet.fingerprint, fingerprint)
self.assertEqual(wallet.depth, data['depth'])
self.assertEqual(
wallet.private_key._private_key.privkey.secret_multiplier,
data['secret_exponent'])
def test_bip32(self):
with open("tests/bip32_test_vector.json", 'r') as f:
vectors = json.loads(f.read())
for wallet_data in vectors:
wallet = Wallet.deserialize(
wallet_data['private_key'], network=BitcoinMainNet)
self._test_wallet(wallet, wallet_data)
for child_data in wallet_data['children']:
child = wallet.get_child_for_path(child_data['path'])
self._test_wallet(child, child_data['child'])
|
Add test for the new bip32 test vectors
|
Add test for the new bip32 test vectors
|
Python
|
mit
|
sbuss/bitmerchant,mflaxman/bitmerchant
|
Add test for the new bip32 test vectors
|
import json
from unittest import TestCase
from bitmerchant.network import BitcoinMainNet
from bitmerchant.wallet import Wallet
class TestBIP32(TestCase):
def _test_wallet(self, wallet, data):
self.assertEqual(
wallet.serialize_b58(private=True), data['private_key'])
self.assertEqual(
wallet.serialize_b58(private=False), data['public_key'])
self.assertEqual(wallet.export_to_wif(), data['wif'])
self.assertEqual(wallet.chain_code, data['chain_code'])
fingerprint = data['fingerprint']
if not fingerprint.startswith(b'0x'):
fingerprint = b'0x' + fingerprint
self.assertEqual(wallet.fingerprint, fingerprint)
self.assertEqual(wallet.depth, data['depth'])
self.assertEqual(
wallet.private_key._private_key.privkey.secret_multiplier,
data['secret_exponent'])
def test_bip32(self):
with open("tests/bip32_test_vector.json", 'r') as f:
vectors = json.loads(f.read())
for wallet_data in vectors:
wallet = Wallet.deserialize(
wallet_data['private_key'], network=BitcoinMainNet)
self._test_wallet(wallet, wallet_data)
for child_data in wallet_data['children']:
child = wallet.get_child_for_path(child_data['path'])
self._test_wallet(child, child_data['child'])
|
<commit_before><commit_msg>Add test for the new bip32 test vectors<commit_after>
|
import json
from unittest import TestCase
from bitmerchant.network import BitcoinMainNet
from bitmerchant.wallet import Wallet
class TestBIP32(TestCase):
def _test_wallet(self, wallet, data):
self.assertEqual(
wallet.serialize_b58(private=True), data['private_key'])
self.assertEqual(
wallet.serialize_b58(private=False), data['public_key'])
self.assertEqual(wallet.export_to_wif(), data['wif'])
self.assertEqual(wallet.chain_code, data['chain_code'])
fingerprint = data['fingerprint']
if not fingerprint.startswith(b'0x'):
fingerprint = b'0x' + fingerprint
self.assertEqual(wallet.fingerprint, fingerprint)
self.assertEqual(wallet.depth, data['depth'])
self.assertEqual(
wallet.private_key._private_key.privkey.secret_multiplier,
data['secret_exponent'])
def test_bip32(self):
with open("tests/bip32_test_vector.json", 'r') as f:
vectors = json.loads(f.read())
for wallet_data in vectors:
wallet = Wallet.deserialize(
wallet_data['private_key'], network=BitcoinMainNet)
self._test_wallet(wallet, wallet_data)
for child_data in wallet_data['children']:
child = wallet.get_child_for_path(child_data['path'])
self._test_wallet(child, child_data['child'])
|
Add test for the new bip32 test vectorsimport json
from unittest import TestCase
from bitmerchant.network import BitcoinMainNet
from bitmerchant.wallet import Wallet
class TestBIP32(TestCase):
def _test_wallet(self, wallet, data):
self.assertEqual(
wallet.serialize_b58(private=True), data['private_key'])
self.assertEqual(
wallet.serialize_b58(private=False), data['public_key'])
self.assertEqual(wallet.export_to_wif(), data['wif'])
self.assertEqual(wallet.chain_code, data['chain_code'])
fingerprint = data['fingerprint']
if not fingerprint.startswith(b'0x'):
fingerprint = b'0x' + fingerprint
self.assertEqual(wallet.fingerprint, fingerprint)
self.assertEqual(wallet.depth, data['depth'])
self.assertEqual(
wallet.private_key._private_key.privkey.secret_multiplier,
data['secret_exponent'])
def test_bip32(self):
with open("tests/bip32_test_vector.json", 'r') as f:
vectors = json.loads(f.read())
for wallet_data in vectors:
wallet = Wallet.deserialize(
wallet_data['private_key'], network=BitcoinMainNet)
self._test_wallet(wallet, wallet_data)
for child_data in wallet_data['children']:
child = wallet.get_child_for_path(child_data['path'])
self._test_wallet(child, child_data['child'])
|
<commit_before><commit_msg>Add test for the new bip32 test vectors<commit_after>import json
from unittest import TestCase
from bitmerchant.network import BitcoinMainNet
from bitmerchant.wallet import Wallet
class TestBIP32(TestCase):
def _test_wallet(self, wallet, data):
self.assertEqual(
wallet.serialize_b58(private=True), data['private_key'])
self.assertEqual(
wallet.serialize_b58(private=False), data['public_key'])
self.assertEqual(wallet.export_to_wif(), data['wif'])
self.assertEqual(wallet.chain_code, data['chain_code'])
fingerprint = data['fingerprint']
if not fingerprint.startswith(b'0x'):
fingerprint = b'0x' + fingerprint
self.assertEqual(wallet.fingerprint, fingerprint)
self.assertEqual(wallet.depth, data['depth'])
self.assertEqual(
wallet.private_key._private_key.privkey.secret_multiplier,
data['secret_exponent'])
def test_bip32(self):
with open("tests/bip32_test_vector.json", 'r') as f:
vectors = json.loads(f.read())
for wallet_data in vectors:
wallet = Wallet.deserialize(
wallet_data['private_key'], network=BitcoinMainNet)
self._test_wallet(wallet, wallet_data)
for child_data in wallet_data['children']:
child = wallet.get_child_for_path(child_data['path'])
self._test_wallet(child, child_data['child'])
|
|
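For reference, _test_wallet above dereferences a fixed set of keys from each vector entry; one entry of tests/bip32_test_vector.json plausibly has the shape below (the schema is inferred from those accesses, and every value is a placeholder rather than real vector data):

example_vector_entry = {
    "private_key": "xprv...",      # serialized base58, private
    "public_key": "xpub...",       # serialized base58, public
    "wif": "L1...",                # wallet import format export
    "chain_code": "873dff...",     # hex-encoded chain code
    "fingerprint": "3442193e",     # the test tolerates a missing 0x prefix
    "depth": 0,
    "secret_exponent": 123456789,  # integer private scalar
    "children": [
        {"path": "m/0'", "child": {}},  # "child" nests this same shape
    ],
}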
197e3910f64fc34aec71c1788f4e944c33e05422
|
tests/test_dgim_quality.py
|
tests/test_dgim_quality.py
|
import unittest
import random
from dgim.dgim import Dgim
class ExactAlgorithm(object):
"""Exact algorithm to count the number of ones
in the last N elements of a stream."""
def __init__(self, N):
"""Constructor
:param N: size of the sliding window
:type N: int
"""
self.N = N
self.sliding_window = []
def update(self, elt):
"""Update the stream with one element.
The element can be either 0 or 1.
:param elt: the latest element of the stream
:type elt: int
"""
self.sliding_window.append(elt)
if len(self.sliding_window) > self.N:
self.sliding_window.pop(0)
def get_count(self):
"""Returns an estimate of the number of ones in the sliding window.
:returns: int
"""
return sum(self.sliding_window)
class TestDgimQuality(unittest.TestCase):
def generate_random_stream(self, length):
"""Generate a random stream of zeros and ones.
:param length: the stream length
:type length: int
:returns: iterator
"""
for i in range(length):
yield random.randint(0, 1)
def check_quality_settings(self, N, stream_length):
"""Compare the result "e" returned by dgim with the exact result
"c" on a random stream.
The test fails if the dgim result "e" is not in the expected bounds.
0.5 * c <= e <= 1.5 * c
:param N: sliding window length
:type N: int
:param stream_length: the length of the random stream
:type stream_length: int
"""
dgim = Dgim(N)
exact_algorithm = ExactAlgorithm(N)
for elt in self.generate_random_stream(stream_length):
dgim.update(elt)
exact_algorithm.update(elt)
self.assertLessEqual(0.5 * exact_algorithm.get_count(), dgim.get_count())
self.assertLessEqual(dgim.get_count(), 1.5 * exact_algorithm.get_count())
def test_nominal_case(self):
self.check_quality_settings(N=100, stream_length=1000)
def test_large_N(self):
self.check_quality_settings(N=10000, stream_length=20000)
def test_short_stream(self):
# stream is shorter than N
self.check_quality_settings(N=1000, stream_length=100)
def test_N_is_one(self):
self.check_quality_settings(N=1, stream_length=10)
def test_N_is_two(self):
self.check_quality_settings(N=2, stream_length=100)
|
Add tests to assess algorithm quality.
|
Add tests to assess algorithm quality.
To test the quality, implement an exact algorithm to count the number
of ones in the last N elements of a stream.
Then compare the dgim result with the exact result and check that the
dgim estimate is within the expected bounds.
|
Python
|
bsd-3-clause
|
simondolle/dgim,simondolle/dgim
|
Add tests to assess algorithm quality.
To test the quality, implement an exact algorithm to count the number
of ones in the last N elements of a stream.
Then compare the dgim result with the exact result and check that the
dgim estimate is within the expected bounds.
|
import unittest
import random
from dgim.dgim import Dgim
class ExactAlgorithm(object):
"""Exact algorithm to count the number of ones
in the last N elements of a stream."""
def __init__(self, N):
"""Constructor
:param N: size of the sliding window
:type N: int
"""
self.N = N
self.sliding_window = []
def update(self, elt):
"""Update the stream with one element.
The element can be either 0 or 1.
:param elt: the latest element of the stream
:type elt: int
"""
self.sliding_window.append(elt)
if len(self.sliding_window) > self.N:
self.sliding_window.pop(0)
def get_count(self):
"""Returns an estimate of the number of ones in the sliding window.
:returns: int
"""
return sum(self.sliding_window)
class TestDgimQuality(unittest.TestCase):
def generate_random_stream(self, length):
"""Generate a random stream of zeros and ones.
:param length: the stream length
:type length: int
:returns: iterator
"""
for i in range(length):
yield random.randint(0, 1)
def check_quality_settings(self, N, stream_length):
"""Compare the result "e" returned by dgim with the exact result
"c" on a random stream.
The test fails if the dgim result "e" is not in the expected bounds.
0.5 * c <= e <= 1.5 * c
:param N: sliding window length
:type N: int
:param stream_length: the length of the random stream
:type stream_length: int
"""
dgim = Dgim(N)
exact_algorithm = ExactAlgorithm(N)
for elt in self.generate_random_stream(stream_length):
dgim.update(elt)
exact_algorithm.update(elt)
self.assertLessEqual(0.5 * exact_algorithm.get_count(), dgim.get_count())
self.assertLessEqual(dgim.get_count(), 1.5 * exact_algorithm.get_count())
def test_nominal_case(self):
self.check_quality_settings(N=100, stream_length=1000)
def test_large_N(self):
self.check_quality_settings(N=10000, stream_length=20000)
def test_short_stream(self):
# stream is shorter than N
self.check_quality_settings(N=1000, stream_length=100)
def test_N_is_one(self):
self.check_quality_settings(N=1, stream_length=10)
def test_N_is_two(self):
self.check_quality_settings(N=2, stream_length=100)
|
<commit_before><commit_msg>Add tests to assess algorithm quality.
To test the quality, implement an exact algorithm to count the number
of ones in the last N elements of a stream.
Then compare the dgim result with the exact result and check that the
dgim estimate is within the expected bounds.<commit_after>
|
import unittest
import random
from dgim.dgim import Dgim
class ExactAlgorithm(object):
"""Exact algorithm to count the number of ones
in the last N elements of a stream."""
def __init__(self, N):
"""Constructor
:param N: size of the sliding window
:type N: int
"""
self.N = N
self.sliding_window = []
def update(self, elt):
"""Update the stream with one element.
The element can be either 0 or 1.
:param elt: the latest element of the stream
:type elt: int
"""
self.sliding_window.append(elt)
if len(self.sliding_window) > self.N:
self.sliding_window.pop(0)
def get_count(self):
"""Returns an estimate of the number of ones in the sliding window.
:returns: int
"""
return sum(self.sliding_window)
class TestDgimQuality(unittest.TestCase):
def generate_random_stream(self, length):
"""Generate a random stream of zeros and ones.
:param length: the stream length
:type length: int
:returns: iterator
"""
for i in range(length):
yield random.randint(0, 1)
def check_quality_settings(self, N, stream_length):
"""Compare the result "e" returned by dgim with the exact result
"c" on a random stream.
The test fails if the dgim result "e" is not in the expected bounds.
0.5 * c <= e <= 1.5 * c
:param N: sliding window length
:type N: int
:param stream_length: the length of the random stream
:type stream_length: int
"""
dgim = Dgim(N)
exact_algorithm = ExactAlgorithm(N)
for elt in self.generate_random_stream(stream_length):
dgim.update(elt)
exact_algorithm.update(elt)
self.assertLessEqual(0.5 * exact_algorithm.get_count(), dgim.get_count())
self.assertLessEqual(dgim.get_count(), 1.5 * exact_algorithm.get_count())
def test_nominal_case(self):
self.check_quality_settings(N=100, stream_length=1000)
def test_large_N(self):
self.check_quality_settings(N=10000, stream_length=20000)
def test_short_stream(self):
# stream is shorter than N
self.check_quality_settings(N=1000, stream_length=100)
def test_N_is_one(self):
self.check_quality_settings(N=1, stream_length=10)
def test_N_is_two(self):
self.check_quality_settings(N=2, stream_length=100)
|
Add tests to assess algorithm quality.
To test the quality, implement an exact algorithm to count the number
of ones in the last N elements of a stream.
Then compare the dgim result with the exact result and check that the
dgim estimate is within the expected bounds.import unittest
import random
from dgim.dgim import Dgim
class ExactAlgorithm(object):
"""Exact algorithm to count the number of ones
in the last N elements of a stream."""
def __init__(self, N):
"""Constructor
:param N: size of the sliding window
:type N: int
"""
self.N = N
self.sliding_window = []
def update(self, elt):
"""Update the stream with one element.
The element can be either 0 or 1.
:param elt: the latest element of the stream
:type elt: int
"""
self.sliding_window.append(elt)
if len(self.sliding_window) > self.N:
self.sliding_window.pop(0)
def get_count(self):
"""Returns an estimate of the number of ones in the sliding window.
:returns: int
"""
return sum(self.sliding_window)
class TestDgimQuality(unittest.TestCase):
def generate_random_stream(self, length):
"""Generate a random stream of zeros and ones.
:param length: the stream length
:type length: int
:returns: iterator
"""
for i in range(length):
yield random.randint(0, 1)
def check_quality_settings(self, N, stream_length):
"""Compare the result "e" returned by dgim with the exact result
"c" on a random stream.
The test fails if the dgim result "e" is not in the expected bounds.
0.5 * c <= e <= 1.5 * c
:param N: sliding window length
:type N: int
:param stream_length: the length of the random stream
:type stream_length: int
"""
dgim = Dgim(N)
exact_algorithm = ExactAlgorithm(N)
for elt in self.generate_random_stream(stream_length):
dgim.update(elt)
exact_algorithm.update(elt)
self.assertLessEqual(0.5 * exact_algorithm.get_count(), dgim.get_count())
self.assertLessEqual(dgim.get_count(), 1.5 * exact_algorithm.get_count())
def test_nominal_case(self):
self.check_quality_settings(N=100, stream_length=1000)
def test_large_N(self):
self.check_quality_settings(N=10000, stream_length=20000)
def test_short_stream(self):
# stream is shorter than N
self.check_quality_settings(N=1000, stream_length=100)
def test_N_is_one(self):
self.check_quality_settings(N=1, stream_length=10)
def test_N_is_two(self):
self.check_quality_settings(N=2, stream_length=100)
|
<commit_before><commit_msg>Add tests to assess algorithm quality.
To test the quality, implement an exact algorithm to count the number
of ones in the last N elements of a stream.
Then compare the dgim result with the exact result and check that the
dgim estimate is within the expected bounds.<commit_after>import unittest
import random
from dgim.dgim import Dgim
class ExactAlgorithm(object):
"""Exact algorithm to count the number of ones
in the last N elements of a stream."""
def __init__(self, N):
"""Constructor
:param N: size of the sliding window
:type N: int
"""
self.N = N
self.sliding_window = []
def update(self, elt):
"""Update the stream with one element.
The element can be either 0 or 1.
:param elt: the latest element of the stream
:type elt: int
"""
self.sliding_window.append(elt)
if len(self.sliding_window) > self.N:
self.sliding_window.pop(0)
def get_count(self):
"""Returns an estimate of the number of ones in the sliding window.
:returns: int
"""
return sum(self.sliding_window)
class TestDgimQuality(unittest.TestCase):
def generate_random_stream(self, length):
"""Generate a random stream of zeros and ones.
:param length: the stream length
:type length: int
:returns: iterator
"""
for i in range(length):
yield random.randint(0, 1)
def check_quality_settings(self, N, stream_length):
"""Compare the result "e" returned by dgim with the exact result
"c" on a random stream.
The test fails if the dgim result "e" is not in the expected bounds.
0.5 * c <= e <= 1.5 * c
:param N: sliding window length
:type N: int
:param stream_length: the length of the random stream
:type stream_length: int
"""
dgim = Dgim(N)
exact_algorithm = ExactAlgorithm(N)
for elt in self.generate_random_stream(stream_length):
dgim.update(elt)
exact_algorithm.update(elt)
self.assertLessEqual(0.5 * exact_algorithm.get_count(), dgim.get_count())
self.assertLessEqual(dgim.get_count(), 1.5 * exact_algorithm.get_count())
def test_nominal_case(self):
self.check_quality_settings(N=100, stream_length=1000)
def test_large_N(self):
self.check_quality_settings(N=10000, stream_length=20000)
def test_short_stream(self):
# stream is shorter than N
self.check_quality_settings(N=1000, stream_length=100)
def test_N_is_one(self):
self.check_quality_settings(N=1, stream_length=10)
def test_N_is_two(self):
self.check_quality_settings(N=2, stream_length=100)
|
|
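The bound in the message above (0.5 * c <= e <= 1.5 * c) can also be exercised outside the test suite; a standalone sketch, assuming the dgim package from this repo is importable and exposes the Dgim(N)/update/get_count API used in the tests:

import random
from dgim.dgim import Dgim

N = 100
dgim = Dgim(N)
window = []  # exact sliding window kept for comparison

random.seed(0)
for _ in range(1000):
    bit = random.randint(0, 1)
    dgim.update(bit)
    window.append(bit)
    if len(window) > N:
        window.pop(0)

exact = sum(window)
estimate = dgim.get_count()
# DGIM guarantees the estimate is within 50% of the exact count.
assert 0.5 * exact <= estimate <= 1.5 * exact
print(exact, estimate)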
40ae8895be108e14ec1673cfd88031f82b4cf6f4
|
tools/recover.py
|
tools/recover.py
|
#!/usr/bin/python
import sys
import re
import socket
import binascii
if len(sys.argv) < 2:
sys.exit("log file is not provided")
path = sys.argv[1]
p = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} DEBUG: \[([0-9a-fA-F]{8}): (\d+) < [\d.]+] HEX: ([0-9a-fA-F]+)")
ports = {}
messages = {}
for line in open(path):
if "HEX:" in line:
m = p.match(line)
if m:
session = m.group(1)
port = m.group(2)
message = m.group(3)
ports[session] = port
if session not in messages:
messages[session] = []
messages[session].append(message)
for session in ports:
port = ports[session]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", int(port)))
for message in messages[session]:
s.send(binascii.unhexlify(message))
s.close()
|
Implement a script to re-send data from log
|
Implement a script to re-send data from log
|
Python
|
apache-2.0
|
AnshulJain1985/Roadcast-Tracker,stalien/traccar_test,joseant/traccar-1,orcoliver/traccar,tananaev/traccar,jssenyange/traccar,ninioe/traccar,5of9/traccar,orcoliver/traccar,5of9/traccar,duke2906/traccar,tsmgeek/traccar,tananaev/traccar,ninioe/traccar,jon-stumpf/traccar,tsmgeek/traccar,tananaev/traccar,renaudallard/traccar,jssenyange/traccar,duke2906/traccar,orcoliver/traccar,tsmgeek/traccar,AnshulJain1985/Roadcast-Tracker,jon-stumpf/traccar,vipien/traccar,jon-stumpf/traccar,joseant/traccar-1,al3x1s/traccar,renaudallard/traccar,vipien/traccar,ninioe/traccar,jssenyange/traccar,al3x1s/traccar,stalien/traccar_test
|
Implement a script to re-send data from log
|
#!/usr/bin/python
import sys
import re
import socket
import binascii
if len(sys.argv) < 2:
sys.exit("log file is not provided")
path = sys.argv[1]
p = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} DEBUG: \[([0-9a-fA-F]{8}): (\d+) < [\d.]+] HEX: ([0-9a-fA-F]+)")
ports = {}
messages = {}
for line in open(path):
if "HEX:" in line:
m = p.match(line)
if m:
session = m.group(1)
port = m.group(2)
message = m.group(3)
ports[session] = port
if session not in messages:
messages[session] = []
messages[session].append(message)
for session in ports:
port = ports[session]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", int(port)))
for message in messages[session]:
s.send(binascii.unhexlify(message))
s.close()
|
<commit_before><commit_msg>Implement a script to re-send data from log<commit_after>
|
#!/usr/bin/python
import sys
import re
import socket
import binascii
if len(sys.argv) < 2:
sys.exit("log file is not provided")
path = sys.argv[1]
p = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} DEBUG: \[([0-9a-fA-F]{8}): (\d+) < [\d.]+] HEX: ([0-9a-fA-F]+)")
ports = {}
messages = {}
for line in open(path):
if "HEX:" in line:
m = p.match(line)
if m:
session = m.group(1)
port = m.group(2)
message = m.group(3)
ports[session] = port
if session not in messages:
messages[session] = []
messages[session].append(message)
for session in ports:
port = ports[session]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", int(port)))
for message in messages[session]:
s.send(binascii.unhexlify(message))
s.close()
|
Implement a script to re-send data from log#!/usr/bin/python
import sys
import re
import socket
import binascii
if len(sys.argv) < 2:
sys.exit("log file is not provided")
path = sys.argv[1]
p = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} DEBUG: \[([0-9a-fA-F]{8}): (\d+) < [\d.]+] HEX: ([0-9a-fA-F]+)")
ports = {}
messages = {}
for line in open(path):
if "HEX:" in line:
m = p.match(line)
if m:
session = m.group(1)
port = m.group(2)
message = m.group(3)
ports[session] = port
if session not in messages:
messages[session] = []
messages[session].append(message)
for session in ports:
port = ports[session]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", int(port)))
for message in messages[session]:
s.send(binascii.unhexlify(message))
s.close()
|
<commit_before><commit_msg>Implement a script to re-send data from log<commit_after>#!/usr/bin/python
import sys
import re
import socket
import binascii
if len(sys.argv) < 2:
sys.exit("log file is not provided")
path = sys.argv[1]
p = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} DEBUG: \[([0-9a-fA-F]{8}): (\d+) < [\d.]+] HEX: ([0-9a-fA-F]+)")
ports = {}
messages = {}
for line in open(path):
if "HEX:" in line:
m = p.match(line)
if m:
session = m.group(1)
port = m.group(2)
message = m.group(3)
ports[session] = port
if session not in messages:
messages[session] = []
messages[session].append(message)
for session in ports:
port = ports[session]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", int(port)))
for message in messages[session]:
s.send(binascii.unhexlify(message))
s.close()
|
|
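A quick sanity check of the log regex used above, run against a fabricated line (the timestamp, session id, port, and hex payload are arbitrary placeholders shaped to fit the pattern):

import re

p = re.compile(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} DEBUG: "
               r"\[([0-9a-fA-F]{8}): (\d+) < [\d.]+] HEX: ([0-9a-fA-F]+)")
line = "2015-01-01 12:00:00 DEBUG: [1a2b3c4d: 5001 < 10.0.0.1] HEX: 68656c6c6f"
m = p.match(line)
assert m is not None and m.groups() == ("1a2b3c4d", "5001", "68656c6c6f")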
d0c65c221fd6e14b3ec6251ec3eb0b8ec424dced
|
flexget/plugins/urlrewrite_cinemageddon.py
|
flexget/plugins/urlrewrite_cinemageddon.py
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)
|
Remove extra empty line.
|
Remove extra empty line.
|
Python
|
mit
|
drwyrm/Flexget,thalamus/Flexget,qk4l/Flexget,Danfocus/Flexget,OmgOhnoes/Flexget,jawilson/Flexget,poulpito/Flexget,ZefQ/Flexget,jawilson/Flexget,vfrc2/Flexget,JorisDeRieck/Flexget,jawilson/Flexget,Danfocus/Flexget,tobinjt/Flexget,LynxyssCZ/Flexget,cvium/Flexget,sean797/Flexget,tvcsantos/Flexget,oxc/Flexget,malkavi/Flexget,ibrahimkarahan/Flexget,offbyone/Flexget,dsemi/Flexget,offbyone/Flexget,gazpachoking/Flexget,patsissons/Flexget,spencerjanssen/Flexget,Pretagonist/Flexget,ianstalk/Flexget,tarzasai/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,lildadou/Flexget,cvium/Flexget,Flexget/Flexget,camon/Flexget,drwyrm/Flexget,ianstalk/Flexget,drwyrm/Flexget,JorisDeRieck/Flexget,sean797/Flexget,thalamus/Flexget,antivirtel/Flexget,ZefQ/Flexget,antivirtel/Flexget,Pretagonist/Flexget,jawilson/Flexget,lildadou/Flexget,tobinjt/Flexget,grrr2/Flexget,ibrahimkarahan/Flexget,qk4l/Flexget,crawln45/Flexget,camon/Flexget,malkavi/Flexget,qvazzler/Flexget,spencerjanssen/Flexget,grrr2/Flexget,xfouloux/Flexget,crawln45/Flexget,antivirtel/Flexget,Danfocus/Flexget,poulpito/Flexget,tarzasai/Flexget,thalamus/Flexget,OmgOhnoes/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,OmgOhnoes/Flexget,malkavi/Flexget,JorisDeRieck/Flexget,ZefQ/Flexget,crawln45/Flexget,ratoaq2/Flexget,patsissons/Flexget,xfouloux/Flexget,v17al/Flexget,oxc/Flexget,dsemi/Flexget,v17al/Flexget,sean797/Flexget,ratoaq2/Flexget,Danfocus/Flexget,spencerjanssen/Flexget,qvazzler/Flexget,Flexget/Flexget,patsissons/Flexget,LynxyssCZ/Flexget,tsnoam/Flexget,vfrc2/Flexget,qk4l/Flexget,crawln45/Flexget,vfrc2/Flexget,jacobmetrick/Flexget,cvium/Flexget,Flexget/Flexget,offbyone/Flexget,JorisDeRieck/Flexget,tarzasai/Flexget,grrr2/Flexget,xfouloux/Flexget,qvazzler/Flexget,v17al/Flexget,gazpachoking/Flexget,tsnoam/Flexget,tvcsantos/Flexget,malkavi/Flexget,jacobmetrick/Flexget,tsnoam/Flexget,tobinjt/Flexget,poulpito/Flexget,tobinjt/Flexget,Pretagonist/Flexget,ratoaq2/Flexget,Flexget/Flexget,lildadou/Flexget,LynxyssCZ/Flexget,oxc/Flexget,jacobmetrick/Flexget
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
    plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)Remove extra empty line.
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)
|
<commit_before>from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
    plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)<commit_msg>Remove extra empty line.<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
    plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)Remove extra empty line.from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)
|
<commit_before>from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
    plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)<commit_msg>Remove extra empty line.<commit_after>from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
from flexget import plugin
from flexget.event import event
log = logging.getLogger('cinemageddon')
class UrlRewriteCinemageddon(object):
"""Cinemageddon urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://cinemageddon.net/details.php?id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('details.php?id=', 'download.php?id=')
entry['url'] += '&name=%s.torrent' % (urllib.quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteCinemageddon, 'cinemageddon', groups=['urlrewriter'], api_ver=2)
|
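The rewrite itself is a plain string substitution plus percent-encoding of the title; the snippet below walks through it on a made-up entry (Python 2, matching the plugin's urllib.quote import; the id and title are invented):

import urllib

url = 'http://cinemageddon.net/details.php?id=12345'
title = 'Some Movie (1980)'
url = url.replace('details.php?id=', 'download.php?id=')
url += '&name=%s.torrent' % urllib.quote(title, safe='')
# -> http://cinemageddon.net/download.php?id=12345&name=Some%20Movie%20%281980%29.torrent
print(url)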
0b07192262dc251344c11027b59751501fc6e4a9
|
iroha_files.py
|
iroha_files.py
|
# Script to create tar.gz package. This will replace automake based "make dist".
import os
PACKAGE="iroha"
VERSION="0.1.0"
ARCHIVE=PACKAGE + "-" + VERSION
EXTRA = ["configure.ac", "configure", "ltmain.sh", "depcomp", "Makefile.in", "config.sub", "lib/cxx-rt.h", "missing", "config.guess", "install-sh", "aclocal.m4", "Makefile.am", "compile", "m4/ltoptions.m4", "m4/ltversion.m4", "m4/m4_ax_check_compile_flag.m4", "m4/libtool.m4", "m4/ltsugar.m4", "m4/lt~obsolete.m4", "iroha_files.py"]
def GetGypFileList(gyp):
gypdir = os.path.dirname(gyp) + "/"
d = eval(open(gyp).read())
targets = d['targets']
files = []
for t in targets:
for s in t['sources']:
files.append(gypdir + s)
return files
def GetExtraFileList(base):
b = os.path.dirname(base) + "/"
files = []
for e in EXTRA:
files.append(b + e)
return files
def CopyFiles(archive, files):
os.system("mkdir " + archive)
pdir = archive + "/"
dirs = {}
for fn in files:
d = pdir + os.path.dirname(fn)
if not d in dirs:
dirs[d] = True
os.system("mkdir -p " + d)
os.system("cp -p " + fn + " " + pdir + fn)
def MakeTarBall(archive, files):
os.system("rm -rf " + archive)
CopyFiles(archive, files)
os.system("tar cvzf " + archive + ".tar.gz " + archive)
os.system("rm -rf " + archive)
if __name__ == '__main__':
files = GetGypFileList("src/iroha.gyp") + GetExtraFileList("./")
MakeTarBall(ARCHIVE, files)
|
Add script to collect files.
|
Add script to collect files.
|
Python
|
bsd-3-clause
|
nlsynth/iroha,nlsynth/iroha
|
Add script to collect files.
|
# Script to create tar.gz package. This will replace automake based "make dist".
import os
PACKAGE="iroha"
VERSION="0.1.0"
ARCHIVE=PACKAGE + "-" + VERSION
EXTRA = ["configure.ac", "configure", "ltmain.sh", "depcomp", "Makefile.in", "config.sub", "lib/cxx-rt.h", "missing", "config.guess", "install-sh", "aclocal.m4", "Makefile.am", "compile", "m4/ltoptions.m4", "m4/ltversion.m4", "m4/m4_ax_check_compile_flag.m4", "m4/libtool.m4", "m4/ltsugar.m4", "m4/lt~obsolete.m4", "iroha_files.py"]
def GetGypFileList(gyp):
gypdir = os.path.dirname(gyp) + "/"
d = eval(open(gyp).read())
targets = d['targets']
files = []
for t in targets:
for s in t['sources']:
files.append(gypdir + s)
return files
def GetExtraFileList(base):
b = os.path.dirname(base) + "/"
files = []
for e in EXTRA:
files.append(b + e)
return files
def CopyFiles(archive, files):
os.system("mkdir " + archive)
pdir = archive + "/"
dirs = {}
for fn in files:
d = pdir + os.path.dirname(fn)
if not d in dirs:
dirs[d] = True
os.system("mkdir -p " + d)
os.system("cp -p " + fn + " " + pdir + fn)
def MakeTarBall(archive, files):
os.system("rm -rf " + archive)
CopyFiles(archive, files)
os.system("tar cvzf " + archive + ".tar.gz " + archive)
os.system("rm -rf " + archive)
if __name__ == '__main__':
files = GetGypFileList("src/iroha.gyp") + GetExtraFileList("./")
MakeTarBall(ARCHIVE, files)
|
<commit_before><commit_msg>Add script to collect files.<commit_after>
|
# Script to create tar.gz package. This will replace automake based "make dist".
import os
PACKAGE="iroha"
VERSION="0.1.0"
ARCHIVE=PACKAGE + "-" + VERSION
EXTRA = ["configure.ac", "configure", "ltmain.sh", "depcomp", "Makefile.in", "config.sub", "lib/cxx-rt.h", "missing", "config.guess", "install-sh", "aclocal.m4", "Makefile.am", "compile", "m4/ltoptions.m4", "m4/ltversion.m4", "m4/m4_ax_check_compile_flag.m4", "m4/libtool.m4", "m4/ltsugar.m4", "m4/lt~obsolete.m4", "iroha_files.py"]
def GetGypFileList(gyp):
gypdir = os.path.dirname(gyp) + "/"
d = eval(open(gyp).read())
targets = d['targets']
files = []
for t in targets:
for s in t['sources']:
files.append(gypdir + s)
return files
def GetExtraFileList(base):
b = os.path.dirname(base) + "/"
files = []
for e in EXTRA:
files.append(b + e)
return files
def CopyFiles(archive, files):
os.system("mkdir " + archive)
pdir = archive + "/"
dirs = {}
for fn in files:
d = pdir + os.path.dirname(fn)
if not d in dirs:
dirs[d] = True
os.system("mkdir -p " + d)
os.system("cp -p " + fn + " " + pdir + fn)
def MakeTarBall(archive, files):
os.system("rm -rf " + archive)
CopyFiles(archive, files)
os.system("tar cvzf " + archive + ".tar.gz " + archive)
os.system("rm -rf " + archive)
if __name__ == '__main__':
files = GetGypFileList("src/iroha.gyp") + GetExtraFileList("./")
MakeTarBall(ARCHIVE, files)
|
Add script to collect files.# Script to create tar.gz package. This will replace automake based "make dist".
import os
PACKAGE="iroha"
VERSION="0.1.0"
ARCHIVE=PACKAGE + "-" + VERSION
EXTRA = ["configure.ac", "configure", "ltmain.sh", "depcomp", "Makefile.in", "config.sub", "lib/cxx-rt.h", "missing", "config.guess", "install-sh", "aclocal.m4", "Makefile.am", "compile", "m4/ltoptions.m4", "m4/ltversion.m4", "m4/m4_ax_check_compile_flag.m4", "m4/libtool.m4", "m4/ltsugar.m4", "m4/lt~obsolete.m4", "iroha_files.py"]
def GetGypFileList(gyp):
gypdir = os.path.dirname(gyp) + "/"
d = eval(open(gyp).read())
targets = d['targets']
files = []
for t in targets:
for s in t['sources']:
files.append(gypdir + s)
return files
def GetExtraFileList(base):
b = os.path.dirname(base) + "/"
files = []
for e in EXTRA:
files.append(b + e)
return files
def CopyFiles(archive, files):
os.system("mkdir " + archive)
pdir = archive + "/"
dirs = {}
for fn in files:
d = pdir + os.path.dirname(fn)
if not d in dirs:
dirs[d] = True
os.system("mkdir -p " + d)
os.system("cp -p " + fn + " " + pdir + fn)
def MakeTarBall(archive, files):
os.system("rm -rf " + archive)
CopyFiles(archive, files)
os.system("tar cvzf " + archive + ".tar.gz " + archive)
os.system("rm -rf " + archive)
if __name__ == '__main__':
files = GetGypFileList("src/iroha.gyp") + GetExtraFileList("./")
MakeTarBall(ARCHIVE, files)
|
<commit_before><commit_msg>Add script to collect files.<commit_after># Script to create tar.gz package. This will replace automake based "make dist".
import os
PACKAGE="iroha"
VERSION="0.1.0"
ARCHIVE=PACKAGE + "-" + VERSION
EXTRA = ["configure.ac", "configure", "ltmain.sh", "depcomp", "Makefile.in", "config.sub", "lib/cxx-rt.h", "missing", "config.guess", "install-sh", "aclocal.m4", "Makefile.am", "compile", "m4/ltoptions.m4", "m4/ltversion.m4", "m4/m4_ax_check_compile_flag.m4", "m4/libtool.m4", "m4/ltsugar.m4", "m4/lt~obsolete.m4", "iroha_files.py"]
def GetGypFileList(gyp):
gypdir = os.path.dirname(gyp) + "/"
d = eval(open(gyp).read())
targets = d['targets']
files = []
for t in targets:
for s in t['sources']:
files.append(gypdir + s)
return files
def GetExtraFileList(base):
b = os.path.dirname(base) + "/"
files = []
for e in EXTRA:
files.append(b + e)
return files
def CopyFiles(archive, files):
os.system("mkdir " + archive)
pdir = archive + "/"
dirs = {}
for fn in files:
d = pdir + os.path.dirname(fn)
if not d in dirs:
dirs[d] = True
os.system("mkdir -p " + d)
os.system("cp -p " + fn + " " + pdir + fn)
def MakeTarBall(archive, files):
os.system("rm -rf " + archive)
CopyFiles(archive, files)
os.system("tar cvzf " + archive + ".tar.gz " + archive)
os.system("rm -rf " + archive)
if __name__ == '__main__':
files = GetGypFileList("src/iroha.gyp") + GetExtraFileList("./")
MakeTarBall(ARCHIVE, files)
|
|
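GetGypFileList above works because a .gyp file is a Python dict literal, so a bare eval parses it; a minimal input it would accept looks like this (target and source names are invented, and the 'src/' prefix mirrors the dirname of the src/iroha.gyp argument):

gyp_source = "{'targets': [{'target_name': 'iroha', 'sources': ['main.cpp', 'parser.cpp']}]}"
d = eval(gyp_source)
files = ['src/' + s for t in d['targets'] for s in t['sources']]
assert files == ['src/main.cpp', 'src/parser.cpp']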
7872d42b99235a681e52f0e5bec83600b14345c5
|
svm_10folds_cv.py
|
svm_10folds_cv.py
|
import csv
from os.path import dirname, join
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.svm import LinearSVC
from modules.cleaner import clean
categories = ['traffic', 'non_traffic']
count_vect = CountVectorizer(preprocessor=clean)
with open(join(dirname(__file__), 'result/generated_datasets/traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'result/generated_datasets/non_traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [line[0] for line in dataset]
tweets = {
'data': traffic_tweets + non_traffic_tweets,
'target': [True] * len(traffic_tweets) + [False] * len(non_traffic_tweets),
}
training_vectors = count_vect.fit_transform(tweets['data'])
clf = LinearSVC(max_iter=10000)
cv = StratifiedKFold(n_splits=10, shuffle=True)
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv)
print(scores)
print("Accuracy: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='precision')
print(scores)
print("Precision: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='recall')
print(scores)
print("Recall: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='f1')
print(scores)
print("F1: {} (+/- {})".format(scores.mean(), scores.std() * 2))
|
Add k-folds CV with SVM
|
Add k-folds CV with SVM
|
Python
|
mit
|
dwiajik/twit-macet-mining-v3
|
Add k-folds CV with SVM
|
import csv
from os.path import dirname, join
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.svm import LinearSVC
from modules.cleaner import clean
categories = ['traffic', 'non_traffic']
count_vect = CountVectorizer(preprocessor=clean)
with open(join(dirname(__file__), 'result/generated_datasets/traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'result/generated_datasets/non_traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [line[0] for line in dataset]
tweets = {
'data': traffic_tweets + non_traffic_tweets,
'target': [True] * len(traffic_tweets) + [False] * len(non_traffic_tweets),
}
training_vectors = count_vect.fit_transform(tweets['data'])
clf = LinearSVC(max_iter=10000)
cv = StratifiedKFold(n_splits=10, shuffle=True)
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv)
print(scores)
print("Accuracy: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='precision')
print(scores)
print("Precision: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='recall')
print(scores)
print("Recall: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='f1')
print(scores)
print("F1: {} (+/- {})".format(scores.mean(), scores.std() * 2))
|
<commit_before><commit_msg>Add k-folds CV with SVM<commit_after>
|
import csv
from os.path import dirname, join
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.svm import LinearSVC
from modules.cleaner import clean
categories = ['traffic', 'non_traffic']
count_vect = CountVectorizer(preprocessor=clean)
with open(join(dirname(__file__), 'result/generated_datasets/traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'result/generated_datasets/non_traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [line[0] for line in dataset]
tweets = {
'data': traffic_tweets + non_traffic_tweets,
'target': [True] * len(traffic_tweets) + [False] * len(non_traffic_tweets),
}
training_vectors = count_vect.fit_transform(tweets['data'])
clf = LinearSVC(max_iter=10000)
cv = StratifiedKFold(n_splits=10, shuffle=True)
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv)
print(scores)
print("Accuracy: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='precision')
print(scores)
print("Precision: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='recall')
print(scores)
print("Recall: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='f1')
print(scores)
print("F1: {} (+/- {})".format(scores.mean(), scores.std() * 2))
|
Add k-folds CV with SVMimport csv
from os.path import dirname, join
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.svm import LinearSVC
from modules.cleaner import clean
categories = ['traffic', 'non_traffic']
count_vect = CountVectorizer(preprocessor=clean)
with open(join(dirname(__file__), 'result/generated_datasets/traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'result/generated_datasets/non_traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [line[0] for line in dataset]
tweets = {
'data': traffic_tweets + non_traffic_tweets,
'target': [True] * len(traffic_tweets) + [False] * len(non_traffic_tweets),
}
training_vectors = count_vect.fit_transform(tweets['data'])
clf = LinearSVC(max_iter=10000)
cv = StratifiedKFold(n_splits=10, shuffle=True)
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv)
print(scores)
print("Accuracy: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='precision')
print(scores)
print("Precision: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='recall')
print(scores)
print("Recall: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='f1')
print(scores)
print("F1: {} (+/- {})".format(scores.mean(), scores.std() * 2))
|
<commit_before><commit_msg>Add k-folds CV with SVM<commit_after>import csv
from os.path import dirname, join
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.svm import LinearSVC
from modules.cleaner import clean
categories = ['traffic', 'non_traffic']
count_vect = CountVectorizer(preprocessor=clean)
with open(join(dirname(__file__), 'result/generated_datasets/traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'result/generated_datasets/non_traffic.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [line[0] for line in dataset]
tweets = {
'data': traffic_tweets + non_traffic_tweets,
'target': [True] * len(traffic_tweets) + [False] * len(non_traffic_tweets),
}
training_vectors = count_vect.fit_transform(tweets['data'])
clf = LinearSVC(max_iter=10000)
cv = StratifiedKFold(n_splits=10, shuffle=True)
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv)
print(scores)
print("Accuracy: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='precision')
print(scores)
print("Precision: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='recall')
print(scores)
print("Recall: {} (+/- {})".format(scores.mean(), scores.std() * 2))
scores = cross_val_score(clf, training_vectors, tweets['target'], cv=cv, scoring='f1')
print(scores)
print("F1: {} (+/- {})".format(scores.mean(), scores.std() * 2))
|
|
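One subtlety in the script above: because StratifiedKFold is built with shuffle=True and no random_state, each cross_val_score call re-shuffles, so accuracy, precision, recall, and F1 are measured over different fold assignments. A sketch of the fix on synthetic data, so it runs standalone (it assumes the same scikit-learn model_selection API imported above):

import numpy as np
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.svm import LinearSVC

rng = np.random.RandomState(0)
X = rng.rand(200, 5)
y = rng.randint(0, 2, 200)

# With random_state fixed, every cross_val_score call reuses identical splits.
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
clf = LinearSVC(max_iter=10000)
acc = cross_val_score(clf, X, y, cv=cv)
f1 = cross_val_score(clf, X, y, cv=cv, scoring='f1')
print(acc.mean(), f1.mean())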
266ae3aa0a00f8b8872cd90a631196ff4a7afb38
|
st2common/st2common/constants/secrets.py
|
st2common/st2common/constants/secrets.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials',
'st2_auth_token'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
|
Add st2_auth_token to masked attributes list.
|
Add st2_auth_token to masked attributes list.
|
Python
|
apache-2.0
|
StackStorm/st2,alfasin/st2,punalpatel/st2,pixelrebel/st2,Plexxi/st2,dennybaa/st2,lakshmi-kannan/st2,emedvedev/st2,Plexxi/st2,dennybaa/st2,armab/st2,StackStorm/st2,lakshmi-kannan/st2,armab/st2,punalpatel/st2,StackStorm/st2,pixelrebel/st2,nzlosh/st2,peak6/st2,Plexxi/st2,nzlosh/st2,emedvedev/st2,dennybaa/st2,alfasin/st2,peak6/st2,nzlosh/st2,nzlosh/st2,tonybaloney/st2,pixelrebel/st2,peak6/st2,tonybaloney/st2,Plexxi/st2,StackStorm/st2,punalpatel/st2,emedvedev/st2,tonybaloney/st2,lakshmi-kannan/st2,armab/st2,alfasin/st2
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
Add st2_auth_token to masked attributes list.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials',
'st2_auth_token'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
|
<commit_before># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
<commit_msg>Add st2_auth_token to masked attributes list.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials',
'st2_auth_token'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
Add st2_auth_token to masked attributes list.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials',
'st2_auth_token'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
|
<commit_before># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
<commit_msg>Add st2_auth_token to masked attributes list.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'MASKED_ATTRIBUTES_BLACKLIST',
'MASKED_ATTRIBUTE_VALUE'
]
# A blacklist of attributes which should be masked in the log messages by default.
# Note: If an attribute is an object or a dict, we try to recursively process it and mask the
# values.
MASKED_ATTRIBUTES_BLACKLIST = [
'password',
'auth_token',
'token',
'secret',
'credentials',
'st2_auth_token'
]
# Value with which the masked attribute values are replaced
MASKED_ATTRIBUTE_VALUE = '********'
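A minimal consumer sketch (not part of the commit; the helper name and recursion rules are assumptions, not StackStorm's actual masking code), showing how a blacklist like this is typically applied to structured log payloads:
def mask_secrets(value,
                 blacklist=MASKED_ATTRIBUTES_BLACKLIST,
                 mask=MASKED_ATTRIBUTE_VALUE):
    # Hypothetical helper: recursively replace values of blacklisted keys.
    if isinstance(value, dict):
        return dict((k, mask if k in blacklist else mask_secrets(v, blacklist, mask))
                    for k, v in value.items())
    if isinstance(value, list):
        return [mask_secrets(item, blacklist, mask) for item in value]
    return value
# mask_secrets({'user': 'bob', 'st2_auth_token': 'abc123'})
# -> {'user': 'bob', 'st2_auth_token': '********'}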
|
a7fedf571935bccf10b3002427afe2070ad0666b
|
tagcache/utils.py
|
tagcache/utils.py
|
# -*- encoding: utf-8 -*-
import os
import errno
def ensure_intermediate_dir(path):
"""
    Basically equivalent to command `mkdir -p`
"""
try:
os.makedirs(os.path.dirname(path))
except OSError, e:
if e.errno != errno.EEXIST:
raise e
def open_file(filename, flag, mode=0777):
"""
Wrapper of `os.open` which ensure intermediate dirs are created as well.
"""
try:
        return os.open(filename, flag, mode)
except OSError, e:
if e.errno != errno.ENOENT or not (flag & os.O_CREAT):
raise e
# a directory component not exists
ensure_intermediate_dir(filename)
# second try
    return os.open(filename, flag, mode)
def link_file(src, dst):
"""
Wrapper of `os.link` which ensure intermediate dirs are created as well.
"""
try:
return os.link(src, dst)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(dst)
return os.link(src, dst)
def rename_file(old, new):
"""
Wrapper of `os.rename` which ensure intermediate dirs are created as well.
"""
try:
return os.rename(old, new)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(new)
return os.rename(old, new)
|
Add some file utility functions.
|
Add some file utility functions.
|
Python
|
mit
|
huangjunwen/tagcache
|
Add some file utility functions.
|
# -*- encoding: utf-8 -*-
import os
import errno
def ensure_intermediate_dir(path):
"""
    Basically equivalent to command `mkdir -p`
"""
try:
os.makedirs(os.path.dirname(path))
except OSError, e:
if e.errno != errno.EEXIST:
raise e
def open_file(filename, flag, mode=0777):
"""
Wrapper of `os.open` which ensure intermediate dirs are created as well.
"""
try:
        return os.open(filename, flag, mode)
except OSError, e:
if e.errno != errno.ENOENT or not (flag & os.O_CREAT):
raise e
# a directory component not exists
ensure_intermediate_dir(filename)
# second try
    return os.open(filename, flag, mode)
def link_file(src, dst):
"""
Wrapper of `os.link` which ensure intermediate dirs are created as well.
"""
try:
return os.link(src, dst)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(dst)
return os.link(src, dst)
def rename_file(old, new):
"""
Wrapper of `os.rename` which ensure intermediate dirs are created as well.
"""
try:
return os.rename(old, new)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(new)
return os.rename(old, new)
|
<commit_before><commit_msg>Add some file utility functions.<commit_after>
|
# -*- encoding: utf-8 -*-
import os
import errno
def ensure_intermediate_dir(path):
"""
    Basically equivalent to command `mkdir -p`
"""
try:
os.makedirs(os.path.dirname(path))
except OSError, e:
if e.errno != errno.EEXIST:
raise e
def open_file(filename, flag, mode=0777):
"""
Wrapper of `os.open` which ensure intermediate dirs are created as well.
"""
try:
        return os.open(filename, flag, mode)
except OSError, e:
if e.errno != errno.ENOENT or not (flag & os.O_CREAT):
raise e
# a directory component not exists
ensure_intermediate_dir(filename)
# second try
    return os.open(filename, flag, mode)
def link_file(src, dst):
"""
Wrapper of `os.link` which ensure intermediate dirs are created as well.
"""
try:
return os.link(src, dst)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(dst)
return os.link(src, dst)
def rename_file(old, new):
"""
Wrapper of `os.rename` which ensure intermediate dirs are created as well.
"""
try:
return os.rename(old, new)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(new)
return os.rename(old, new)
|
Add some file utility functions.# -*- encoding: utf-8 -*-
import os
import errno
def ensure_intermediate_dir(path):
"""
    Basically equivalent to command `mkdir -p`
"""
try:
os.makedirs(os.path.dirname(path))
except OSError, e:
if e.errno != errno.EEXIST:
raise e
def open_file(filename, flag, mode=0777):
"""
Wrapper of `os.open` which ensure intermediate dirs are created as well.
"""
try:
        return os.open(filename, flag, mode)
except OSError, e:
if e.errno != errno.ENOENT or not (flag & os.O_CREAT):
raise e
# a directory component not exists
ensure_intermediate_dir(filename)
# second try
    return os.open(filename, flag, mode)
def link_file(src, dst):
"""
Wrapper of `os.link` which ensure intermediate dirs are created as well.
"""
try:
return os.link(src, dst)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(dst)
return os.link(src, dst)
def rename_file(old, new):
"""
Wrapper of `os.rename` which ensure intermediate dirs are created as well.
"""
try:
return os.rename(old, new)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(new)
return os.rename(old, new)
|
<commit_before><commit_msg>Add some file utility functions.<commit_after># -*- encoding: utf-8 -*-
import os
import errno
def ensure_intermediate_dir(path):
"""
    Basically equivalent to command `mkdir -p`
"""
try:
os.makedirs(os.path.dirname(path))
except OSError, e:
if e.errno != errno.EEXIST:
raise e
def open_file(filename, flag, mode=0777):
"""
Wrapper of `os.open` which ensure intermediate dirs are created as well.
"""
try:
        return os.open(filename, flag, mode)
except OSError, e:
if e.errno != errno.ENOENT or not (flag & os.O_CREAT):
raise e
# a directory component not exists
ensure_intermediate_dir(filename)
# second try
    return os.open(filename, flag, mode)
def link_file(src, dst):
"""
Wrapper of `os.link` which ensure intermediate dirs are created as well.
"""
try:
return os.link(src, dst)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(dst)
return os.link(src, dst)
def rename_file(old, new):
"""
Wrapper of `os.rename` which ensure intermediate dirs are created as well.
"""
try:
return os.rename(old, new)
except OSError, e:
if e.errno != errno.ENOENT:
raise e
ensure_intermediate_dir(new)
return os.rename(old, new)
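A quick usage sketch (assumed, not part of the commit): open_file creates any missing parent directories on demand, so a nested path needs no prior mkdir. The path below is illustrative.
import os
fd = open_file('/tmp/tagcache-demo/a/b/data.txt', os.O_WRONLY | os.O_CREAT)
os.write(fd, 'hello\n')
os.close(fd)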
|
|
81b4cd41198edd388d68bf49dfa2a87c43f3357e
|
joblib/test/test_func_inspect.py
|
joblib/test/test_func_inspect.py
|
"""
Test the func_inspect module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import nose
import tempfile
from ..func_inspect import filter_args, get_func_name
from ..memory import Memory
################################################################################
# Module-level functions, for tests
def f(x, y=0):
pass
# Create a Memory object to test decorated functions.
# We should be careful not to call the decorated functions, so that
# cache directories are not created in the temp dir.
mem = Memory(cachedir=tempfile.gettempdir())
@mem.cache
def g(x):
return x
################################################################################
# Tests
def xxtest_filter_args():
    nose.tools.assert_equal(filter_args(['x']))
def test_func_name():
yield nose.tools.assert_equal, 'f', get_func_name(f)[1]
# Check that we are not confused by the decoration
yield nose.tools.assert_equal, 'g', get_func_name(g)[1]
|
Add tests checking that we get the names right.
|
TEST: Add tests checking that we get the names right.
|
Python
|
bsd-3-clause
|
tomMoral/joblib,karandesai-96/joblib,aabadie/joblib,joblib/joblib,aabadie/joblib,karandesai-96/joblib,lesteve/joblib,joblib/joblib,tomMoral/joblib,lesteve/joblib
|
TEST: Add tests checking that we get the names right.
|
"""
Test the func_inspect module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import nose
import tempfile
from ..func_inspect import filter_args, get_func_name
from ..memory import Memory
################################################################################
# Module-level functions, for tests
def f(x, y=0):
pass
# Create a Memory object to test decorated functions.
# We should be careful not to call the decorated functions, so that
# cache directories are not created in the temp dir.
mem = Memory(cachedir=tempfile.gettempdir())
@mem.cache
def g(x):
return x
################################################################################
# Tests
def xxtest_filter_args():
    nose.tools.assert_equal(filter_args(['x']))
def test_func_name():
yield nose.tools.assert_equal, 'f', get_func_name(f)[1]
# Check that we are not confused by the decoration
yield nose.tools.assert_equal, 'g', get_func_name(g)[1]
|
<commit_before><commit_msg>TEST: Add tests checking that we get the names right.<commit_after>
|
"""
Test the func_inspect module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import nose
import tempfile
from ..func_inspect import filter_args, get_func_name
from ..memory import Memory
################################################################################
# Module-level functions, for tests
def f(x, y=0):
pass
# Create a Memory object to test decorated functions.
# We should be careful not to call the decorated functions, so that
# cache directories are not created in the temp dir.
mem = Memory(cachedir=tempfile.gettempdir())
@mem.cache
def g(x):
return x
################################################################################
# Tests
def xxtest_filter_args():
    nose.tools.assert_equal(filter_args(['x']))
def test_func_name():
yield nose.tools.assert_equal, 'f', get_func_name(f)[1]
# Check that we are not confused by the decoration
yield nose.tools.assert_equal, 'g', get_func_name(g)[1]
|
TEST: Add tests checking that we get the names right."""
Test the func_inspect module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import nose
import tempfile
from ..func_inspect import filter_args, get_func_name
from ..memory import Memory
################################################################################
# Module-level functions, for tests
def f(x, y=0):
pass
# Create a Memory object to test decorated functions.
# We should be careful not to call the decorated functions, so that
# cache directories are not created in the temp dir.
mem = Memory(cachedir=tempfile.gettempdir())
@mem.cache
def g(x):
return x
################################################################################
# Tests
def xxtest_filter_args():
    nose.tools.assert_equal(filter_args(['x']))
def test_func_name():
yield nose.tools.assert_equal, 'f', get_func_name(f)[1]
# Check that we are not confused by the decoration
yield nose.tools.assert_equal, 'g', get_func_name(g)[1]
|
<commit_before><commit_msg>TEST: Add tests checking that we get the names right.<commit_after>"""
Test the func_inspect module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import nose
import tempfile
from ..func_inspect import filter_args, get_func_name
from ..memory import Memory
################################################################################
# Module-level functions, for tests
def f(x, y=0):
pass
# Create a Memory object to test decorated functions.
# We should be careful not to call the decorated functions, so that
# cache directories are not created in the temp dir.
mem = Memory(cachedir=tempfile.gettempdir())
@mem.cache
def g(x):
return x
################################################################################
# Tests
def xxtest_filter_args():
    nose.tools.assert_equal(filter_args(['x']))
def test_func_name():
yield nose.tools.assert_equal, 'f', get_func_name(f)[1]
# Check that we are not confused by the decoration
yield nose.tools.assert_equal, 'g', get_func_name(g)[1]
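For context (an inference from the [1] indexing above, not stated in the commit): get_func_name returns a pair whose second element is the bare function name, and the test asserts that @mem.cache decoration does not change it. Run inside the test module above:
module_path, name = get_func_name(g)  # reuses the imports above
assert name == 'g', 'decoration must not hide the original function name'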
|
|
8b16282fe0e9332c6224ff83c9236eb9e8780fdb
|
txircd/ircbase.py
|
txircd/ircbase.py
|
from twisted.protocols.basic import LineOnlyReceiver
class IRCBase(LineOnlyReceiver):
delimiter = "\n" # Default to splitting by \n, and then we'll also split \r in the handler
def lineReceived(self, data):
for line in data.split("\r"):
command, params, prefix, tags = self._parseLine(line)
if command:
self.handleCommand(command, params, prefix, tags)
def _parseLine(self, line):
if " :" in line:
linePart, lastParam = line.split(" :", 1)
else:
linePart = line
lastParam = None
if linePart[0] == "@":
if " " not in linePart:
return None, None, None, None
tagLine, linePart = linePart.split(" ", 1)
tags = self._parseTags(tagLine[1:])
else:
tags = {}
if linePart[0] == ":":
if " " not in linePart:
return None, None, None, None
prefix, linePart = linePart.split(" ", 1)
prefix = prefix[1:]
if " " in linePart:
            command, paramLine = linePart.split(" ", 1)
params = paramLine.split(" ")
else:
command = linePart
params = []
if lastParam:
params.append(lastParam)
return command, params, prefix, tags
def _parseTags(self, tagLine):
tags = {}
for tagval in tagLine.split(";"):
if "=" in tagval:
tag, value = tagval.split("=", 1)
else:
tag = tagval
value = None
tags[tag] = value
return tags
def handleCommand(self, command, params, prefix, tags):
pass
def sendMessage(self, command, *params, **kw):
if "tags" in kw:
tags = self._buildTagString(kw["tags"])
else:
tags = None
if "prefix" in kw:
prefix = kw["prefix"]
else:
prefix = None
if " " in params[-1]:
params[-1] = ":{}".format(params[-1])
lineToSend = ""
if tags:
lineToSend += "@{} ".format(tags)
if prefix:
lineToSend += ":{} ".format(prefix)
lineToSend += "{} {}".format(command, " ".join(params))
self.sendLine(lineToSend)
|
Create an IRC parser base to replace the Twisted one
|
Create an IRC parser base to replace the Twisted one
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Create an IRC parser base to replace the Twisted one
|
from twisted.protocols.basic import LineOnlyReceiver
class IRCBase(LineOnlyReceiver):
delimiter = "\n" # Default to splitting by \n, and then we'll also split \r in the handler
def lineReceived(self, data):
for line in data.split("\r"):
command, params, prefix, tags = self._parseLine(line)
if command:
self.handleCommand(command, params, prefix, tags)
def _parseLine(self, line):
if " :" in line:
linePart, lastParam = line.split(" :", 1)
else:
linePart = line
lastParam = None
if linePart[0] == "@":
if " " not in linePart:
return None, None, None, None
tagLine, linePart = linePart.split(" ", 1)
tags = self._parseTags(tagLine[1:])
else:
tags = {}
if linePart[0] == ":":
if " " not in linePart:
return None, None, None, None
prefix, linePart = linePart.split(" ", 1)
prefix = prefix[1:]
if " " in linePart:
            command, paramLine = linePart.split(" ", 1)
params = paramLine.split(" ")
else:
command = linePart
params = []
if lastParam:
params.append(lastParam)
return command, params, prefix, tags
def _parseTags(self, tagLine):
tags = {}
for tagval in tagLine.split(";"):
if "=" in tagval:
tag, value = tagval.split("=", 1)
else:
tag = tagval
value = None
tags[tag] = value
return tags
def handleCommand(self, command, params, prefix, tags):
pass
def sendMessage(self, command, *params, **kw):
if "tags" in kw:
tags = self._buildTagString(kw["tags"])
else:
tags = None
if "prefix" in kw:
prefix = kw["prefix"]
else:
prefix = None
if " " in params[-1]:
params[-1] = ":{}".format(params[-1])
lineToSend = ""
if tags:
lineToSend += "@{} ".format(tags)
if prefix:
lineToSend += ":{} ".format(prefix)
lineToSend += "{} {}".format(command, " ".join(params))
self.sendLine(lineToSend)
|
<commit_before><commit_msg>Create an IRC parser base to replace the Twisted one<commit_after>
|
from twisted.protocols.basic import LineOnlyReceiver
class IRCBase(LineOnlyReceiver):
delimiter = "\n" # Default to splitting by \n, and then we'll also split \r in the handler
def lineReceived(self, data):
for line in data.split("\r"):
command, params, prefix, tags = self._parseLine(line)
if command:
self.handleCommand(command, params, prefix, tags)
def _parseLine(self, line):
if " :" in line:
linePart, lastParam = line.split(" :", 1)
else:
linePart = line
lastParam = None
if linePart[0] == "@":
if " " not in linePart:
return None, None, None, None
tagLine, linePart = linePart.split(" ", 1)
tags = self._parseTags(tagLine[1:])
else:
tags = {}
if linePart[0] == ":":
if " " not in linePart:
return None, None, None, None
prefix, linePart = linePart.split(" ", 1)
prefix = prefix[1:]
if " " in linePart:
            command, paramLine = linePart.split(" ", 1)
params = paramLine.split(" ")
else:
command = linePart
params = []
if lastParam:
params.append(lastParam)
return command, params, prefix, tags
def _parseTags(self, tagLine):
tags = {}
for tagval in tagLine.split(";"):
if "=" in tagval:
tag, value = tagval.split("=", 1)
else:
tag = tagval
value = None
tags[tag] = value
return tags
def handleCommand(self, command, params, prefix, tags):
pass
def sendMessage(self, command, *params, **kw):
if "tags" in kw:
tags = self._buildTagString(kw["tags"])
else:
tags = None
if "prefix" in kw:
prefix = kw["prefix"]
else:
prefix = None
if " " in params[-1]:
params[-1] = ":{}".format(params[-1])
lineToSend = ""
if tags:
lineToSend += "@{} ".format(tags)
if prefix:
lineToSend += ":{} ".format(prefix)
lineToSend += "{} {}".format(command, " ".join(params))
self.sendLine(lineToSend)
|
Create an IRC parser base to replace the Twisted onefrom twisted.protocols.basic import LineOnlyReceiver
class IRCBase(LineOnlyReceiver):
delimiter = "\n" # Default to splitting by \n, and then we'll also split \r in the handler
def lineReceived(self, data):
for line in data.split("\r"):
command, params, prefix, tags = self._parseLine(line)
if command:
self.handleCommand(command, params, prefix, tags)
def _parseLine(self, line):
if " :" in line:
linePart, lastParam = line.split(" :", 1)
else:
linePart = line
lastParam = None
if linePart[0] == "@":
if " " not in linePart:
return None, None, None, None
tagLine, linePart = linePart.split(" ", 1)
tags = self._parseTags(tagLine[1:])
else:
tags = {}
if linePart[0] == ":":
if " " not in linePart:
return None, None, None, None
prefix, linePart = linePart.split(" ", 1)
prefix = prefix[1:]
if " " in linePart:
            command, paramLine = linePart.split(" ", 1)
params = paramLine.split(" ")
else:
command = linePart
params = []
if lastParam:
params.append(lastParam)
return command, params, prefix, tags
def _parseTags(self, tagLine):
tags = {}
for tagval in tagLine.split(";"):
if "=" in tagval:
tag, value = tagval.split("=", 1)
else:
tag = tagval
value = None
tags[tag] = value
return tags
def handleCommand(self, command, params, prefix, tags):
pass
def sendMessage(self, command, *params, **kw):
if "tags" in kw:
tags = self._buildTagString(kw["tags"])
else:
tags = None
if "prefix" in kw:
prefix = kw["prefix"]
else:
prefix = None
if " " in params[-1]:
params[-1] = ":{}".format(params[-1])
lineToSend = ""
if tags:
lineToSend += "@{} ".format(tags)
if prefix:
lineToSend += ":{} ".format(prefix)
lineToSend += "{} {}".format(command, " ".join(params))
self.sendLine(lineToSend)
|
<commit_before><commit_msg>Create an IRC parser base to replace the Twisted one<commit_after>from twisted.protocols.basic import LineOnlyReceiver
class IRCBase(LineOnlyReceiver):
delimiter = "\n" # Default to splitting by \n, and then we'll also split \r in the handler
def lineReceived(self, data):
for line in data.split("\r"):
command, params, prefix, tags = self._parseLine(line)
if command:
self.handleCommand(command, params, prefix, tags)
def _parseLine(self, line):
if " :" in line:
linePart, lastParam = line.split(" :", 1)
else:
linePart = line
lastParam = None
if linePart[0] == "@":
if " " not in linePart:
return None, None, None, None
tagLine, linePart = linePart.split(" ", 1)
tags = self._parseTags(tagLine[1:])
else:
tags = {}
if linePart[0] == ":":
if " " not in linePart:
return None, None, None, None
prefix, linePart = linePart.split(" ", 1)
prefix = prefix[1:]
if " " in linePart:
            command, paramLine = linePart.split(" ", 1)
params = paramLine.split(" ")
else:
command = linePart
params = []
if lastParam:
params.append(lastParam)
return command, params, prefix, tags
def _parseTags(self, tagLine):
tags = {}
for tagval in tagLine.split(";"):
if "=" in tagval:
tag, value = tagval.split("=", 1)
else:
tag = tagval
value = None
tags[tag] = value
return tags
def handleCommand(self, command, params, prefix, tags):
pass
def sendMessage(self, command, *params, **kw):
if "tags" in kw:
tags = self._buildTagString(kw["tags"])
else:
tags = None
if "prefix" in kw:
prefix = kw["prefix"]
else:
prefix = None
if " " in params[-1]:
params[-1] = ":{}".format(params[-1])
lineToSend = ""
if tags:
lineToSend += "@{} ".format(tags)
if prefix:
lineToSend += ":{} ".format(prefix)
lineToSend += "{} {}".format(command, " ".join(params))
self.sendLine(lineToSend)
|
|
4c90aa6581b3c26a3da1cd83fd92e82ab9e70d68
|
_tests/test_feed.py
|
_tests/test_feed.py
|
# -*- encoding: utf-8
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.text_plain import Formatter
def test_feed_passes_validation():
events = feedvalidator.validateStream(
open('_site/feeds/all.atom.xml'),
firstOccurrenceOnly=1
)['loggedEvents']
events = compatibility.AA(events)
output = Formatter(events)
assert not output, '\n'.join(output)
|
Add a test that the feed passes validation
|
Add a test that the feed passes validation
|
Python
|
mit
|
alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net
|
Add a test that the feed passes validation
|
# -*- encoding: utf-8
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.text_plain import Formatter
def test_feed_passes_validation():
events = feedvalidator.validateStream(
open('_site/feeds/all.atom.xml'),
firstOccurrenceOnly=1
)['loggedEvents']
events = compatibility.AA(events)
output = Formatter(events)
assert not output, '\n'.join(output)
|
<commit_before><commit_msg>Add a test that the feed passes validation<commit_after>
|
# -*- encoding: utf-8
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.text_plain import Formatter
def test_feed_passes_validation():
events = feedvalidator.validateStream(
open('_site/feeds/all.atom.xml'),
firstOccurrenceOnly=1
)['loggedEvents']
events = compatibility.AA(events)
output = Formatter(events)
assert not output, '\n'.join(output)
|
Add a test that the feed passes validation# -*- encoding: utf-8
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.text_plain import Formatter
def test_feed_passes_validation():
events = feedvalidator.validateStream(
open('_site/feeds/all.atom.xml'),
firstOccurrenceOnly=1
)['loggedEvents']
events = compatibility.AA(events)
output = Formatter(events)
assert not output, '\n'.join(output)
|
<commit_before><commit_msg>Add a test that the feed passes validation<commit_after># -*- encoding: utf-8
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.text_plain import Formatter
def test_feed_passes_validation():
events = feedvalidator.validateStream(
open('_site/feeds/all.atom.xml'),
firstOccurrenceOnly=1
)['loggedEvents']
events = compatibility.AA(events)
output = Formatter(events)
assert not output, '\n'.join(output)
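The same check can be run by hand outside the test runner (a sketch reusing the exact calls above; the feed path is the one the test assumes):
import feedvalidator
from feedvalidator import compatibility
from feedvalidator.formatter.text_plain import Formatter
events = feedvalidator.validateStream(open('_site/feeds/all.atom.xml'),
                                      firstOccurrenceOnly=1)['loggedEvents']
for line in Formatter(compatibility.AA(events)):
    print(line)  # empty output means the feed validated cleanly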
|
|
f2a889564b3a215902622b040a1247af38cb8203
|
tests/basics/gc1.py
|
tests/basics/gc1.py
|
# basic tests for gc module
try:
import gc
except ImportError:
print("SKIP")
import sys
sys.exit()
print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())
gc.collect()
if hasattr(gc, 'mem_free'):
# uPy has these extra functions
# just test they execute and return an int
assert type(gc.mem_free()) is int
assert type(gc.mem_alloc()) is int
|
Add basics test for gc module.
|
tests: Add basics test for gc module.
|
Python
|
mit
|
MrSurly/micropython-esp32,MrSurly/micropython-esp32,AriZuu/micropython,Peetz0r/micropython-esp32,praemdonck/micropython,chrisdearman/micropython,drrk/micropython,HenrikSolver/micropython,slzatz/micropython,PappaPeppar/micropython,vitiral/micropython,adafruit/circuitpython,mpalomer/micropython,drrk/micropython,lbattraw/micropython,micropython/micropython-esp32,ceramos/micropython,jlillest/micropython,pozetroninc/micropython,TDAbboud/micropython,suda/micropython,blazewicz/micropython,xyb/micropython,praemdonck/micropython,bvernoux/micropython,emfcamp/micropython,tdautc19841202/micropython,Timmenem/micropython,tobbad/micropython,adafruit/micropython,deshipu/micropython,rubencabrera/micropython,dxxb/micropython,mgyenik/micropython,cwyark/micropython,supergis/micropython,selste/micropython,blmorris/micropython,slzatz/micropython,martinribelotta/micropython,lowRISC/micropython,henriknelson/micropython,HenrikSolver/micropython,infinnovation/micropython,adafruit/circuitpython,firstval/micropython,blazewicz/micropython,ganshun666/micropython,dhylands/micropython,xhat/micropython,tralamazza/micropython,utopiaprince/micropython,paul-xxx/micropython,xuxiaoxin/micropython,ChuckM/micropython,micropython/micropython-esp32,rubencabrera/micropython,lowRISC/micropython,TDAbboud/micropython,ganshun666/micropython,kostyll/micropython,lowRISC/micropython,noahchense/micropython,rubencabrera/micropython,hosaka/micropython,danicampora/micropython,ryannathans/micropython,mhoffma/micropython,Timmenem/micropython,adafruit/micropython,infinnovation/micropython,xyb/micropython,KISSMonX/micropython,SungEun-Steve-Kim/test-mp,warner83/micropython,mianos/micropython,xuxiaoxin/micropython,HenrikSolver/micropython,tralamazza/micropython,dmazzella/micropython,kostyll/micropython,utopiaprince/micropython,noahwilliamsson/micropython,danicampora/micropython,chrisdearman/micropython,pramasoul/micropython,skybird6672/micropython,torwag/micropython,martinribelotta/micropython,Peetz0r/micropython-esp32,blmorris/micropython,ChuckM/micropython,SHA2017-badge/micropython-esp32,oopy/micropython,ahotam/micropython,MrSurly/micropython,drrk/micropython,infinnovation/micropython,adamkh/micropython,stonegithubs/micropython,heisewangluo/micropython,xuxiaoxin/micropython,ceramos/micropython,feilongfl/micropython,selste/micropython,puuu/micropython,kostyll/micropython,tralamazza/micropython,HenrikSolver/micropython,hosaka/micropython,emfcamp/micropython,heisewangluo/micropython,dxxb/micropython,tdautc19841202/micropython,redbear/micropython,jimkmc/micropython,Timmenem/micropython,mhoffma/micropython,ruffy91/micropython,ernesto-g/micropython,puuu/micropython,stonegithubs/micropython,ernesto-g/micropython,orionrobots/micropython,hosaka/micropython,ryannathans/micropython,lbattraw/micropython,xuxiaoxin/micropython,lowRISC/micropython,drrk/micropython,chrisdearman/micropython,MrSurly/micropython,ryannathans/micropython,toolmacher/micropython,MrSurly/micropython-esp32,EcmaXp/micropython,vriera/micropython,cwyark/micropython,matthewelse/micropython,tobbad/micropython,cloudformdesign/micropython,orionrobots/micropython,mpalomer/micropython,heisewangluo/micropython,SungEun-Steve-Kim/test-mp,chrisdearman/micropython,utopiaprince/micropython,SungEun-Steve-Kim/test-mp,trezor/micropython,noahchense/micropython,adamkh/micropython,galenhz/micropython,stonegithubs/micropython,chrisdearman/micropython,blazewicz/micropython,ahotam/micropython,EcmaXp/micropython,PappaPeppar/micropython,dxxb/micropython,MrSurly/micropython-esp32,KISSMonX/micropython,matthewelse/mi
cropython,ruffy91/micropython,SHA2017-badge/micropython-esp32,henriknelson/micropython,skybird6672/micropython,praemdonck/micropython,stonegithubs/micropython,lbattraw/micropython,redbear/micropython,alex-march/micropython,warner83/micropython,mhoffma/micropython,redbear/micropython,drrk/micropython,feilongfl/micropython,deshipu/micropython,AriZuu/micropython,deshipu/micropython,blmorris/micropython,torwag/micropython,ryannathans/micropython,kerneltask/micropython,hiway/micropython,cwyark/micropython,noahchense/micropython,tdautc19841202/micropython,pozetroninc/micropython,tralamazza/micropython,deshipu/micropython,adamkh/micropython,ryannathans/micropython,mpalomer/micropython,firstval/micropython,matthewelse/micropython,ganshun666/micropython,vitiral/micropython,swegener/micropython,rubencabrera/micropython,tdautc19841202/micropython,KISSMonX/micropython,skybird6672/micropython,heisewangluo/micropython,vitiral/micropython,xhat/micropython,omtinez/micropython,omtinez/micropython,infinnovation/micropython,jlillest/micropython,ganshun666/micropython,mpalomer/micropython,dmazzella/micropython,alex-robbins/micropython,matthewelse/micropython,noahwilliamsson/micropython,mhoffma/micropython,toolmacher/micropython,neilh10/micropython,selste/micropython,galenhz/micropython,adafruit/micropython,suda/micropython,vriera/micropython,ericsnowcurrently/micropython,slzatz/micropython,utopiaprince/micropython,danicampora/micropython,jmarcelino/pycom-micropython,xyb/micropython,AriZuu/micropython,ericsnowcurrently/micropython,feilongfl/micropython,turbinenreiter/micropython,cloudformdesign/micropython,dxxb/micropython,dinau/micropython,martinribelotta/micropython,pfalcon/micropython,firstval/micropython,mianos/micropython,dinau/micropython,adafruit/circuitpython,emfcamp/micropython,tuc-osg/micropython,slzatz/micropython,dhylands/micropython,suda/micropython,emfcamp/micropython,pramasoul/micropython,galenhz/micropython,danicampora/micropython,neilh10/micropython,micropython/micropython-esp32,cloudformdesign/micropython,redbear/micropython,jimkmc/micropython,AriZuu/micropython,cnoviello/micropython,oopy/micropython,hiway/micropython,danicampora/micropython,matthewelse/micropython,ernesto-g/micropython,hosaka/micropython,tobbad/micropython,infinnovation/micropython,hosaka/micropython,toolmacher/micropython,jmarcelino/pycom-micropython,lowRISC/micropython,misterdanb/micropython,mpalomer/micropython,supergis/micropython,stonegithubs/micropython,alex-robbins/micropython,xhat/micropython,PappaPeppar/micropython,paul-xxx/micropython,dinau/micropython,puuu/micropython,ceramos/micropython,noahwilliamsson/micropython,supergis/micropython,kostyll/micropython,henriknelson/micropython,emfcamp/micropython,pozetroninc/micropython,cwyark/micropython,vriera/micropython,praemdonck/micropython,utopiaprince/micropython,dinau/micropython,misterdanb/micropython,hiway/micropython,kerneltask/micropython,ganshun666/micropython,adafruit/micropython,ericsnowcurrently/micropython,MrSurly/micropython,martinribelotta/micropython,swegener/micropython,ernesto-g/micropython,ericsnowcurrently/micropython,tuc-osg/micropython,selste/micropython,deshipu/micropython,warner83/micropython,alex-march/micropython,dmazzella/micropython,ChuckM/micropython,swegener/micropython,EcmaXp/micropython,pramasoul/micropython,turbinenreiter/micropython,tuc-osg/micropython,mgyenik/micropython,paul-xxx/micropython,swegener/micropython,turbinenreiter/micropython,trezor/micropython,mianos/micropython,mhoffma/micropython,tuc-osg/micropython,suda/micropython,cloudfo
rmdesign/micropython,ericsnowcurrently/micropython,adafruit/micropython,bvernoux/micropython,lbattraw/micropython,MrSurly/micropython,cloudformdesign/micropython,blazewicz/micropython,PappaPeppar/micropython,misterdanb/micropython,warner83/micropython,MrSurly/micropython,toolmacher/micropython,vriera/micropython,cnoviello/micropython,jlillest/micropython,cnoviello/micropython,ahotam/micropython,micropython/micropython-esp32,dinau/micropython,ahotam/micropython,TDAbboud/micropython,jimkmc/micropython,ruffy91/micropython,feilongfl/micropython,martinribelotta/micropython,trezor/micropython,xyb/micropython,pfalcon/micropython,dxxb/micropython,vitiral/micropython,paul-xxx/micropython,oopy/micropython,alex-march/micropython,jmarcelino/pycom-micropython,ahotam/micropython,jmarcelino/pycom-micropython,misterdanb/micropython,bvernoux/micropython,supergis/micropython,toolmacher/micropython,vitiral/micropython,SungEun-Steve-Kim/test-mp,dhylands/micropython,skybird6672/micropython,pramasoul/micropython,jimkmc/micropython,ruffy91/micropython,ceramos/micropython,torwag/micropython,kerneltask/micropython,mgyenik/micropython,selste/micropython,xuxiaoxin/micropython,noahchense/micropython,heisewangluo/micropython,oopy/micropython,trezor/micropython,Peetz0r/micropython-esp32,henriknelson/micropython,TDAbboud/micropython,cnoviello/micropython,micropython/micropython-esp32,SHA2017-badge/micropython-esp32,mianos/micropython,torwag/micropython,TDAbboud/micropython,Timmenem/micropython,hiway/micropython,vriera/micropython,SungEun-Steve-Kim/test-mp,alex-march/micropython,misterdanb/micropython,supergis/micropython,lbattraw/micropython,turbinenreiter/micropython,neilh10/micropython,blmorris/micropython,adamkh/micropython,kostyll/micropython,kerneltask/micropython,jlillest/micropython,SHA2017-badge/micropython-esp32,pozetroninc/micropython,MrSurly/micropython-esp32,jimkmc/micropython,puuu/micropython,pfalcon/micropython,jlillest/micropython,omtinez/micropython,paul-xxx/micropython,redbear/micropython,firstval/micropython,jmarcelino/pycom-micropython,firstval/micropython,xyb/micropython,oopy/micropython,turbinenreiter/micropython,skybird6672/micropython,noahwilliamsson/micropython,tobbad/micropython,feilongfl/micropython,suda/micropython,mgyenik/micropython,warner83/micropython,noahchense/micropython,praemdonck/micropython,dmazzella/micropython,alex-robbins/micropython,adafruit/circuitpython,adamkh/micropython,alex-robbins/micropython,mianos/micropython,orionrobots/micropython,dhylands/micropython,ruffy91/micropython,pfalcon/micropython,neilh10/micropython,ceramos/micropython,swegener/micropython,puuu/micropython,slzatz/micropython,tuc-osg/micropython,xhat/micropython,alex-robbins/micropython,kerneltask/micropython,Peetz0r/micropython-esp32,Timmenem/micropython,orionrobots/micropython,SHA2017-badge/micropython-esp32,blmorris/micropython,cwyark/micropython,neilh10/micropython,xhat/micropython,ernesto-g/micropython,galenhz/micropython,mgyenik/micropython,EcmaXp/micropython,omtinez/micropython,ChuckM/micropython,EcmaXp/micropython,noahwilliamsson/micropython,PappaPeppar/micropython,Peetz0r/micropython-esp32,HenrikSolver/micropython,KISSMonX/micropython,adafruit/circuitpython,KISSMonX/micropython,bvernoux/micropython,henriknelson/micropython,AriZuu/micropython,alex-march/micropython,omtinez/micropython,matthewelse/micropython,adafruit/circuitpython,rubencabrera/micropython,hiway/micropython,galenhz/micropython,blazewicz/micropython,dhylands/micropython,pfalcon/micropython,tdautc19841202/micropython,tobbad/micropython,cno
viello/micropython,pozetroninc/micropython,pramasoul/micropython,orionrobots/micropython,trezor/micropython,bvernoux/micropython,ChuckM/micropython,torwag/micropython
|
tests: Add basics test for gc module.
|
# basic tests for gc module
try:
import gc
except ImportError:
print("SKIP")
import sys
sys.exit()
print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())
gc.collect()
if hasattr(gc, 'mem_free'):
# uPy has these extra functions
# just test they execute and return an int
assert type(gc.mem_free()) is int
assert type(gc.mem_alloc()) is int
|
<commit_before><commit_msg>tests: Add basics test for gc module.<commit_after>
|
# basic tests for gc module
try:
import gc
except ImportError:
print("SKIP")
import sys
sys.exit()
print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())
gc.collect()
if hasattr(gc, 'mem_free'):
# uPy has these extra functions
# just test they execute and return an int
assert type(gc.mem_free()) is int
assert type(gc.mem_alloc()) is int
|
tests: Add basics test for gc module.# basic tests for gc module
try:
import gc
except ImportError:
print("SKIP")
import sys
sys.exit()
print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())
gc.collect()
if hasattr(gc, 'mem_free'):
# uPy has these extra functions
# just test they execute and return an int
assert type(gc.mem_free()) is int
assert type(gc.mem_alloc()) is int
|
<commit_before><commit_msg>tests: Add basics test for gc module.<commit_after># basic tests for gc module
try:
import gc
except ImportError:
print("SKIP")
import sys
sys.exit()
print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())
gc.collect()
if hasattr(gc, 'mem_free'):
# uPy has these extra functions
# just test they execute and return an int
assert type(gc.mem_free()) is int
assert type(gc.mem_alloc()) is int
|
|
5df72792a708adb17969dcad4dcbbe60212dcad7
|
tests/test_login.py
|
tests/test_login.py
|
from . import TheInternetTestCase
from helium.api import Text, write, press, ENTER, click
class BasicAuthTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/login"
def test_valid_credentials(self):
self._login("tomsmith", "SuperSecretPassword!")
self.assertTrue(Text("Secure Area").exists())
self._logout()
def test_no_credentials(self):
click("Login")
self.assertTrue(Text("Your username is invalid!").exists())
def test_invalid_password(self):
self._login("tomsmith", "INVALID_PASSWORD")
self.assertTrue(Text("Your password is invalid!").exists())
def _login(self, username, password):
write(username, into="Username")
write(password, into="Password")
press(ENTER)
def _logout(self):
click("Logout")
self.assertTrue(Text("You logged out of the secure area!").exists())
|
Add test case for login form.
|
Add test case for login form.
|
Python
|
mit
|
bugfree-software/the-internet-solution-python
|
Add test case for login form.
|
from . import TheInternetTestCase
from helium.api import Text, write, press, ENTER, click
class BasicAuthTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/login"
def test_valid_credentials(self):
self._login("tomsmith", "SuperSecretPassword!")
self.assertTrue(Text("Secure Area").exists())
self._logout()
def test_no_credentials(self):
click("Login")
self.assertTrue(Text("Your username is invalid!").exists())
def test_invalid_password(self):
self._login("tomsmith", "INVALID_PASSWORD")
self.assertTrue(Text("Your password is invalid!").exists())
def _login(self, username, password):
write(username, into="Username")
write(password, into="Password")
press(ENTER)
def _logout(self):
click("Logout")
self.assertTrue(Text("You logged out of the secure area!").exists())
|
<commit_before><commit_msg>Add test case for login form.<commit_after>
|
from . import TheInternetTestCase
from helium.api import Text, write, press, ENTER, click
class BasicAuthTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/login"
def test_valid_credentials(self):
self._login("tomsmith", "SuperSecretPassword!")
self.assertTrue(Text("Secure Area").exists())
self._logout()
def test_no_credentials(self):
click("Login")
self.assertTrue(Text("Your username is invalid!").exists())
def test_invalid_password(self):
self._login("tomsmith", "INVALID_PASSWORD")
self.assertTrue(Text("Your password is invalid!").exists())
def _login(self, username, password):
write(username, into="Username")
write(password, into="Password")
press(ENTER)
def _logout(self):
click("Logout")
self.assertTrue(Text("You logged out of the secure area!").exists())
|
Add test case for login form.from . import TheInternetTestCase
from helium.api import Text, write, press, ENTER, click
class BasicAuthTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/login"
def test_valid_credentials(self):
self._login("tomsmith", "SuperSecretPassword!")
self.assertTrue(Text("Secure Area").exists())
self._logout()
def test_no_credentials(self):
click("Login")
self.assertTrue(Text("Your username is invalid!").exists())
def test_invalid_password(self):
self._login("tomsmith", "INVALID_PASSWORD")
self.assertTrue(Text("Your password is invalid!").exists())
def _login(self, username, password):
write(username, into="Username")
write(password, into="Password")
press(ENTER)
def _logout(self):
click("Logout")
self.assertTrue(Text("You logged out of the secure area!").exists())
|
<commit_before><commit_msg>Add test case for login form.<commit_after>from . import TheInternetTestCase
from helium.api import Text, write, press, ENTER, click
class BasicAuthTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/login"
def test_valid_credentials(self):
self._login("tomsmith", "SuperSecretPassword!")
self.assertTrue(Text("Secure Area").exists())
self._logout()
def test_no_credentials(self):
click("Login")
self.assertTrue(Text("Your username is invalid!").exists())
def test_invalid_password(self):
self._login("tomsmith", "INVALID_PASSWORD")
self.assertTrue(Text("Your password is invalid!").exists())
def _login(self, username, password):
write(username, into="Username")
write(password, into="Password")
press(ENTER)
def _logout(self):
click("Logout")
self.assertTrue(Text("You logged out of the secure area!").exists())
|
|
aa40b9ceb0d0af4fd37490e2c2f5aabbed40d5df
|
web_widget_darkroom/__init__.py
|
web_widget_darkroom/__init__.py
|
# -*- coding: utf-8 -*-
# © 2016-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
|
Add missing init in web_widget_darkroom
|
Add missing init in web_widget_darkroom
|
Python
|
agpl-3.0
|
laslabs/odoo-web,laslabs/odoo-web,laslabs/odoo-web
|
Add missing init in web_widget_darkroom
|
# -*- coding: utf-8 -*-
# © 2016-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
|
<commit_before><commit_msg>Add missing init in web_widget_darkroom<commit_after>
|
# -*- coding: utf-8 -*-
# © 2016-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
|
Add missing init in web_widget_darkroom# -*- coding: utf-8 -*-
# © 2016-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
|
<commit_before><commit_msg>Add missing init in web_widget_darkroom<commit_after># -*- coding: utf-8 -*-
# © 2016-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
|
|
4260746d6d266f63d17de2727d7b51145603c97b
|
python/connected_cell_in_a_grid.py
|
python/connected_cell_in_a_grid.py
|
class Grid(object):
def __init__(self, grid):
self.grid = grid
self.rows = len(grid)
self.cols = len(grid[0])
def largest_region(self):
return max([self.region_size(row, col)
for row in range(self.rows)
for col in range(self.cols)])
def region_size(self, row, col):
if not self.filled_cell(row, col):
return 0
visited = set()
to_visit = [(row, col)]
while to_visit:
row, col = to_visit.pop()
if (row, col) not in visited:
visited.add((row, col))
to_visit.extend(self.filled_neighbors(row, col))
return len(visited)
def filled_neighbors(self, row, col):
filled_neighbors = list(filter(lambda cell: self.filled_cell(cell[0], cell[1]), self.neighbors(row, col)))
# print("filled_neighbors: " + str(filled_neighbors))
return filled_neighbors
def neighbors(self, row, col):
neighbors = list(filter(lambda cell:
self.row_in_grid(cell[0]) and
self.col_in_grid(cell[1]),
self.potential_neighbors(row, col)))
# print("neighbors: " + str(neighbors))
return neighbors
def row_in_grid(self, row):
return 0 <= row < self.rows
def col_in_grid(self, col):
return 0 <= col < self.cols
def potential_neighbors(self, row, col):
return [(row + row_delta, col + col_delta)
for row_delta in range (-1, 2)
for col_delta in range (-1, 2)]
def filled_cell(self, row, col):
return self.grid[row][col] == 1
rows = int(input().strip())
cols = int(input().strip())
grid = [list(map(int, input().strip().split(" "))) for _ in range(rows)]
g = Grid(grid)
print(g.largest_region())
|
Solve connected cell in a grid
|
Solve connected cell in a grid
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve connected cell in a grid
|
class Grid(object):
def __init__(self, grid):
self.grid = grid
self.rows = len(grid)
self.cols = len(grid[0])
def largest_region(self):
return max([self.region_size(row, col)
for row in range(self.rows)
for col in range(self.cols)])
def region_size(self, row, col):
if not self.filled_cell(row, col):
return 0
visited = set()
to_visit = [(row, col)]
while to_visit:
row, col = to_visit.pop()
if (row, col) not in visited:
visited.add((row, col))
to_visit.extend(self.filled_neighbors(row, col))
return len(visited)
def filled_neighbors(self, row, col):
filled_neighbors = list(filter(lambda cell: self.filled_cell(cell[0], cell[1]), self.neighbors(row, col)))
# print("filled_neighbors: " + str(filled_neighbors))
return filled_neighbors
def neighbors(self, row, col):
neighbors = list(filter(lambda cell:
self.row_in_grid(cell[0]) and
self.col_in_grid(cell[1]),
self.potential_neighbors(row, col)))
# print("neighbors: " + str(neighbors))
return neighbors
def row_in_grid(self, row):
return 0 <= row < self.rows
def col_in_grid(self, col):
return 0 <= col < self.cols
def potential_neighbors(self, row, col):
return [(row + row_delta, col + col_delta)
for row_delta in range (-1, 2)
for col_delta in range (-1, 2)]
def filled_cell(self, row, col):
return self.grid[row][col] == 1
rows = int(input().strip())
cols = int(input().strip())
grid = [list(map(int, input().strip().split(" "))) for _ in range(rows)]
g = Grid(grid)
print(g.largest_region())
|
<commit_before><commit_msg>Solve connected cell in a grid<commit_after>
|
class Grid(object):
def __init__(self, grid):
self.grid = grid
self.rows = len(grid)
self.cols = len(grid[0])
def largest_region(self):
return max([self.region_size(row, col)
for row in range(self.rows)
for col in range(self.cols)])
def region_size(self, row, col):
if not self.filled_cell(row, col):
return 0
visited = set()
to_visit = [(row, col)]
while to_visit:
row, col = to_visit.pop()
if (row, col) not in visited:
visited.add((row, col))
to_visit.extend(self.filled_neighbors(row, col))
return len(visited)
def filled_neighbors(self, row, col):
filled_neighbors = list(filter(lambda cell: self.filled_cell(cell[0], cell[1]), self.neighbors(row, col)))
# print("filled_neighbors: " + str(filled_neighbors))
return filled_neighbors
def neighbors(self, row, col):
neighbors = list(filter(lambda cell:
self.row_in_grid(cell[0]) and
self.col_in_grid(cell[1]),
self.potential_neighbors(row, col)))
# print("neighbors: " + str(neighbors))
return neighbors
def row_in_grid(self, row):
return 0 <= row < self.rows
def col_in_grid(self, col):
return 0 <= col < self.cols
def potential_neighbors(self, row, col):
return [(row + row_delta, col + col_delta)
for row_delta in range (-1, 2)
for col_delta in range (-1, 2)]
def filled_cell(self, row, col):
return self.grid[row][col] == 1
rows = int(input().strip())
cols = int(input().strip())
grid = [list(map(int, input().strip().split(" "))) for _ in range(rows)]
g = Grid(grid)
print(g.largest_region())
|
Solve connected cell in a gridclass Grid(object):
def __init__(self, grid):
self.grid = grid
self.rows = len(grid)
self.cols = len(grid[0])
def largest_region(self):
return max([self.region_size(row, col)
for row in range(self.rows)
for col in range(self.cols)])
def region_size(self, row, col):
if not self.filled_cell(row, col):
return 0
visited = set()
to_visit = [(row, col)]
while to_visit:
row, col = to_visit.pop()
if (row, col) not in visited:
visited.add((row, col))
to_visit.extend(self.filled_neighbors(row, col))
return len(visited)
def filled_neighbors(self, row, col):
filled_neighbors = list(filter(lambda cell: self.filled_cell(cell[0], cell[1]), self.neighbors(row, col)))
# print("filled_neighbors: " + str(filled_neighbors))
return filled_neighbors
def neighbors(self, row, col):
neighbors = list(filter(lambda cell:
self.row_in_grid(cell[0]) and
self.col_in_grid(cell[1]),
self.potential_neighbors(row, col)))
# print("neighbors: " + str(neighbors))
return neighbors
def row_in_grid(self, row):
return 0 <= row < self.rows
def col_in_grid(self, col):
return 0 <= col < self.cols
def potential_neighbors(self, row, col):
return [(row + row_delta, col + col_delta)
for row_delta in range (-1, 2)
for col_delta in range (-1, 2)]
def filled_cell(self, row, col):
return self.grid[row][col] == 1
rows = int(input().strip())
cols = int(input().strip())
grid = [list(map(int, input().strip().split(" "))) for _ in range(rows)]
g = Grid(grid)
print(g.largest_region())
|
<commit_before><commit_msg>Solve connected cell in a grid<commit_after>class Grid(object):
def __init__(self, grid):
self.grid = grid
self.rows = len(grid)
self.cols = len(grid[0])
def largest_region(self):
return max([self.region_size(row, col)
for row in range(self.rows)
for col in range(self.cols)])
def region_size(self, row, col):
if not self.filled_cell(row, col):
return 0
visited = set()
to_visit = [(row, col)]
while to_visit:
row, col = to_visit.pop()
if (row, col) not in visited:
visited.add((row, col))
to_visit.extend(self.filled_neighbors(row, col))
return len(visited)
def filled_neighbors(self, row, col):
filled_neighbors = list(filter(lambda cell: self.filled_cell(cell[0], cell[1]), self.neighbors(row, col)))
# print("filled_neighbors: " + str(filled_neighbors))
return filled_neighbors
def neighbors(self, row, col):
neighbors = list(filter(lambda cell:
self.row_in_grid(cell[0]) and
self.col_in_grid(cell[1]),
self.potential_neighbors(row, col)))
# print("neighbors: " + str(neighbors))
return neighbors
def row_in_grid(self, row):
return 0 <= row < self.rows
def col_in_grid(self, col):
return 0 <= col < self.cols
def potential_neighbors(self, row, col):
return [(row + row_delta, col + col_delta)
for row_delta in range (-1, 2)
for col_delta in range (-1, 2)]
def filled_cell(self, row, col):
return self.grid[row][col] == 1
rows = int(input().strip())
cols = int(input().strip())
grid = [list(map(int, input().strip().split(" "))) for _ in range(rows)]
g = Grid(grid)
print(g.largest_region())
|
|
dcc8af2b9147d00a40d972c20a37b5d3ab92a26f
|
test/__init__.py
|
test/__init__.py
|
"""
Copyright (C) 2016 STFC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
Allow 'setup.py test' to see the test package
|
Allow 'setup.py test' to see the test package
|
Python
|
apache-2.0
|
tofu-rocketry/ssm,apel/ssm,stfc/ssm,tofu-rocketry/ssm,apel/ssm,stfc/ssm
|
Allow 'setup.py test' to see the test package
|
"""
Copyright (C) 2016 STFC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
<commit_before><commit_msg>Allow 'setup.py test' to see the test package<commit_after>
|
"""
Copyright (C) 2016 STFC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
Allow 'setup.py test' to see the test package"""
Copyright (C) 2016 STFC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
<commit_before><commit_msg>Allow 'setup.py test' to see the test package<commit_after>"""
Copyright (C) 2016 STFC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
|
bb8301e66c243b18eada71c4c8338f8eaa47a597
|
284_peeking_iterator.py
|
284_peeking_iterator.py
|
# https://leetcode.com/problems/peeking-iterator/
# We just need to buffer the current object of the iterator. When peek(), return the buffered object. When next(), return the buffered object, update the buffer to the next object.
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator(object):
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator(object):
def __nextItem(self):
if (self.__itr.hasNext()):
return self.__itr.next()
else:
return None
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.__itr = iterator
self.__bufferedItem = self.__nextItem()
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
return self.__bufferedItem
def next(self):
"""
:rtype: int
"""
tmp = self.__bufferedItem
self.__bufferedItem = self.__nextItem()
return tmp
def hasNext(self):
"""
:rtype: bool
"""
return self.__bufferedItem is not None
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
|
Add a solution for problem 284: Peeking Iterator.
|
Add a solution for problem 284: Peeking Iterator.
|
Python
|
apache-2.0
|
shen-yang/leetcode_solutions,shen-yang/leetcode_solutions,shen-yang/leetcode_solutions
|
Add a solution for problem 284: Peeking Iterator.
|
# https://leetcode.com/problems/peeking-iterator/
# We just need to buffer the current object of the iterator. When peek(), return the buffered object. When next(), return the buffered object and advance the buffer to the next object.
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator(object):
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator(object):
def __nextItem(self):
if (self.__itr.hasNext()):
return self.__itr.next()
else:
return None
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.__itr = iterator
self.__bufferedItem = self.__nextItem()
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
return self.__bufferedItem
def next(self):
"""
:rtype: int
"""
tmp = self.__bufferedItem
self.__bufferedItem = self.__nextItem()
return tmp
def hasNext(self):
"""
:rtype: bool
"""
return self.__bufferedItem is not None
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
|
<commit_before><commit_msg>Add a solution for problem 284: Peeking Iterator.<commit_after>
|
# https://leetcode.com/problems/peeking-iterator/
# We just need to buffer the current object of the iterator. When peek(), return the buffered object. When next(), return the buffered object and advance the buffer to the next object.
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator(object):
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator(object):
def __nextItem(self):
if (self.__itr.hasNext()):
return self.__itr.next()
else:
return None
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.__itr = iterator
self.__bufferedItem = self.__nextItem()
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
return self.__bufferedItem
def next(self):
"""
:rtype: int
"""
tmp = self.__bufferedItem
self.__bufferedItem = self.__nextItem()
return tmp
def hasNext(self):
"""
:rtype: bool
"""
return self.__bufferedItem is not None
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
|
Add a solution for problem 284: Peeking Iterator.# https://leetcode.com/problems/peeking-iterator/
# We just need to buffer the current object of the iterator. When peek(), return the buffered object. When next(), return the buffered object and advance the buffer to the next object.
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator(object):
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator(object):
def __nextItem(self):
if (self.__itr.hasNext()):
return self.__itr.next()
else:
return None
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.__itr = iterator
self.__bufferedItem = self.__nextItem()
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
return self.__bufferedItem
def next(self):
"""
:rtype: int
"""
tmp = self.__bufferedItem
self.__bufferedItem = self.__nextItem()
return tmp
def hasNext(self):
"""
:rtype: bool
"""
return self.__bufferedItem is not None
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
|
<commit_before><commit_msg>Add a solution for problem 284: Peeking Iterator.<commit_after># https://leetcode.com/problems/peeking-iterator/
# We just need to buffer the current object of the iterator. When peek(), return the buffered object. When next(), return the buffered object and advance the buffer to the next object.
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator(object):
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator(object):
def __nextItem(self):
if (self.__itr.hasNext()):
return self.__itr.next()
else:
return None
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.__itr = iterator
self.__bufferedItem = self.__nextItem()
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
return self.__bufferedItem
def next(self):
"""
:rtype: int
"""
tmp = self.__bufferedItem
self.__bufferedItem = self.__nextItem()
return tmp
def hasNext(self):
"""
:rtype: bool
"""
return self.__bufferedItem is not None
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
|
|
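A quick usage sketch for the PeekingIterator row above. The list-backed Iterator stub is hypothetical -- LeetCode only describes its interface in the comments -- and exists here solely so the example runs:

# Stand-in for LeetCode's hidden Iterator class (assumed shape).
class Iterator(object):
    def __init__(self, nums):
        self.nums = nums
        self.pos = 0
    def hasNext(self):
        return self.pos < len(self.nums)
    def next(self):
        val = self.nums[self.pos]
        self.pos += 1
        return val

it = PeekingIterator(Iterator([1, 2, 3]))
assert it.peek() == 1       # peek() does not advance
assert it.next() == 1       # next() returns the same value, then advances
assert it.next() == 2
assert it.hasNext()
assert it.next() == 3
assert not it.hasNext()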
d417e5a874cd2912e7787b85304683b12ea5fbbc
|
tools/fitsevt.py
|
tools/fitsevt.py
|
#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
            if(event["ENERGY"]>=eLo and event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
Add file to convert IUCAA FITS *.evt file to txt format suitable for input to feature extraction program
|
Add file to convert IUCAA FITS *.evt file to txt format suitable for input to feature extraction program
|
Python
|
mit
|
fauzanzaid/IUCAA-GRB-detection-Feature-extraction
|
Add file to convert IUCAA FITS *.evt file to txt format suitable for input to feature extraction program
|
#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
            if(event["ENERGY"]>=eLo and event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
<commit_before><commit_msg>Add file to convert IUCAA FITS *.evt file to txt format suitable for input to feature extraction program<commit_after>
|
#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
            if(event["ENERGY"]>=eLo and event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
Add file to convert IUCAA FITS *.evt file to txt format suitable for input to feature extraction program#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
            if(event["ENERGY"]>=eLo and event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
<commit_before><commit_msg>Add file to convert IUCAA FITS *.evt file to txt format suitable for input to feature extraction program<commit_after>#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
            if(event["ENERGY"]>=eLo and event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
|
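The heart of the script above is the time-binning arithmetic, so here it is isolated with made-up values (the FITS file, its TSTART/TSTOP keywords, and the event times are all stand-ins). Note the boundary case: an event at exactly TSTOP would index one past the end of the list, a hazard the original loop shares.

import math

tstart, tstop = 100.0, 350.0        # pretend hdulist[i].header values
bin_size = 60                       # the command-line binSize
time_range = tstop - tstart
n_bins = math.ceil(time_range / bin_size)   # ceil(250/60) == 5

count = [0] * n_bins
for t in [100.0, 130.0, 250.0, 349.9]:      # fake event times
    index = math.floor(n_bins * (t - tstart) / time_range)
    count[index] += 1
print(count)                        # [2, 0, 0, 1, 1]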
8bc6fddeb1232e7973d76bef05252f8228070ab7
|
plata/shop/templatetags/plata_product_tags.py
|
plata/shop/templatetags/plata_product_tags.py
|
from django import template
import plata
register = template.Library()
@register.simple_tag
def featured_products_for_categories(category_list, variable_name='featured_product'):
"""
{% featured_products_for_categories category_list "variable_name" %}
"""
category_list = list(category_list)
for category in category_list:
try:
setattr(category, variable_name, category.products.featured()[0])
except IndexError:
pass
return u''
|
Add template tag to get featured products for a list of categories
|
Add template tag to get featured products for a list of categories
|
Python
|
bsd-3-clause
|
allink/plata,armicron/plata,stefanklug/plata,armicron/plata,armicron/plata
|
Add template tag to get featured products for a list of categories
|
from django import template
import plata
register = template.Library()
@register.simple_tag
def featured_products_for_categories(category_list, variable_name='featured_product'):
"""
{% featured_products_for_categories category_list "variable_name" %}
"""
category_list = list(category_list)
for category in category_list:
try:
setattr(category, variable_name, category.products.featured()[0])
except IndexError:
pass
return u''
|
<commit_before><commit_msg>Add template tag to get featured products for a list of categories<commit_after>
|
from django import template
import plata
register = template.Library()
@register.simple_tag
def featured_products_for_categories(category_list, variable_name='featured_product'):
"""
{% featured_products_for_categories category_list "variable_name" %}
"""
category_list = list(category_list)
for category in category_list:
try:
setattr(category, variable_name, category.products.featured()[0])
except IndexError:
pass
return u''
|
Add template tag to get featured products for a list of categoriesfrom django import template
import plata
register = template.Library()
@register.simple_tag
def featured_products_for_categories(category_list, variable_name='featured_product'):
"""
{% featured_products_for_categories category_list "variable_name" %}
"""
category_list = list(category_list)
for category in category_list:
try:
setattr(category, variable_name, category.products.featured()[0])
except IndexError:
pass
return u''
|
<commit_before><commit_msg>Add template tag to get featured products for a list of categories<commit_after>from django import template
import plata
register = template.Library()
@register.simple_tag
def featured_products_for_categories(category_list, variable_name='featured_product'):
"""
{% featured_products_for_categories category_list "variable_name" %}
"""
category_list = list(category_list)
for category in category_list:
try:
setattr(category, variable_name, category.products.featured()[0])
except IndexError:
pass
return u''
|
|
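What the template tag above actually does, shown with plain stand-in objects instead of Django querysets (the featured() manager method belongs to the surrounding plata project and is faked here):

class FakeProducts(object):
    def __init__(self, items):
        self.items = items
    def featured(self):
        return self.items

class FakeCategory(object):
    def __init__(self, name, items):
        self.name = name
        self.products = FakeProducts(items)

cats = [FakeCategory('books', ['Dune']), FakeCategory('empty', [])]
featured_products_for_categories(cats, 'featured_product')
print(cats[0].featured_product)              # 'Dune'
print(hasattr(cats[1], 'featured_product'))  # False: the IndexError is swallowed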
bd0d8099d8d6ef36fa111c91667bf9700cfdd844
|
scikits/talkbox/misc/tests/test_find_peaks.py
|
scikits/talkbox/misc/tests/test_find_peaks.py
|
import numpy as np
from numpy.testing import TestCase, assert_array_equal, \
    assert_array_almost_equal, dec, run_module_suite
from scikits.talkbox.misc.peak_picking import find_peaks
class TestFindPeaks(TestCase):
def test_simple(self):
x = np.sin(np.linspace(0, 6 * np.pi, 256))
p = find_peaks(x, 10)
assert_array_equal(p, [21, 106, 191, 255])
if __name__ == "__main__":
run_module_suite()
|
Add simple test for find_peaks.
|
Add simple test for find_peaks.
|
Python
|
mit
|
cournape/talkbox,cournape/talkbox
|
Add simple test for find_peaks.
|
import numpy as np
from numpy.testing import TestCase, assert_array_equal, \
    assert_array_almost_equal, dec, run_module_suite
from scikits.talkbox.misc.peak_picking import find_peaks
class TestFindPeaks(TestCase):
def test_simple(self):
x = np.sin(np.linspace(0, 6 * np.pi, 256))
p = find_peaks(x, 10)
assert_array_equal(p, [21, 106, 191, 255])
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add simple test for find_peaks.<commit_after>
|
import numpy as np
from numpy.testing import TestCase, assert_array_equal, \
    assert_array_almost_equal, dec, run_module_suite
from scikits.talkbox.misc.peak_picking import find_peaks
class TestFindPeaks(TestCase):
def test_simple(self):
x = np.sin(np.linspace(0, 6 * np.pi, 256))
p = find_peaks(x, 10)
assert_array_equal(p, [21, 106, 191, 255])
if __name__ == "__main__":
run_module_suite()
|
Add simple test for find_peaks.import numpy as np
from numpy.testing import TestCase, assert_array_equal, \
    assert_array_almost_equal, dec, run_module_suite
from scikits.talkbox.misc.peak_picking import find_peaks
class TestFindPeaks(TestCase):
def test_simple(self):
x = np.sin(np.linspace(0, 6 * np.pi, 256))
p = find_peaks(x, 10)
assert_array_equal(p, [21, 106, 191, 255])
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add simple test for find_peaks.<commit_after>import numpy as np
from numpy.testing import TestCase, assert_array_equal, \
    assert_array_almost_equal, dec, run_module_suite
from scikits.talkbox.misc.peak_picking import find_peaks
class TestFindPeaks(TestCase):
def test_simple(self):
x = np.sin(np.linspace(0, 6 * np.pi, 256))
p = find_peaks(x, 10)
assert_array_equal(p, [21, 106, 191, 255])
if __name__ == "__main__":
run_module_suite()
|
|
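The expected indices in the test above can be sanity-checked against the sampled sine's strict local maxima; find_peaks itself lives elsewhere in the package, so this only verifies the fixture:

import numpy as np

x = np.sin(np.linspace(0, 6 * np.pi, 256))
interior = np.where((x[1:-1] > x[:-2]) & (x[1:-1] > x[2:]))[0] + 1
print(interior)          # [ 21 106 191]
print(x[255] > x[254])   # True: the final sample sits on a rising edge,
                         # which is why 255 also appears in the expectation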
ad2bc4f97bf267415e425463e6980ac865a20f24
|
scripts/02-web_collect/01_get_weather_data.py
|
scripts/02-web_collect/01_get_weather_data.py
|
#!/usr/bin/python3
'''
script that collects weather data from worldweatheronline.com
'''
import urllib.request
import sys
import os
import time
from datetime import date, timedelta, datetime
from calendar import monthrange
def main():
# path for files to be saved
dir_path = '..' + os.sep + '..' + os.sep + 'data' + os.sep + 'weather' + os.sep
    # command line arguments for specifying start and end dates in ISO 8601 date format
start = sys.argv[1].split('-')
end = sys.argv[2].split('-')
# create date objects
start_date = date(int(start[0]), int(start[1]), int(start[2]))
end_date = date(int(end[0]), int(end[1]), int(end[2]))
# get today's date and create date object
today = datetime.now().date()
full_month = True
# get weather data for each month between the two dates
while start_date != end_date:
# get the number of days for current month
month_days_no = monthrange(start_date.year, start_date.month)[1]
# set the end date for current iteration
month_end = start_date + timedelta(days=month_days_no - 1)
# case when last month is not a full month
if month_end > today:
month_end = today
full_month = False
# specify url for worldweatheronline.com API call
url_data = 'http://api.worldweatheronline.com/premium/v1/past-weather.ashx?q=NG1&format=json&date=' + str(
start_date) + '&enddate=' + str(month_end) + '&tp=24&key=ec3b4eb6ae0fd0c07912a156c046d'
# api call
web_url = urllib.request.urlopen(url_data)
# handle result
response = web_url.read()
data = response.decode('utf-8')
# save the result as a json file
file_name = dir_path + str(start_date.year) + "-" + str(start_date.month).zfill(2) + ".json"
with open(file_name, 'w') as outfile:
outfile.write(data)
# change the start date
start_date = month_end + timedelta(days=1)
# break if last month is not full month
if not full_month:
break
time.sleep(10)
if __name__ == '__main__':
main()
|
Add script that collects weather data from worldweatheronline.com
|
Add script that collects weather data from worldweatheronline.com
|
Python
|
apache-2.0
|
jayBana/InventoryMan,jayBana/InventoryMan,jayBana/InventoryMan,jayBana/InventoryMan
|
Add script that collects weather data from worldweatheronline.com
|
#!/usr/bin/python3
'''
script that collects weather data from worldweatheronline.com
'''
import urllib.request
import sys
import os
import time
from datetime import date, timedelta, datetime
from calendar import monthrange
def main():
# path for files to be saved
dir_path = '..' + os.sep + '..' + os.sep + 'data' + os.sep + 'weather' + os.sep
    # command line arguments for specifying start and end dates in ISO 8601 date format
start = sys.argv[1].split('-')
end = sys.argv[2].split('-')
# create date objects
start_date = date(int(start[0]), int(start[1]), int(start[2]))
end_date = date(int(end[0]), int(end[1]), int(end[2]))
# get today's date and create date object
today = datetime.now().date()
full_month = True
# get weather data for each month between the two dates
while start_date != end_date:
# get the number of days for current month
month_days_no = monthrange(start_date.year, start_date.month)[1]
# set the end date for current iteration
month_end = start_date + timedelta(days=month_days_no - 1)
# case when last month is not a full month
if month_end > today:
month_end = today
full_month = False
# specify url for worldweatheronline.com API call
url_data = 'http://api.worldweatheronline.com/premium/v1/past-weather.ashx?q=NG1&format=json&date=' + str(
start_date) + '&enddate=' + str(month_end) + '&tp=24&key=ec3b4eb6ae0fd0c07912a156c046d'
# api call
web_url = urllib.request.urlopen(url_data)
# handle result
response = web_url.read()
data = response.decode('utf-8')
# save the result as a json file
file_name = dir_path + str(start_date.year) + "-" + str(start_date.month).zfill(2) + ".json"
with open(file_name, 'w') as outfile:
outfile.write(data)
# change the start date
start_date = month_end + timedelta(days=1)
# break if last month is not full month
if not full_month:
break
time.sleep(10)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that collects weather data from worldweatheronline.com<commit_after>
|
#!/usr/bin/python3
'''
script that collects weather data from worldweatheronline.com
'''
import urllib.request
import sys
import os
import time
from datetime import date, timedelta, datetime
from calendar import monthrange
def main():
# path for files to be saved
dir_path = '..' + os.sep + '..' + os.sep + 'data' + os.sep + 'weather' + os.sep
    # command line arguments for specifying start and end dates in ISO 8601 date format
start = sys.argv[1].split('-')
end = sys.argv[2].split('-')
# create date objects
start_date = date(int(start[0]), int(start[1]), int(start[2]))
end_date = date(int(end[0]), int(end[1]), int(end[2]))
# get today's date and create date object
today = datetime.now().date()
full_month = True
# get weather data for each month between the two dates
while start_date != end_date:
# get the number of days for current month
month_days_no = monthrange(start_date.year, start_date.month)[1]
# set the end date for current iteration
month_end = start_date + timedelta(days=month_days_no - 1)
# case when last month is not a full month
if month_end > today:
month_end = today
full_month = False
# specify url for worldweatheronline.com API call
url_data = 'http://api.worldweatheronline.com/premium/v1/past-weather.ashx?q=NG1&format=json&date=' + str(
start_date) + '&enddate=' + str(month_end) + '&tp=24&key=ec3b4eb6ae0fd0c07912a156c046d'
# api call
web_url = urllib.request.urlopen(url_data)
# handle result
response = web_url.read()
data = response.decode('utf-8')
# save the result as a json file
file_name = dir_path + str(start_date.year) + "-" + str(start_date.month).zfill(2) + ".json"
with open(file_name, 'w') as outfile:
outfile.write(data)
# change the start date
start_date = month_end + timedelta(days=1)
# break if last month is not full month
if not full_month:
break
time.sleep(10)
if __name__ == '__main__':
main()
|
Add script that collects weather data from worldweatheronline.com#!/usr/bin/python3
'''
script that collects weather data from worldweatheronline.com
'''
import urllib.request
import sys
import os
import time
from datetime import date, timedelta, datetime
from calendar import monthrange
def main():
# path for files to be saved
dir_path = '..' + os.sep + '..' + os.sep + 'data' + os.sep + 'weather' + os.sep
    # command line arguments for specifying start and end dates in ISO 8601 date format
start = sys.argv[1].split('-')
end = sys.argv[2].split('-')
# create date objects
start_date = date(int(start[0]), int(start[1]), int(start[2]))
end_date = date(int(end[0]), int(end[1]), int(end[2]))
# get today's date and create date object
today = datetime.now().date()
full_month = True
# get weather data for each month between the two dates
while start_date != end_date:
# get the number of days for current month
month_days_no = monthrange(start_date.year, start_date.month)[1]
# set the end date for current iteration
month_end = start_date + timedelta(days=month_days_no - 1)
# case when last month is not a full month
if month_end > today:
month_end = today
full_month = False
# specify url for worldweatheronline.com API call
url_data = 'http://api.worldweatheronline.com/premium/v1/past-weather.ashx?q=NG1&format=json&date=' + str(
start_date) + '&enddate=' + str(month_end) + '&tp=24&key=ec3b4eb6ae0fd0c07912a156c046d'
# api call
web_url = urllib.request.urlopen(url_data)
# handle result
response = web_url.read()
data = response.decode('utf-8')
# save the result as a json file
file_name = dir_path + str(start_date.year) + "-" + str(start_date.month).zfill(2) + ".json"
with open(file_name, 'w') as outfile:
outfile.write(data)
# change the start date
start_date = month_end + timedelta(days=1)
# break if last month is not full month
if not full_month:
break
time.sleep(10)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that collects weather data from worldweatheronline.com<commit_after>#!/usr/bin/python3
'''
script that collects weather data from worldweatheronline.com
'''
import urllib.request
import sys
import os
import time
from datetime import date, timedelta, datetime
from calendar import monthrange
def main():
# path for files to be saved
dir_path = '..' + os.sep + '..' + os.sep + 'data' + os.sep + 'weather' + os.sep
    # command line arguments for specifying start and end dates in ISO 8601 date format
start = sys.argv[1].split('-')
end = sys.argv[2].split('-')
# create date objects
start_date = date(int(start[0]), int(start[1]), int(start[2]))
end_date = date(int(end[0]), int(end[1]), int(end[2]))
# get today's date and create date object
today = datetime.now().date()
full_month = True
# get weather data for each month between the two dates
while start_date != end_date:
# get the number of days for current month
month_days_no = monthrange(start_date.year, start_date.month)[1]
# set the end date for current iteration
month_end = start_date + timedelta(days=month_days_no - 1)
# case when last month is not a full month
if month_end > today:
month_end = today
full_month = False
# specify url for worldweatheronline.com API call
url_data = 'http://api.worldweatheronline.com/premium/v1/past-weather.ashx?q=NG1&format=json&date=' + str(
start_date) + '&enddate=' + str(month_end) + '&tp=24&key=ec3b4eb6ae0fd0c07912a156c046d'
# api call
web_url = urllib.request.urlopen(url_data)
# handle result
response = web_url.read()
data = response.decode('utf-8')
# save the result as a json file
file_name = dir_path + str(start_date.year) + "-" + str(start_date.month).zfill(2) + ".json"
with open(file_name, 'w') as outfile:
outfile.write(data)
# change the start date
start_date = month_end + timedelta(days=1)
# break if last month is not full month
if not full_month:
break
time.sleep(10)
if __name__ == '__main__':
main()
|
|
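The month-windowing loop in the script above is easier to follow without the API call; this sketch prints the per-request ranges for arbitrary dates. As in the original, each window's length comes from the calendar month of its start date, so a mid-month start produces drifting windows rather than true calendar months.

from datetime import date, timedelta
from calendar import monthrange

start_date = date(2016, 1, 15)
end_date = date(2016, 3, 10)
today = date(2016, 3, 5)          # pretend "today" so the last window clips

while start_date != end_date:
    days = monthrange(start_date.year, start_date.month)[1]
    month_end = start_date + timedelta(days=days - 1)
    if month_end > today:
        month_end = today         # partial final month
    print(start_date, '->', month_end)   # 2016-01-15 -> 2016-02-14, then
    start_date = month_end + timedelta(days=1)   # 2016-02-15 -> 2016-03-05
    if month_end == today:
        break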
d6866c6f2bbf14d9904aacfa2c84b3499b2b093d
|
make_toponym_xml.py
|
make_toponym_xml.py
|
#Make Annotation Files
import os
docgeo_directory = "/Users/grant/devel/GeoAnnotate/docgeo_spans_dloaded_103115"
toponym_directory = "/Users/grant/devel/GeoAnnotate/toponym_annotated_103115"
for f in os.listdir(docgeo_directory):
fp = os.path.join(docgeo_directory, f)
vol = fp.split('-')[1].split('.')[0]
vol_stop_strings = {'61':"The detachment from Army of the Tennessee re-embarks for Vicksburg, Miss.",
'77':"On taking command, by the request of my superior officer, Colonel F. Campbell, by direction of Colonel McMillen",
'78':"I suggest that rations be sent to Colonel Wolfe's brigade, and that they",
'79': "Two prisoners brought in on train, captured near Midway.",
}
for f in os.listdir(toponym_directory):
fp = os.path.join(toponym_directory, f)
vol = fp.split('-')[1].split('.')[0]
|
Add a python script that stitches together annotated toponym json
|
Add a python script that stitches together annotated toponym json
|
Python
|
apache-2.0
|
utcompling/GeoAnnotate,utcompling/GeoAnnotate,utcompling/GeoAnnotate,utcompling/GeoAnnotate,utcompling/GeoAnnotate
|
Add a python script that stitches together annotated toponym json
|
#Make Annotation Files
import os
docgeo_directory = "/Users/grant/devel/GeoAnnotate/docgeo_spans_dloaded_103115"
toponym_directory = "/Users/grant/devel/GeoAnnotate/toponym_annotated_103115"
for f in os.listdir(docgeo_directory):
fp = os.path.join(docgeo_directory, f)
vol = fp.split('-')[1].split('.')[0]
vol_stop_strings = {'61':"The detachment from Army of the Tennessee re-embarks for Vicksburg, Miss.",
'77':"On taking command, by the request of my superior officer, Colonel F. Campbell, by direction of Colonel McMillen",
'78':"I suggest that rations be sent to Colonel Wolfe's brigade, and that they",
'79': "Two prisoners brought in on train, captured near Midway.",
}
for f in os.listdir(toponym_directory):
fp = os.path.join(toponym_directory, f)
vol = fp.split('-')[1].split('.')[0]
|
<commit_before><commit_msg>Add a python script that stitches together annotated toponym json<commit_after>
|
#Make Annotation Files
import os
docgeo_directory = "/Users/grant/devel/GeoAnnotate/docgeo_spans_dloaded_103115"
toponym_directory = "/Users/grant/devel/GeoAnnotate/toponym_annotated_103115"
for f in os.listdir(docgeo_directory):
fp = os.path.join(docgeo_directory, f)
vol = fp.split('-')[1].split('.')[0]
vol_stop_strings = {'61':"The detachment from Army of the Tennessee re-embarks for Vicksburg, Miss.",
'77':"On taking command, by the request of my superior officer, Colonel F. Campbell, by direction of Colonel McMillen",
'78':"I suggest that rations be sent to Colonel Wolfe's brigade, and that they",
'79': "Two prisoners brought in on train, captured near Midway.",
}
for f in os.listdir(toponym_directory):
fp = os.path.join(toponym_directory, f)
vol = fp.split('-')[1].split('.')[0]
|
Add a python script that stitches together annotated toponym json#Make Annotation Files
import os
docgeo_directory = "/Users/grant/devel/GeoAnnotate/docgeo_spans_dloaded_103115"
toponym_directory = "/Users/grant/devel/GeoAnnotate/toponym_annotated_103115"
for f in os.listdir(docgeo_directory):
fp = os.path.join(docgeo_directory, f)
vol = fp.split('-')[1].split('.')[0]
vol_stop_strings = {'61':"The detachment from Army of the Tennessee re-embarks for Vicksburg, Miss.",
'77':"On taking command, by the request of my superior officer, Colonel F. Campbell, by direction of Colonel McMillen",
'78':"I suggest that rations be sent to Colonel Wolfe's brigade, and that they",
'79': "Two prisoners brought in on train, captured near Midway.",
}
for f in os.listdir(toponym_directory):
fp = os.path.join(toponym_directory, f)
vol = fp.split('-')[1].split('.')[0]
|
<commit_before><commit_msg>Add a python script that stitches together annotated toponym json<commit_after>#Make Annotation Files
import os
docgeo_directory = "/Users/grant/devel/GeoAnnotate/docgeo_spans_dloaded_103115"
toponym_directory = "/Users/grant/devel/GeoAnnotate/toponym_annotated_103115"
for f in os.listdir(docgeo_directory):
fp = os.path.join(docgeo_directory, f)
vol = fp.split('-')[1].split('.')[0]
vol_stop_strings = {'61':"The detachment from Army of the Tennessee re-embarks for Vicksburg, Miss.",
'77':"On taking command, by the request of my superior officer, Colonel F. Campbell, by direction of Colonel McMillen",
'78':"I suggest that rations be sent to Colonel Wolfe's brigade, and that they",
'79': "Two prisoners brought in on train, captured near Midway.",
}
for f in os.listdir(toponym_directory):
fp = os.path.join(toponym_directory, f)
vol = fp.split('-')[1].split('.')[0]
|
|
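The only logic the toponym script exercises so far is pulling a volume number out of a filename; a quick check with a hypothetical name, since the real naming scheme isn't shown in this excerpt:

fname = 'annotations-61.json'            # hypothetical example filename
vol = fname.split('-')[1].split('.')[0]
print(vol)                               # '61'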
ebcc2e9d4295b1e85e43c7b7a01c69a7f28193a5
|
hera_mc/tests/test_utils.py
|
hera_mc/tests/test_utils.py
|
import nose.tools as nt
from .. import utils
def test_reraise_context():
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info: Initial Exception message.')
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info %s', 'and then more')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info and then more: Initial Exception message.')
with nt.assert_raises(EnvironmentError) as cm:
try:
raise EnvironmentError(1, 'some bad problem')
except EnvironmentError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[1], 'Add some info: some bad problem')
|
Add testing for reraise function
|
Add testing for reraise function
|
Python
|
bsd-2-clause
|
HERA-Team/hera_mc,HERA-Team/hera_mc,HERA-Team/Monitor_and_Control
|
Add testing for reraise function
|
import nose.tools as nt
from .. import utils
def test_reraise_context():
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info: Initial Exception message.')
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info %s', 'and then more')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info and then more: Initial Exception message.')
with nt.assert_raises(EnvironmentError) as cm:
try:
raise EnvironmentError(1, 'some bad problem')
except EnvironmentError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[1], 'Add some info: some bad problem')
|
<commit_before><commit_msg>Add testing for reraise function<commit_after>
|
import nose.tools as nt
from .. import utils
def test_reraise_context():
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info: Initial Exception message.')
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info %s', 'and then more')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info and then more: Initial Exception message.')
with nt.assert_raises(EnvironmentError) as cm:
try:
raise EnvironmentError(1, 'some bad problem')
except EnvironmentError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[1], 'Add some info: some bad problem')
|
Add testing for reraise functionimport nose.tools as nt
from .. import utils
def test_reraise_context():
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info: Initial Exception message.')
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info %s', 'and then more')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info and then more: Initial Exception message.')
with nt.assert_raises(EnvironmentError) as cm:
try:
raise EnvironmentError(1, 'some bad problem')
except EnvironmentError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[1], 'Add some info: some bad problem')
|
<commit_before><commit_msg>Add testing for reraise function<commit_after>import nose.tools as nt
from .. import utils
def test_reraise_context():
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info: Initial Exception message.')
with nt.assert_raises(ValueError) as cm:
try:
raise ValueError('Initial Exception message.')
except ValueError:
utils._reraise_context('Add some info %s', 'and then more')
ex = cm.exception
nt.assert_equal(ex.args[0], 'Add some info and then more: Initial Exception message.')
with nt.assert_raises(EnvironmentError) as cm:
try:
raise EnvironmentError(1, 'some bad problem')
except EnvironmentError:
utils._reraise_context('Add some info')
ex = cm.exception
nt.assert_equal(ex.args[1], 'Add some info: some bad problem')
|
|
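The tests above pin down _reraise_context's behaviour without showing its body. One implementation that satisfies all three assertions -- a reconstruction for illustration, not the actual hera_mc code:

import sys

def _reraise_context(fmt, *args):
    # Prefix the active exception's message with extra context, then
    # re-raise it. EnvironmentError keeps errno in args[0], so its
    # message lives in args[1]; other exceptions use args[0].
    if args:
        fmt = fmt % args
    exc = sys.exc_info()[1]
    if isinstance(exc, EnvironmentError) and len(exc.args) > 1:
        exc.args = (exc.args[0], fmt + ': ' + exc.args[1]) + exc.args[2:]
    else:
        exc.args = (fmt + ': ' + exc.args[0],) + exc.args[1:]
    raise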
158b38bdeff8b38c31766968f3fb5bdb73a85b6a
|
CodeFights/areEquallyStrong.py
|
CodeFights/areEquallyStrong.py
|
#!/usr/local/bin/python
# Code Fights Are Equally Strong Problem
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
s = {yourLeft, yourRight, friendsLeft, friendsRight}
return (
len(s) <= 2 and
max(yourLeft, yourRight) == max(friendsLeft, friendsRight)
)
def main():
tests = [
[10, 15, 15, 10, True],
[15, 10, 15, 10, True],
[15, 10, 15, 9, False],
[10, 5, 5, 10, True],
[10, 15, 5, 20, False],
[10, 20, 10, 20, True],
[5, 20, 20, 5, True],
[20, 15, 5, 20, False],
[5, 10, 5, 10, True],
[1, 10, 10, 0, False],
[5, 5, 10, 10, False],
[10, 5, 10, 6, False],
[1, 1, 1, 1, True]
]
for t in tests:
res = areEquallyStrong(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: areEquallyStrong({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: areEquallyStrong({}, {}, {}, {}) returned {}, should have returned {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
Solve Code Fights are equally strong problem
|
Solve Code Fights are equally strong problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights are equally strong problem
|
#!/usr/local/bin/python
# Code Fights Are Equally Strong Problem
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
s = {yourLeft, yourRight, friendsLeft, friendsRight}
return (
len(s) <= 2 and
max(yourLeft, yourRight) == max(friendsLeft, friendsRight)
)
def main():
tests = [
[10, 15, 15, 10, True],
[15, 10, 15, 10, True],
[15, 10, 15, 9, False],
[10, 5, 5, 10, True],
[10, 15, 5, 20, False],
[10, 20, 10, 20, True],
[5, 20, 20, 5, True],
[20, 15, 5, 20, False],
[5, 10, 5, 10, True],
[1, 10, 10, 0, False],
[5, 5, 10, 10, False],
[10, 5, 10, 6, False],
[1, 1, 1, 1, True]
]
for t in tests:
res = areEquallyStrong(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: areEquallyStrong({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: areEquallyStrong({}, {}, {}, {}) returned {}, should have returned {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights are equally strong problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Are Equally Strong Problem
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
s = {yourLeft, yourRight, friendsLeft, friendsRight}
return (
len(s) <= 2 and
max(yourLeft, yourRight) == max(friendsLeft, friendsRight)
)
def main():
tests = [
[10, 15, 15, 10, True],
[15, 10, 15, 10, True],
[15, 10, 15, 9, False],
[10, 5, 5, 10, True],
[10, 15, 5, 20, False],
[10, 20, 10, 20, True],
[5, 20, 20, 5, True],
[20, 15, 5, 20, False],
[5, 10, 5, 10, True],
[1, 10, 10, 0, False],
[5, 5, 10, 10, False],
[10, 5, 10, 6, False],
[1, 1, 1, 1, True]
]
for t in tests:
res = areEquallyStrong(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: areEquallyStrong({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: areEquallyStrong({}, {}, {}, {}) returned {}, should have returned {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
Solve Code Fights are equally strong problem#!/usr/local/bin/python
# Code Fights Are Equally Strong Problem
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
s = {yourLeft, yourRight, friendsLeft, friendsRight}
return (
len(s) <= 2 and
max(yourLeft, yourRight) == max(friendsLeft, friendsRight)
)
def main():
tests = [
[10, 15, 15, 10, True],
[15, 10, 15, 10, True],
[15, 10, 15, 9, False],
[10, 5, 5, 10, True],
[10, 15, 5, 20, False],
[10, 20, 10, 20, True],
[5, 20, 20, 5, True],
[20, 15, 5, 20, False],
[5, 10, 5, 10, True],
[1, 10, 10, 0, False],
[5, 5, 10, 10, False],
[10, 5, 10, 6, False],
[1, 1, 1, 1, True]
]
for t in tests:
res = areEquallyStrong(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: areEquallyStrong({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: areEquallyStrong({}, {}, {}, {}) returned {}, should have returned {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights are equally strong problem<commit_after>#!/usr/local/bin/python
# Code Fights Are Equally Strong Problem
def areEquallyStrong(yourLeft, yourRight, friendsLeft, friendsRight):
s = {yourLeft, yourRight, friendsLeft, friendsRight}
return (
len(s) <= 2 and
max(yourLeft, yourRight) == max(friendsLeft, friendsRight)
)
def main():
tests = [
[10, 15, 15, 10, True],
[15, 10, 15, 10, True],
[15, 10, 15, 9, False],
[10, 5, 5, 10, True],
[10, 15, 5, 20, False],
[10, 20, 10, 20, True],
[5, 20, 20, 5, True],
[20, 15, 5, 20, False],
[5, 10, 5, 10, True],
[1, 10, 10, 0, False],
[5, 5, 10, 10, False],
[10, 5, 10, 6, False],
[1, 1, 1, 1, True]
]
for t in tests:
res = areEquallyStrong(t[0], t[1], t[2], t[3])
if t[4] == res:
print("PASSED: areEquallyStrong({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print("FAILED: areEquallyStrong({}, {}, {}, {}) returned {}, should have returned {}"
.format(t[0], t[1], t[2], t[3], res, t[4]))
if __name__ == '__main__':
main()
|
|
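One caveat on the set-based check above: it accepts an asymmetric case the test table doesn't cover, since two distinct values plus a matching max aren't enough to match both arms. A min/max variant closes the gap:

# areEquallyStrong(3, 10, 10, 10): s == {3, 10}, len 2, maxes match,
# so it returns True -- yet strengths {3, 10} != {10, 10}.
def are_equally_strong(a, b, c, d):
    return min(a, b) == min(c, d) and max(a, b) == max(c, d)

assert are_equally_strong(10, 15, 15, 10)
assert not are_equally_strong(3, 10, 10, 10)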
859bc842da554e7e85b3684ade024ee533142d0b
|
modules/__init__.py
|
modules/__init__.py
|
import botconfig
from settings import wolfgame as var
# Todo: Allow game modes to be set via config
# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
if not setting.isupper():
continue # Not a setting
if not setting in var.__dict__.keys():
continue # Don't carry over config-only settings
# If we got that far, it's valid
setattr(var, setting, value)
|
Add ability to carry settings from botconfig to var.
|
Add ability to carry settings from botconfig to var.
|
Python
|
bsd-2-clause
|
Cr0wb4r/lykos,billion57/lykos,Diitto/lykos,Agent-Isai/lykos
|
Add ability to carry settings from botconfig to var.
|
import botconfig
from settings import wolfgame as var
# Todo: Allow game modes to be set via config
# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
if not setting.isupper():
continue # Not a setting
if not setting in var.__dict__.keys():
continue # Don't carry over config-only settings
# If we got that far, it's valid
setattr(var, setting, value)
|
<commit_before><commit_msg>Add ability to carry settings from botconfig to var.<commit_after>
|
import botconfig
from settings import wolfgame as var
# Todo: Allow game modes to be set via config
# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
if not setting.isupper():
continue # Not a setting
if not setting in var.__dict__.keys():
continue # Don't carry over config-only settings
# If we got that far, it's valid
setattr(var, setting, value)
|
Add ability to carry settings from botconfig to var.import botconfig
from settings import wolfgame as var
# Todo: Allow game modes to be set via config
# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
if not setting.isupper():
continue # Not a setting
if not setting in var.__dict__.keys():
continue # Don't carry over config-only settings
# If we got that far, it's valid
setattr(var, setting, value)
|
<commit_before><commit_msg>Add ability to carry settings from botconfig to var.<commit_after>import botconfig
from settings import wolfgame as var
# Todo: Allow game modes to be set via config
# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
if not setting.isupper():
continue # Not a setting
if not setting in var.__dict__.keys():
continue # Don't carry over config-only settings
# If we got that far, it's valid
setattr(var, setting, value)
|
|
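The carry-over loop above, demonstrated with stand-in modules (types.SimpleNamespace replaces botconfig and settings.wolfgame in this sketch):

from types import SimpleNamespace

botconfig = SimpleNamespace(NICK='mybot', PING_WAIT=300, debug=True)
var = SimpleNamespace(PING_WAIT=120, MAX_PLAYERS=24)

for setting, value in botconfig.__dict__.items():
    if not setting.isupper():
        continue                 # 'debug' is not a setting
    if setting not in var.__dict__:
        continue                 # 'NICK' is config-only
    setattr(var, setting, value)

print(var.PING_WAIT)             # 300 -- carried over
print(var.MAX_PLAYERS)           # 24  -- untouched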
dc2040c1e30a21224145f02ec339e600a53bc823
|
test/buildings.py
|
test/buildings.py
|
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server import models
from django.core import cache
from mock import patch
from django.test.utils import override_settings
import simplejson as json
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SPOT_FORM='spotseeker_server.default_forms.spot.DefaultSpotForm')
class BuildingTest(TestCase):
""" Tests getting building information
"""
def setUp(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# creating testing spots
# keep track of the buildings of the created spots
self.building_list = []
self.spots_counts = 100
for x in range(1, self.spots_counts + 1):
new_name = "Test Spot " + str(x)
new_building_name = "Test Building No." + str(x) + "(TB" + str(x) + ")"
spot = Spot.objects.create(name=new_name, building_name=new_building_name)
spot.save()
self.building_list.append(new_building_name)
def test_json(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# use the test client to get the building information
c = Client()
url = '/api/v1/buildings'
response = c.get(url)
self.assertEquals(response["Content-Type"], "application/json", "Make sure it has a json header")
# compare the submitted building json against the json got back from the GET request
post_building = sorted(self.building_list)
get_building = json.loads(response.content)
self.assertEquals(post_building, get_building, "Incorrect building JSON")
|
Add unit test for building API v1
|
SPOT-1101: Add unit test for building API v1
|
Python
|
apache-2.0
|
uw-it-aca/spotseeker_server,uw-it-aca/spotseeker_server,uw-it-aca/spotseeker_server
|
SPOT-1101: Add unit test for building API v1
|
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server import models
from django.core import cache
from mock import patch
from django.test.utils import override_settings
import simplejson as json
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SPOT_FORM='spotseeker_server.default_forms.spot.DefaultSpotForm')
class BuildingTest(TestCase):
""" Tests getting building information
"""
def setUp(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# creating testing spots
# keep track of the buildings of the created spots
self.building_list = []
self.spots_counts = 100
for x in range(1, self.spots_counts + 1):
new_name = "Test Spot " + str(x)
new_building_name = "Test Building No." + str(x) + "(TB" + str(x) + ")"
spot = Spot.objects.create(name=new_name, building_name=new_building_name)
spot.save()
self.building_list.append(new_building_name)
def test_json(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# use the test client to get the building information
c = Client()
url = '/api/v1/buildings'
response = c.get(url)
self.assertEquals(response["Content-Type"], "application/json", "Make sure it has a json header")
# compare the submitted building json against the json got back from the GET request
post_building = sorted(self.building_list)
get_building = json.loads(response.content)
self.assertEquals(post_building, get_building, "Incorrect building JSON")
|
<commit_before><commit_msg>SPOT-1101: Add unit test for building API v1<commit_after>
|
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server import models
from django.core import cache
from mock import patch
from django.test.utils import override_settings
import simplejson as json
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SPOT_FORM='spotseeker_server.default_forms.spot.DefaultSpotForm')
class BuildingTest(TestCase):
""" Tests getting building information
"""
def setUp(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# creating testing spots
# keep track of the buildings of the created spots
self.building_list = []
self.spots_counts = 100
for x in range(1, self.spots_counts + 1):
new_name = "Test Spot " + str(x)
new_building_name = "Test Building No." + str(x) + "(TB" + str(x) + ")"
spot = Spot.objects.create(name=new_name, building_name=new_building_name)
spot.save()
self.building_list.append(new_building_name)
def test_json(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# use the test client to get the building information
c = Client()
url = '/api/v1/buildings'
response = c.get(url)
self.assertEquals(response["Content-Type"], "application/json", "Make sure it has a json header")
# compare the submitted building json against the json got back from the GET request
post_building = sorted(self.building_list)
get_building = json.loads(response.content)
self.assertEquals(post_building, get_building, "Incorrect building JSON")
|
SPOT-1101: Add unit test for building API v1from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server import models
from django.core import cache
from mock import patch
from django.test.utils import override_settings
import simplejson as json
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SPOT_FORM='spotseeker_server.default_forms.spot.DefaultSpotForm')
class BuildingTest(TestCase):
""" Tests getting building information
"""
def setUp(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# creating testing spots
# keep track of the buildings of the created spots
self.building_list = []
self.spots_counts = 100
for x in range(1, self.spots_counts + 1):
new_name = "Test Spot " + str(x)
new_building_name = "Test Building No." + str(x) + "(TB" + str(x) + ")"
spot = Spot.objects.create(name=new_name, building_name=new_building_name)
spot.save()
self.building_list.append(new_building_name)
def test_json(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# use the test client to get the building information
c = Client()
url = '/api/v1/buildings'
response = c.get(url)
self.assertEquals(response["Content-Type"], "application/json", "Make sure it has a json header")
# compare the submitted building json against the json got back from the GET request
post_building = sorted(self.building_list)
get_building = json.loads(response.content)
self.assertEquals(post_building, get_building, "Incorrect building JSON")
|
<commit_before><commit_msg>SPOT-1101: Add unit test for building API v1<commit_after>from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server import models
from django.core import cache
from mock import patch
from django.test.utils import override_settings
import simplejson as json
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SPOT_FORM='spotseeker_server.default_forms.spot.DefaultSpotForm')
class BuildingTest(TestCase):
""" Tests getting building information
"""
def setUp(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# creating testing spots
# keep track of the buildings of the created spots
self.building_list = []
self.spots_counts = 100
for x in range(1, self.spots_counts + 1):
new_name = "Test Spot " + str(x)
new_building_name = "Test Building No." + str(x) + "(TB" + str(x) + ")"
spot = Spot.objects.create(name=new_name, building_name=new_building_name)
spot.save()
self.building_list.append(new_building_name)
def test_json(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
# use the test client to get the building information
c = Client()
url = '/api/v1/buildings'
response = c.get(url)
self.assertEquals(response["Content-Type"], "application/json", "Make sure it has a json header")
# compare the submitted building json against the json got back from the GET request
post_building = sorted(self.building_list)
get_building = json.loads(response.content)
self.assertEquals(post_building, get_building, "Incorrect building JSON")
|
|
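One subtlety in the assertion above: with 100 numbered names, sorted() orders the submitted list lexicographically, so the comparison only passes if the endpoint sorts the same way:

names = ['Test Building No.%d(TB%d)' % (x, x) for x in (1, 2, 10, 100)]
print(sorted(names))
# ['Test Building No.1(TB1)', 'Test Building No.10(TB10)',
#  'Test Building No.100(TB100)', 'Test Building No.2(TB2)']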
ed92a8932dc0647643a79b53fa0f5885a59d31a5
|
LiSE/LiSE/tests/test_examples.py
|
LiSE/LiSE/tests/test_examples.py
|
from LiSE.examples import college, kobold, polygons, sickle
def test_college(engy):
college.install(engy)
engy.turn = 10 # wake up the students
engy.next_turn()
def test_kobold(engy):
kobold.inittest(engy, shrubberies=20, kobold_sprint_chance=.9)
for i in range(10):
engy.next_turn()
def test_polygons(engy):
polygons.install(engy)
for i in range(10):
engy.next_turn()
def test_sickle(engy):
sickle.install(engy)
for i in range(10):
engy.next_turn()
|
Add tests to make sure the examples run
|
Add tests to make sure the examples run
|
Python
|
agpl-3.0
|
LogicalDash/LiSE,LogicalDash/LiSE
|
Add tests to make sure the examples run
|
from LiSE.examples import college, kobold, polygons, sickle
def test_college(engy):
college.install(engy)
engy.turn = 10 # wake up the students
engy.next_turn()
def test_kobold(engy):
kobold.inittest(engy, shrubberies=20, kobold_sprint_chance=.9)
for i in range(10):
engy.next_turn()
def test_polygons(engy):
polygons.install(engy)
for i in range(10):
engy.next_turn()
def test_sickle(engy):
sickle.install(engy)
for i in range(10):
engy.next_turn()
|
<commit_before><commit_msg>Add tests to make sure the examples run<commit_after>
|
from LiSE.examples import college, kobold, polygons, sickle
def test_college(engy):
college.install(engy)
engy.turn = 10 # wake up the students
engy.next_turn()
def test_kobold(engy):
kobold.inittest(engy, shrubberies=20, kobold_sprint_chance=.9)
for i in range(10):
engy.next_turn()
def test_polygons(engy):
polygons.install(engy)
for i in range(10):
engy.next_turn()
def test_sickle(engy):
sickle.install(engy)
for i in range(10):
engy.next_turn()
|
Add tests to make sure the examples runfrom LiSE.examples import college, kobold, polygons, sickle
def test_college(engy):
college.install(engy)
engy.turn = 10 # wake up the students
engy.next_turn()
def test_kobold(engy):
kobold.inittest(engy, shrubberies=20, kobold_sprint_chance=.9)
for i in range(10):
engy.next_turn()
def test_polygons(engy):
polygons.install(engy)
for i in range(10):
engy.next_turn()
def test_sickle(engy):
sickle.install(engy)
for i in range(10):
engy.next_turn()
|
<commit_before><commit_msg>Add tests to make sure the examples run<commit_after>from LiSE.examples import college, kobold, polygons, sickle
def test_college(engy):
college.install(engy)
engy.turn = 10 # wake up the students
engy.next_turn()
def test_kobold(engy):
kobold.inittest(engy, shrubberies=20, kobold_sprint_chance=.9)
for i in range(10):
engy.next_turn()
def test_polygons(engy):
polygons.install(engy)
for i in range(10):
engy.next_turn()
def test_sickle(engy):
sickle.install(engy)
for i in range(10):
engy.next_turn()
|
|
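The tests above lean on an engy fixture that isn't part of this excerpt; a plausible conftest.py shape for it, assumed rather than taken from the LiSE repo (the Engine constructor arguments in particular are a guess):

import pytest
from LiSE import Engine

@pytest.fixture
def engy(tmp_path):
    eng = Engine(str(tmp_path))   # throwaway world directory per test
    yield eng
    eng.close()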
18d5005f4255b0d0c9687c4ae535a652da6a6b31
|
bin/utils/removePasswords.py
|
bin/utils/removePasswords.py
|
import sys
from MaKaC.common.db import DBMgr
from MaKaC.user import AvatarHolder
from MaKaC.authentication import AuthenticatorMgr
from MaKaC.authentication.LocalAuthentication import LocalIdentity
print('This script will remove all local identities from users.')
print('This will remove passwords from the database and prevent them from')
print('logging in locally (so you need e.g. NICE or LDAP authentication)')
print
if raw_input('Do you want to continue? [yes|NO]: ').lower() != 'yes':
print 'Cancelled.'
sys.exit(0)
DBMgr.getInstance().startRequest()
ah = AvatarHolder()
am = AuthenticatorMgr()
for avatar in ah.getValuesToList():
for identity in avatar.getIdentityList():
if isinstance(identity, LocalIdentity):
print('Removing LocalIdentity(%s, %s) from %s' %
(identity.getLogin(), len(identity.password) * '*',
avatar.getFullName()))
am.removeIdentity(identity)
avatar.removeIdentity(identity)
DBMgr.getInstance().endRequest()
|
Add script to remove all local identities
|
[ADD] Add script to remove all local identities
|
Python
|
mit
|
mvidalgarcia/indico,indico/indico,OmeGak/indico,mvidalgarcia/indico,ThiefMaster/indico,pferreir/indico,DirkHoffmann/indico,mic4ael/indico,OmeGak/indico,mic4ael/indico,OmeGak/indico,mic4ael/indico,indico/indico,DirkHoffmann/indico,indico/indico,pferreir/indico,ThiefMaster/indico,pferreir/indico,mic4ael/indico,mvidalgarcia/indico,OmeGak/indico,ThiefMaster/indico,ThiefMaster/indico,indico/indico,DirkHoffmann/indico,DirkHoffmann/indico,mvidalgarcia/indico,pferreir/indico
|
[ADD] Add script to remove all local identities
|
import sys
from MaKaC.common.db import DBMgr
from MaKaC.user import AvatarHolder
from MaKaC.authentication import AuthenticatorMgr
from MaKaC.authentication.LocalAuthentication import LocalIdentity
print('This script will remove all local identities from users.')
print('This will remove passwords from the database and prevent them from')
print('logging in locally (so you need e.g. NICE or LDAP authentication)')
print
if raw_input('Do you want to continue? [yes|NO]: ').lower() != 'yes':
print 'Cancelled.'
sys.exit(0)
DBMgr.getInstance().startRequest()
ah = AvatarHolder()
am = AuthenticatorMgr()
for avatar in ah.getValuesToList():
for identity in avatar.getIdentityList():
if isinstance(identity, LocalIdentity):
print('Removing LocalIdentity(%s, %s) from %s' %
(identity.getLogin(), len(identity.password) * '*',
avatar.getFullName()))
am.removeIdentity(identity)
avatar.removeIdentity(identity)
DBMgr.getInstance().endRequest()
|
<commit_before><commit_msg>[ADD] Add script to remove all local identities<commit_after>
|
import sys
from MaKaC.common.db import DBMgr
from MaKaC.user import AvatarHolder
from MaKaC.authentication import AuthenticatorMgr
from MaKaC.authentication.LocalAuthentication import LocalIdentity
print('This script will remove all local identities from users.')
print('This will remove passwords from the database and prevent them from')
print('logging in locally (so you need e.g. NICE or LDAP authentication)')
print
if raw_input('Do you want to continue? [yes|NO]: ').lower() != 'yes':
print 'Cancelled.'
sys.exit(0)
DBMgr.getInstance().startRequest()
ah = AvatarHolder()
am = AuthenticatorMgr()
for avatar in ah.getValuesToList():
for identity in avatar.getIdentityList():
if isinstance(identity, LocalIdentity):
print('Removing LocalIdentity(%s, %s) from %s' %
(identity.getLogin(), len(identity.password) * '*',
avatar.getFullName()))
am.removeIdentity(identity)
avatar.removeIdentity(identity)
DBMgr.getInstance().endRequest()
|
[ADD] Add script to remove all local identitiesimport sys
from MaKaC.common.db import DBMgr
from MaKaC.user import AvatarHolder
from MaKaC.authentication import AuthenticatorMgr
from MaKaC.authentication.LocalAuthentication import LocalIdentity
print('This script will remove all local identities from users.')
print('This will remove passwords from the database and prevent them from')
print('logging in locally (so you need e.g. NICE or LDAP authentication)')
print
if raw_input('Do you want to continue? [yes|NO]: ').lower() != 'yes':
print 'Cancelled.'
sys.exit(0)
DBMgr.getInstance().startRequest()
ah = AvatarHolder()
am = AuthenticatorMgr()
for avatar in ah.getValuesToList():
for identity in avatar.getIdentityList():
if isinstance(identity, LocalIdentity):
print('Removing LocalIdentity(%s, %s) from %s' %
(identity.getLogin(), len(identity.password) * '*',
avatar.getFullName()))
am.removeIdentity(identity)
avatar.removeIdentity(identity)
DBMgr.getInstance().endRequest()
|
<commit_before><commit_msg>[ADD] Add script to remove all local identities<commit_after>import sys
from MaKaC.common.db import DBMgr
from MaKaC.user import AvatarHolder
from MaKaC.authentication import AuthenticatorMgr
from MaKaC.authentication.LocalAuthentication import LocalIdentity
print('This script will remove all local identities from users.')
print('This will remove passwords from the database and prevent them from')
print('logging in locally (so you need e.g. NICE or LDAP authentication)')
print
if raw_input('Do you want to continue? [yes|NO]: ').lower() != 'yes':
print 'Cancelled.'
sys.exit(0)
DBMgr.getInstance().startRequest()
ah = AvatarHolder()
am = AuthenticatorMgr()
for avatar in ah.getValuesToList():
for identity in avatar.getIdentityList():
if isinstance(identity, LocalIdentity):
print('Removing LocalIdentity(%s, %s) from %s' %
(identity.getLogin(), len(identity.password) * '*',
avatar.getFullName()))
am.removeIdentity(identity)
avatar.removeIdentity(identity)
DBMgr.getInstance().endRequest()
|
|
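Before running a destructive script like the one above, a read-only dry run is often useful. A sketch using only the APIs the script already imports (Python 2, to match the script's print statements):
from MaKaC.common.db import DBMgr
from MaKaC.user import AvatarHolder
from MaKaC.authentication.LocalAuthentication import LocalIdentity
DBMgr.getInstance().startRequest()
# Count what would be removed without touching anything.
total = sum(1 for avatar in AvatarHolder().getValuesToList()
            for identity in avatar.getIdentityList()
            if isinstance(identity, LocalIdentity))
print 'Would remove %d local identities' % total
DBMgr.getInstance().endRequest()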
a9a74a1ff2e118c76abebf1738d1c6bb75b4589a
|
Python_Data/smpl4.py
|
Python_Data/smpl4.py
|
'''
2.1 - Numpy Array library
'''
import numpy as np
def main():
n = 10
a = [x**2 for x in range(n)]
b = [x**3 for x in range(n)]
v = plainVectorAddition(a, b)
print(v)
def plainVectorAddition(a, b):
v = list()
    for i, j in zip(a, b):
        v.append(i + j)
return v
if __name__ == '__main__':
main()
|
Add new file for numpy library usage
|
Add new file for numpy library usage
|
Python
|
unlicense
|
robotenique/RandomAccessMemory,robotenique/RandomAccessMemory,robotenique/RandomAccessMemory
|
Add new file for numpy library usage
|
'''
2.1 - Numpy Array library
'''
import numpy as np
def main():
n = 10
a = [x**2 for x in range(n)]
b = [x**3 for x in range(n)]
v = plainVectorAddition(a, b)
print(v)
def plainVectorAddition(a, b):
v = list()
    for i, j in zip(a, b):
        v.append(i + j)
return v
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add new file for numpy library usage<commit_after>
|
'''
2.1 - Numpy Array library
'''
import numpy as np
def main():
n = 10
a = [x**2 for x in range(n)]
b = [x**3 for x in range(n)]
v = plainVectorAddition(a, b)
print(v)
def plainVectorAddition(a, b):
v = list()
    for i, j in zip(a, b):
        v.append(i + j)
return v
if __name__ == '__main__':
main()
|
Add new file for numpy library usage'''
2.1 - Numpy Array library
'''
import numpy as np
def main():
n = 10
a = [x**2 for x in range(n)]
b = [x**3 for x in range(n)]
v = plainVectorAddition(a, b)
print(v)
def plainVectorAddition(a, b):
v = list()
    for i, j in zip(a, b):
        v.append(i + j)
return v
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add new file for numpy library usage<commit_after>'''
2.1 - Numpy Array library
'''
import numpy as np
def main():
n = 10
a = [x**2 for x in range(n)]
b = [x**3 for x in range(n)]
v = plainVectorAddition(a, b)
print(v)
def plainVectorAddition(a, b):
v = list()
    for i, j in zip(a, b):
        v.append(i + j)
return v
if __name__ == '__main__':
main()
|
|
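The sample above imports numpy but still adds the vectors in plain Python. The numpy equivalent, for contrast (illustrative, not part of the commit):
import numpy as np
def numpyVectorAddition(a, b):
    # np.asarray accepts plain lists; + is element-wise on arrays.
    return np.asarray(a) + np.asarray(b)
print(numpyVectorAddition([0, 1, 4], [0, 1, 8]))  # -> [ 0  2 12]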
35c2c26ba379c4fc33465c11bb77a5cc8b4a7d2d
|
data/process_bigrams.py
|
data/process_bigrams.py
|
# Intended to be used with count_2w.txt which has the following format:
# A B\tFREQUENCY
# Sometimes "A" is "<S>" for start and "</S>" for end.
# Output is similar with all output lower-cased (including "<S>" and "</S>").
import collections
from src.data import data
all_results = collections.defaultdict(int)
for line in data.open_project_path('data/count_2w.txt', errors='ignore'):
a, b, count = line.split()
key = ('%s %s' % (a, b)).lower()
all_results[key] += int(count)
for item in sorted(all_results.items(), key=lambda x: x[1], reverse=True):
print('%s\t%s' % item)
|
Reformat words_2w.txt to sort and remove caps.
|
Reformat words_2w.txt to sort and remove caps.
|
Python
|
mit
|
PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge
|
Reformat words_2w.txt to sort and remove caps.
|
# Intended to be used with count_2w.txt which has the following format:
# A B\tFREQUENCY
# Sometimes "A" is "<S>" for start and "</S>" for end.
# Output is similar with all output lower-cased (including "<S>" and "</S>").
import collections
from src.data import data
all_results = collections.defaultdict(int)
for line in data.open_project_path('data/count_2w.txt', errors='ignore'):
a, b, count = line.split()
key = ('%s %s' % (a, b)).lower()
all_results[key] += int(count)
for item in sorted(all_results.items(), key=lambda x: x[1], reverse=True):
print('%s\t%s' % item)
|
<commit_before><commit_msg>Reformat words_2w.txt to sort and remove caps.<commit_after>
|
# Intended to be used with count_2w.txt which has the following format:
# A B\tFREQUENCY
# Sometimes "A" is "<S>" for start and "</S>" for end.
# Output is similar with all output lower-cased (including "<S>" and "</S>").
import collections
from src.data import data
all_results = collections.defaultdict(int)
for line in data.open_project_path('data/count_2w.txt', errors='ignore'):
a, b, count = line.split()
key = ('%s %s' % (a, b)).lower()
all_results[key] += int(count)
for item in sorted(all_results.items(), key=lambda x: x[1], reverse=True):
print('%s\t%s' % item)
|
Reformat words_2w.txt to sort and remove caps.# Intended to be used with count_2w.txt which has the following format:
# A B\tFREQUENCY
# Sometimes "A" is "<S>" for start and "</S>" for end.
# Output is similar with all output lower-cased (including "<S>" and "</S>").
import collections
from src.data import data
all_results = collections.defaultdict(int)
for line in data.open_project_path('data/count_2w.txt', errors='ignore'):
a, b, count = line.split()
key = ('%s %s' % (a, b)).lower()
all_results[key] += int(count)
for item in sorted(all_results.items(), key=lambda x: x[1], reverse=True):
print('%s\t%s' % item)
|
<commit_before><commit_msg>Reformat words_2w.txt to sort and remove caps.<commit_after># Intended to be used with count_2w.txt which has the following format:
# A B\tFREQUENCY
# Sometimes "A" is "<S>" for start and "</S>" for end.
# Output is similar with all output lower-cased (including "<S>" and "</S>").
import collections
from src.data import data
all_results = collections.defaultdict(int)
for line in data.open_project_path('data/count_2w.txt', errors='ignore'):
a, b, count = line.split()
key = ('%s %s' % (a, b)).lower()
all_results[key] += int(count)
for item in sorted(all_results.items(), key=lambda x: x[1], reverse=True):
print('%s\t%s' % item)
|
|
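Running the same aggregation on a few in-memory lines makes the lower-casing and merging visible (sample data is made up):
import collections
lines = ['the Cat\t3', 'The cat\t2', '<S> the\t5']
counts = collections.defaultdict(int)
for line in lines:
    a, b, count = line.split()
    counts[('%s %s' % (a, b)).lower()] += int(count)
# 'the Cat' and 'The cat' merge into one key after lower-casing.
print(sorted(counts.items(), key=lambda x: x[1], reverse=True))
# [('the cat', 5), ('<s> the', 5)]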
0b1266eb66fd02e8513b1f36d52b699bfa152285
|
build/android/test_runner.py
|
build/android/test_runner.py
|
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
Add Android test runner script for WebRTC.
|
Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
has been causing headaches for us several times. Mostly
because they're tailored at running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
git-svn-id: 03ae4fbe531b1eefc9d815f31e49022782c42458@7794 4adac7df-926f-26a2-2b94-8c16560cd09d
|
Python
|
bsd-3-clause
|
svn2github/webrtc-Revision-8758,svn2github/webrtc-Revision-8758,svn2github/webrtc-Revision-8758,svn2github/webrtc-Revision-8758,svn2github/webrtc-Revision-8758,svn2github/webrtc-Revision-8758
|
Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
has been causing headaches for us several times. Mostly
because they're tailored at running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
git-svn-id: 03ae4fbe531b1eefc9d815f31e49022782c42458@7794 4adac7df-926f-26a2-2b94-8c16560cd09d
|
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
has been causing headaches for us several times. Mostly
because they're tailored at running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
git-svn-id: 03ae4fbe531b1eefc9d815f31e49022782c42458@7794 4adac7df-926f-26a2-2b94-8c16560cd09d<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
has been causing headaches for us several times. Mostly
because they're tailored at running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
git-svn-id: 03ae4fbe531b1eefc9d815f31e49022782c42458@7794 4adac7df-926f-26a2-2b94-8c16560cd09d#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
has been causing headaches for us several times. Mostly
because they're tailored at running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
git-svn-id: 03ae4fbe531b1eefc9d815f31e49022782c42458@7794 4adac7df-926f-26a2-2b94-8c16560cd09d<commit_after>#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
|
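The pattern worth noting above is override-then-delegate: import the upstream module, replace one config value, then hand control back to it. The same idea in miniature, reusing the modules the script imports (the helper name is illustrative):
from pylib.gtest import gtest_config
import test_runner
def run_with_suites(suites):
    gtest_config.STABLE_TEST_SUITES = list(suites)  # replace the upstream defaults
    return test_runner.main()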
8e2e666f22c83ca17484eaef54566ec7213bc0d3
|
RpiAir/mqttsender.py
|
RpiAir/mqttsender.py
|
# coding=utf-8
import sys
import time
import paho.mqtt.client as paho
def on_connect(client, userdata, flags, rc):
print("MQTT CONNACK received with code %d." % (rc))
client.subscribe("#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
msg_data = msg.payload.decode('utf-8')
print(msg.topic+" "+str(msg_data))
client = paho.Client()
# client.username_pw_set("rpiair", "rpiair")
client.on_connect = on_connect
client.on_message = on_message
client.connect("127.0.0.1", 1883)
# client.loop_start()
def main(arg):
global client
try:
msg = 'Just woke up!'
(rc, mid) = client.publish("sensor/test", msg, qos=0)
client.loop_start()
time.sleep(10)
except KeyboardInterrupt:
print("Ctrl-c received, Bye bye!")
exit()
if __name__ == '__main__':
main(sys.argv)
|
Add very simple MQTT sender setup
|
Add very simple MQTT sender setup
|
Python
|
mit
|
aapris/VekotinVerstas,aapris/VekotinVerstas
|
Add very simple MQTT sender setup
|
# coding=utf-8
import sys
import time
import paho.mqtt.client as paho
def on_connect(client, userdata, flags, rc):
print("MQTT CONNACK received with code %d." % (rc))
client.subscribe("#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
msg_data = msg.payload.decode('utf-8')
print(msg.topic+" "+str(msg_data))
client = paho.Client()
# client.username_pw_set("rpiair", "rpiair")
client.on_connect = on_connect
client.on_message = on_message
client.connect("127.0.0.1", 1883)
# client.loop_start()
def main(arg):
global client
try:
msg = 'Just woke up!'
(rc, mid) = client.publish("sensor/test", msg, qos=0)
client.loop_start()
time.sleep(10)
except KeyboardInterrupt:
print("Ctrl-c received, Bye bye!")
exit()
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add very simple MQTT sender setup<commit_after>
|
# coding=utf-8
import sys
import time
import paho.mqtt.client as paho
def on_connect(client, userdata, flags, rc):
print("MQTT CONNACK received with code %d." % (rc))
client.subscribe("#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
msg_data = msg.payload.decode('utf-8')
print(msg.topic+" "+str(msg_data))
client = paho.Client()
# client.username_pw_set("rpiair", "rpiair")
client.on_connect = on_connect
client.on_message = on_message
client.connect("127.0.0.1", 1883)
# client.loop_start()
def main(arg):
global client
try:
msg = 'Just woke up!'
(rc, mid) = client.publish("sensor/test", msg, qos=0)
client.loop_start()
time.sleep(10)
except KeyboardInterrupt:
print("Ctrl-c received, Bye bye!")
exit()
if __name__ == '__main__':
main(sys.argv)
|
Add very simple MQTT sender setup# coding=utf-8
import sys
import time
import paho.mqtt.client as paho
def on_connect(client, userdata, flags, rc):
print("MQTT CONNACK received with code %d." % (rc))
client.subscribe("#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
msg_data = msg.payload.decode('utf-8')
print(msg.topic+" "+str(msg_data))
client = paho.Client()
# client.username_pw_set("rpiair", "rpiair")
client.on_connect = on_connect
client.on_message = on_message
client.connect("127.0.0.1", 1883)
# client.loop_start()
def main(arg):
global client
try:
msg = 'Just woke up!'
(rc, mid) = client.publish("sensor/test", msg, qos=0)
client.loop_start()
time.sleep(10)
except KeyboardInterrupt:
print("Ctrl-c received, Bye bye!")
exit()
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add very simple MQTT sender setup<commit_after># coding=utf-8
import sys
import time
import paho.mqtt.client as paho
def on_connect(client, userdata, flags, rc):
print("MQTT CONNACK received with code %d." % (rc))
client.subscribe("#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
msg_data = msg.payload.decode('utf-8')
print(msg.topic+" "+str(msg_data))
client = paho.Client()
# client.username_pw_set("rpiair", "rpiair")
client.on_connect = on_connect
client.on_message = on_message
client.connect("127.0.0.1", 1883)
# client.loop_start()
def main(arg):
global client
try:
msg = 'Just woke up!'
(rc, mid) = client.publish("sensor/test", msg, qos=0)
client.loop_start()
time.sleep(10)
except KeyboardInterrupt:
print("Ctrl-c received, Bye bye!")
exit()
if __name__ == '__main__':
main(sys.argv)
|
|
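paho-mqtt's publish() also reports success or failure; a small checked-publish helper, assuming paho-mqtt 1.x where the return value unpacks to (rc, mid):
import paho.mqtt.client as paho
def publish_checked(client, topic, payload):
    rc, mid = client.publish(topic, payload, qos=0)
    if rc != paho.MQTT_ERR_SUCCESS:
        raise RuntimeError('publish failed with rc=%d' % rc)
    return mid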
5de30944c3348bde0f3cbf6c6105894142e194fb
|
cmdline.py
|
cmdline.py
|
class Command:
"""
The definition of a command line command, allows specification of the command itself
the options it expects and also optional arguments
"""
def __init__(self, description):
self.description = description
class CommandNotFoundError(Exception):
pass
class CommandParser:
"""
Parses input from the user based on a set of registered commands
"""
    def __init__(self, registered_commands=None):
        self._registered_commands = registered_commands if registered_commands is not None else {}
def register_command(self, command, implementation):
"""
Registers the command in the command parser
:param command: the command text
:param implementation: the implementation of the command
:return: None
:raises: ValueError if command is already registered
"""
if command not in self._registered_commands:
self._registered_commands[command] = implementation
else:
raise ValueError('Command {0} has already been implemented.'.format(command))
def process(self, input):
parts = input.split(' ')
if parts[0] in self._registered_commands:
implementation = self._registered_commands[parts[0]]
implementation.execute(input)
else:
raise CommandNotFoundError('Command {0} was not registered with the command parser.'.format(parts[0]))
|
Migrate existing code to new project.
|
Migrate existing code to new project.
|
Python
|
mit
|
nathanroys/commandlineparser
|
Migrate existing code to new project.
|
class Command:
"""
The definition of a command line command, allows specification of the command itself
the options it expects and also optional arguments
"""
def __init__(self, description):
self.description = description
class CommandNotFoundError(Exception):
pass
class CommandParser:
"""
Parses input from the user based on a set of registered commands
"""
    def __init__(self, registered_commands=None):
        self._registered_commands = registered_commands if registered_commands is not None else {}
def register_command(self, command, implementation):
"""
Registers the command in the command parser
:param command: the command text
:param implementation: the implementation of the command
:return: None
:raises: ValueError if command is already registered
"""
if command not in self._registered_commands:
self._registered_commands[command] = implementation
else:
raise ValueError('Command {0} has already been implemented.'.format(command))
def process(self, input):
parts = input.split(' ')
if parts[0] in self._registered_commands:
implementation = self._registered_commands[parts[0]]
implementation.execute(input)
else:
raise CommandNotFoundError('Command {0} was not registered with the command parser.'.format(parts[0]))
|
<commit_before><commit_msg>Migrate existing code to new project.<commit_after>
|
class Command:
"""
The definition of a command line command, allows specification of the command itself
the options it expects and also optional arguments
"""
def __init__(self, description):
self.description = description
class CommandNotFoundError(Exception):
pass
class CommandParser:
"""
Parses input from the user based on a set of registered commands
"""
    def __init__(self, registered_commands=None):
        self._registered_commands = registered_commands if registered_commands is not None else {}
def register_command(self, command, implementation):
"""
Registers the command in the command parser
:param command: the command text
:param implementation: the implementation of the command
:return: None
:raises: ValueError if command is already registered
"""
if command not in self._registered_commands:
self._registered_commands[command] = implementation
else:
raise ValueError('Command {0} has already been implemented.'.format(command))
def process(self, input):
parts = input.split(' ')
if parts[0] in self._registered_commands:
implementation = self._registered_commands[parts[0]]
implementation.execute(input)
else:
raise CommandNotFoundError('Command {0} was not registered with the command parser.'.format(parts[0]))
|
Migrate existing code to new project.class Command:
"""
The definition of a command line command, allows specification of the command itself
the options it expects and also optional arguments
"""
def __init__(self, description):
self.description = description
class CommandNotFoundError(Exception):
pass
class CommandParser:
"""
Parses input from the user based on a set of registered commands
"""
    def __init__(self, registered_commands=None):
        self._registered_commands = registered_commands if registered_commands is not None else {}
def register_command(self, command, implementation):
"""
Registers the command in the command parser
:param command: the command text
:param implementation: the implementation of the command
:return: None
:raises: ValueError if command is already registered
"""
if command not in self._registered_commands:
self._registered_commands[command] = implementation
else:
raise ValueError('Command {0} has already been implemented.'.format(command))
def process(self, input):
parts = input.split(' ')
if parts[0] in self._registered_commands:
implementation = self._registered_commands[parts[0]]
implementation.execute(input)
else:
raise CommandNotFoundError('Command {0} was not registered with the command parser.'.format(parts[0]))
|
<commit_before><commit_msg>Migrate existing code to new project.<commit_after>class Command:
"""
The definition of a command line command, allows specification of the command itself
the options it expects and also optional arguments
"""
def __init__(self, description):
self.description = description
class CommandNotFoundError(Exception):
pass
class CommandParser:
"""
Parses input from the user based on a set of registered commands
"""
    def __init__(self, registered_commands=None):
        self._registered_commands = registered_commands if registered_commands is not None else {}
def register_command(self, command, implementation):
"""
Registers the command in the command parser
:param command: the command text
:param implementation: the implementation of the command
:return: None
:raises: ValueError if command is already registered
"""
if command not in self._registered_commands:
self._registered_commands[command] = implementation
else:
raise ValueError('Command {0} has already been implemented.'.format(command))
def process(self, input):
parts = input.split(' ')
if parts[0] in self._registered_commands:
implementation = self._registered_commands[parts[0]]
implementation.execute(input)
else:
raise CommandNotFoundError('Command {0} was not registered with the command parser.'.format(parts[0]))
|
|
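Usage of the parser above, given its execute(input) convention for command implementations (EchoCommand is illustrative, not from the source):
class EchoCommand:
    def execute(self, input):
        print(input)
parser = CommandParser()
parser.register_command('echo', EchoCommand())
parser.process('echo hello')  # dispatches to EchoCommand.execute('echo hello')
parser.process('missing')     # raises CommandNotFoundError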
43a4fa21ce8cf8176efc79f84474f3dd4c08fd44
|
python/robotics/examples/aizek_remote.py
|
python/robotics/examples/aizek_remote.py
|
from robotics.robots.factory import RobotFactory
from robotics.robots.proxy import AizekProxy
from zmq import zmq_server
def main():
robot = RobotFactory.createAizekRobot()
robot.start()
proxy = AizekProxy(robot)
zmq_server.run_zmq_server(proxy)
robot.stop()
if __name__ == '__main__':
main()
|
Add Aizek remote control example
|
Add Aizek remote control example
Change-Id: I713c0cce94ca0f312c3f6eb14c69c44b31751eaa
|
Python
|
mit
|
asydorchuk/robotics,asydorchuk/robotics
|
Add Aizek remote control example
Change-Id: I713c0cce94ca0f312c3f6eb14c69c44b31751eaa
|
from robotics.robots.factory import RobotFactory
from robotics.robots.proxy import AizekProxy
from zmq import zmq_server
def main():
robot = RobotFactory.createAizekRobot()
robot.start()
proxy = AizekProxy(robot)
zmq_server.run_zmq_server(proxy)
robot.stop()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Aizek remote control example
Change-Id: I713c0cce94ca0f312c3f6eb14c69c44b31751eaa<commit_after>
|
from robotics.robots.factory import RobotFactory
from robotics.robots.proxy import AizekProxy
from zmq import zmq_server
def main():
robot = RobotFactory.createAizekRobot()
robot.start()
proxy = AizekProxy(robot)
zmq_server.run_zmq_server(proxy)
robot.stop()
if __name__ == '__main__':
main()
|
Add Aizek remote control example
Change-Id: I713c0cce94ca0f312c3f6eb14c69c44b31751eaafrom robotics.robots.factory import RobotFactory
from robotics.robots.proxy import AizekProxy
from zmq import zmq_server
def main():
robot = RobotFactory.createAizekRobot()
robot.start()
proxy = AizekProxy(robot)
zmq_server.run_zmq_server(proxy)
robot.stop()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Aizek remote control example
Change-Id: I713c0cce94ca0f312c3f6eb14c69c44b31751eaa<commit_after>from robotics.robots.factory import RobotFactory
from robotics.robots.proxy import AizekProxy
from zmq import zmq_server
def main():
robot = RobotFactory.createAizekRobot()
robot.start()
proxy = AizekProxy(robot)
zmq_server.run_zmq_server(proxy)
robot.stop()
if __name__ == '__main__':
main()
|
|
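The zmq_server module above is repo-specific; a plausible run_zmq_server(proxy) built on pyzmq's REP socket might look like this (the endpoint and message shape are assumptions):
import zmq
def run_zmq_server(proxy, endpoint='tcp://*:5555'):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REP)
    sock.bind(endpoint)
    while True:
        request = sock.recv_json()  # e.g. {'method': 'stop', 'args': []}
        method = getattr(proxy, request['method'])
        sock.send_json({'result': method(*request.get('args', []))})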
5dac625970d647fa7c28a77b319bed00153cc59d
|
testbed_image.py
|
testbed_image.py
|
#!/usr/bin/env python3
"""A script for downloading the current FMI Testbed image and sending
it to a storage backend."""
import argparse
import base64
import sys
import requests
from bs4 import BeautifulSoup
def download_image():
"""Downloads the latest FMI Testbed image. Returns the image data on success
and None otherwise."""
try:
resp = requests.get('http://testbed.fmi.fi')
except requests.ConnectionError as ce:
print('Failed to access Testbed page: {}'.format(ce), file=sys.stderr)
return None
if not resp.ok:
return None
bs = BeautifulSoup(resp.text, 'lxml')
    images = bs.find_all('img')
    radar_img = None
for img in images:
if img['src'].find('data/area/') > -1:
radar_img = img
break
if not radar_img:
return None
try:
resp = requests.get('http://testbed.fmi.fi/{}'.format(radar_img['src']))
except requests.ConnectionError as ce:
print('Failed to download Testbed image: {}'.format(ce), file=sys.stderr)
return None
return resp.content
def main():
"""Module main function."""
parser = argparse.ArgumentParser(description='FMI Testbed image downloading script.')
parser.add_argument('backend_url', type=str, help='Backend URL')
args = parser.parse_args()
image = download_image()
if image:
resp = requests.post(args.backend_url, data={'image': base64.b64encode(image)})
if not resp.ok:
print('Failed to send Testbed image: HTTP status {}'.format(resp.status_code),
file=sys.stderr)
exit(1)
print('Image storage status: status code {}, response {}'
.format(resp.status_code, resp.text))
main()
|
Add Python script for fetching the latest FMI Testbed image
|
Add Python script for fetching the latest FMI Testbed image
|
Python
|
mit
|
terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger
|
Add Python script for fetching the latest FMI Testbed image
|
#!/usr/bin/env python3
"""A script for downloading the current FMI Testbed image and sending
it to a storage backend."""
import argparse
import base64
import sys
import requests
from bs4 import BeautifulSoup
def download_image():
"""Downloads the latest FMI Testbed image. Returns the image data on success
and None otherwise."""
try:
resp = requests.get('http://testbed.fmi.fi')
except requests.ConnectionError as ce:
print('Failed to access Testbed page: {}'.format(ce), file=sys.stderr)
return None
if not resp.ok:
return None
bs = BeautifulSoup(resp.text, 'lxml')
    images = bs.find_all('img')
    radar_img = None
for img in images:
if img['src'].find('data/area/') > -1:
radar_img = img
break
if not radar_img:
return None
try:
resp = requests.get('http://testbed.fmi.fi/{}'.format(radar_img['src']))
except requests.ConnectionError as ce:
print('Failed to download Testbed image: {}'.format(ce), file=sys.stderr)
return None
return resp.content
def main():
"""Module main function."""
parser = argparse.ArgumentParser(description='FMI Testbed image downloading script.')
parser.add_argument('backend_url', type=str, help='Backend URL')
args = parser.parse_args()
image = download_image()
if image:
resp = requests.post(args.backend_url, data={'image': base64.b64encode(image)})
if not resp.ok:
print('Failed to send Testbed image: HTTP status {}'.format(resp.status_code),
file=sys.stderr)
exit(1)
print('Image storage status: status code {}, response {}'
.format(resp.status_code, resp.text))
main()
|
<commit_before><commit_msg>Add Python script for fetching the latest FMI Testbed image<commit_after>
|
#!/usr/bin/env python3
"""A script for downloading the current FMI Testbed image and sending
it to a storage backend."""
import argparse
import base64
import sys
import requests
from bs4 import BeautifulSoup
def download_image():
"""Downloads the latest FMI Testbed image. Returns the image data on success
and None otherwise."""
try:
resp = requests.get('http://testbed.fmi.fi')
except requests.ConnectionError as ce:
print('Failed to access Testbed page: {}'.format(ce), file=sys.stderr)
return None
if not resp.ok:
return None
bs = BeautifulSoup(resp.text, 'lxml')
    images = bs.find_all('img')
    radar_img = None
for img in images:
if img['src'].find('data/area/') > -1:
radar_img = img
break
if not radar_img:
return None
try:
resp = requests.get('http://testbed.fmi.fi/{}'.format(radar_img['src']))
except requests.ConnectionError as ce:
print('Failed to download Testbed image: {}'.format(ce), file=sys.stderr)
return None
return resp.content
def main():
"""Module main function."""
parser = argparse.ArgumentParser(description='FMI Testbed image downloading script.')
parser.add_argument('backend_url', type=str, help='Backend URL')
args = parser.parse_args()
image = download_image()
if image:
resp = requests.post(args.backend_url, data={'image': base64.b64encode(image)})
if not resp.ok:
print('Failed to send Testbed image: HTTP status {}'.format(resp.status_code),
file=sys.stderr)
exit(1)
print('Image storage status: status code {}, response {}'
.format(resp.status_code, resp.text))
main()
|
Add Python script for fetching the latest FMI Testbed image#!/usr/bin/env python3
"""A script for downloading the current FMI Testbed image and sending
it to a storage backend."""
import argparse
import base64
import sys
import requests
from bs4 import BeautifulSoup
def download_image():
"""Downloads the latest FMI Testbed image. Returns the image data on success
and None otherwise."""
try:
resp = requests.get('http://testbed.fmi.fi')
except requests.ConnectionError as ce:
print('Failed to access Testbed page: {}'.format(ce), file=sys.stderr)
return None
if not resp.ok:
return None
bs = BeautifulSoup(resp.text, 'lxml')
    images = bs.find_all('img')
    radar_img = None
for img in images:
if img['src'].find('data/area/') > -1:
radar_img = img
break
if not radar_img:
return None
try:
resp = requests.get('http://testbed.fmi.fi/{}'.format(radar_img['src']))
except requests.ConnectionError as ce:
print('Failed to download Testbed image: {}'.format(ce), file=sys.stderr)
return None
return resp.content
def main():
"""Module main function."""
parser = argparse.ArgumentParser(description='FMI Testbed image downloading script.')
parser.add_argument('backend_url', type=str, help='Backend URL')
args = parser.parse_args()
image = download_image()
if image:
resp = requests.post(args.backend_url, data={'image': base64.b64encode(image)})
if not resp.ok:
print('Failed to send Testbed image: HTTP status {}'.format(resp.status_code),
file=sys.stderr)
exit(1)
print('Image storage status: status code {}, response {}'
.format(resp.status_code, resp.text))
main()
|
<commit_before><commit_msg>Add Python script for fetching the latest FMI Testbed image<commit_after>#!/usr/bin/env python3
"""A script for downloading the current FMI Testbed image and sending
it to a storage backend."""
import argparse
import base64
import sys
import requests
from bs4 import BeautifulSoup
def download_image():
"""Downloads the latest FMI Testbed image. Returns the image data on success
and None otherwise."""
try:
resp = requests.get('http://testbed.fmi.fi')
except requests.ConnectionError as ce:
print('Failed to access Testbed page: {}'.format(ce), file=sys.stderr)
return None
if not resp.ok:
return None
bs = BeautifulSoup(resp.text, 'lxml')
    images = bs.find_all('img')
    radar_img = None
for img in images:
if img['src'].find('data/area/') > -1:
radar_img = img
break
if not radar_img:
return None
try:
resp = requests.get('http://testbed.fmi.fi/{}'.format(radar_img['src']))
except requests.ConnectionError as ce:
print('Failed to download Testbed image: {}'.format(ce), file=sys.stderr)
return None
return resp.content
def main():
"""Module main function."""
parser = argparse.ArgumentParser(description='FMI Testbed image downloading script.')
parser.add_argument('backend_url', type=str, help='Backend URL')
args = parser.parse_args()
image = download_image()
if image:
resp = requests.post(args.backend_url, data={'image': base64.b64encode(image)})
if not resp.ok:
print('Failed to send Testbed image: HTTP status {}'.format(resp.status_code),
file=sys.stderr)
exit(1)
print('Image storage status: status code {}, response {}'
.format(resp.status_code, resp.text))
main()
|
|
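The script above posts {'image': base64-data} to backend_url; the storage backend itself is out of scope for the commit, but a minimal Flask sketch of one could look like this (the route and filename are assumptions):
import base64
from flask import Flask, request
app = Flask(__name__)
@app.route('/testbed', methods=['POST'])
def store_image():
    # Decode the base64 payload the downloader POSTs and persist it.
    image = base64.b64decode(request.form['image'])
    with open('testbed-latest.png', 'wb') as f:
        f.write(image)
    return 'OK'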
93e861c49ccd84f8b50661b491fad62d38b75421
|
rally/cmd/manage.py
|
rally/cmd/manage.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" CLI interface for Rally. """
from __future__ import print_function
import sys
from rally.cmd import cliutils
from rally import db
from rally.openstack.common.gettextutils import _ # noqa
class DBCommands(object):
"""Commands for DB management."""
def recreate(self):
try:
db.db_drop()
db.db_create()
except Exception as e:
print(_("Something went wrong %s") % e)
def main(argv):
categories = {'db': DBCommands}
cliutils.run(argv, categories)
if __name__ == '__main__':
main(sys.argv)
|
Add CLI utils for DB recreate
|
Add CLI utils for DB recreate
We should be able to init DB for rally from CLI
blueprint db-task-track
Change-Id: I52804e86b24d0e9334687d2545ada9a6a00595de
|
Python
|
apache-2.0
|
eayunstack/rally,vganapath/rally,paboldin/rally,pandeyop/rally,varunarya10/rally,yeming233/rally,amit0701/rally,eayunstack/rally,openstack/rally,vganapath/rally,vganapath/rally,cernops/rally,amit0701/rally,openstack/rally,gluke77/rally,shdowofdeath/rally,pyKun/rally,eonpatapon/rally,pandeyop/rally,vponomaryov/rally,eonpatapon/rally,varunarya10/rally,group-policy/rally,shdowofdeath/rally,gluke77/rally,redhat-openstack/rally,redhat-openstack/rally,afaheem88/rally,gluke77/rally,aforalee/RRally,ytsarev/rally,vganapath/rally,paboldin/rally,aforalee/RRally,cernops/rally,amit0701/rally,group-policy/rally,vponomaryov/rally,vefimova/rally,openstack/rally,yeming233/rally,gluke77/rally,aplanas/rally,eayunstack/rally,paboldin/rally,go-bears/rally,group-policy/rally,openstack/rally,aplanas/rally,go-bears/rally,pyKun/rally,vefimova/rally,afaheem88/rally,ytsarev/rally
|
Add CLI utils for DB recreate
We should be able to init DB for rally from CLI
blueprint db-task-track
Change-Id: I52804e86b24d0e9334687d2545ada9a6a00595de
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" CLI interface for Rally. """
from __future__ import print_function
import sys
from rally.cmd import cliutils
from rally import db
from rally.openstack.common.gettextutils import _ # noqa
class DBCommands(object):
"""Commands for DB management."""
def recreate(self):
try:
db.db_drop()
db.db_create()
except Exception as e:
print(_("Something went wrong %s") % e)
def main(argv):
categories = {'db': DBCommands}
cliutils.run(argv, categories)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add CLI utils for DB recreate
We should be able to init DB for rally from CLI
blueprint db-task-track
Change-Id: I52804e86b24d0e9334687d2545ada9a6a00595de<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" CLI interface for Rally. """
from __future__ import print_function
import sys
from rally.cmd import cliutils
from rally import db
from rally.openstack.common.gettextutils import _ # noqa
class DBCommands(object):
"""Commands for DB management."""
def recreate(self):
try:
db.db_drop()
db.db_create()
except Exception as e:
print(_("Something went wrong %s") % e)
def main(argv):
categories = {'db': DBCommands}
cliutils.run(argv, categories)
if __name__ == '__main__':
main(sys.argv)
|
Add CLI utils for DB recreate
We should be able to init DB for rally from CLI
blueprint db-task-track
Change-Id: I52804e86b24d0e9334687d2545ada9a6a00595de# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" CLI interface for Rally. """
from __future__ import print_function
import sys
from rally.cmd import cliutils
from rally import db
from rally.openstack.common.gettextutils import _ # noqa
class DBCommands(object):
"""Commands for DB management."""
def recreate(self):
try:
db.db_drop()
db.db_create()
except Exception as e:
print(_("Something went wrong %s") % e)
def main(argv):
categories = {'db': DBCommands}
cliutils.run(argv, categories)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add CLI utils for DB recreate
We should be able to init DB for rally from CLI
blueprint db-task-track
Change-Id: I52804e86b24d0e9334687d2545ada9a6a00595de<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" CLI interface for Rally. """
from __future__ import print_function
import sys
from rally.cmd import cliutils
from rally import db
from rally.openstack.common.gettextutils import _ # noqa
class DBCommands(object):
"""Commands for DB management."""
def recreate(self):
try:
db.db_drop()
db.db_create()
except Exception as e:
print(_("Something went wrong %s") % e)
def main(argv):
categories = {'db': DBCommands}
cliutils.run(argv, categories)
if __name__ == '__main__':
main(sys.argv)
|
|
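cliutils.run maps an invocation like "manage.py db recreate" onto DBCommands().recreate(). A stripped-down stand-in for that dispatch, just to show the shape (the real cliutils adds argument parsing on top):
def run(argv, categories):
    command_class = categories[argv[1]]         # e.g. 'db' -> DBCommands
    action = getattr(command_class(), argv[2])  # e.g. 'recreate'
    return action()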
592732019b37c1ea72d503fa5a6498f46ae023e1
|
scripts/add_acceptable_subjects_to_provider.py
|
scripts/add_acceptable_subjects_to_provider.py
|
from modularodm import Q
from website.app import init_app
from website.models import Subject, PreprintProvider
def find_child_and_grandchild(grandpa, childIndex=0):
    parent = Subject.find(Q('parents', 'eq', grandpa))[childIndex]
try:
child = Subject.find(Q('parents', 'eq', parent))[0]
except IndexError:
return find_child_and_grandchild(grandpa, childIndex=childIndex+1)
return parent, child
def create_subject_rules():
top_levels = Subject.find(Q('parents', 'eq', []))
subA = top_levels[0]
subB = top_levels[1]
subC = top_levels[2]
children_of_A = Subject.find(Q('parents', 'eq', subA))
subD = children_of_A[0]
subE = children_of_A[1]
subF, subG = find_child_and_grandchild(subB)
rules = [
([subA._id, subD._id], False),
([subA._id, subE._id], True),
([subB._id, subF._id, subG._id], True),
([subC._id], True)
]
return rules
def main():
    provider = PreprintProvider.find()[0]
provider.subjects_acceptable = create_subject_rules()
provider.save()
if __name__ == '__main__':
init_app(set_backends=True)
main()
|
Add acceptable subjects to provider script
|
Add acceptable subjects to provider script
|
Python
|
apache-2.0
|
monikagrabowska/osf.io,monikagrabowska/osf.io,cwisecarver/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,alexschiller/osf.io,chennan47/osf.io,adlius/osf.io,hmoco/osf.io,mluo613/osf.io,cslzchen/osf.io,rdhyee/osf.io,brianjgeiger/osf.io,acshi/osf.io,chennan47/osf.io,mattclark/osf.io,aaxelb/osf.io,chrisseto/osf.io,chrisseto/osf.io,caseyrollins/osf.io,saradbowman/osf.io,mfraezz/osf.io,adlius/osf.io,caseyrollins/osf.io,felliott/osf.io,HalcyonChimera/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,felliott/osf.io,crcresearch/osf.io,mattclark/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,mfraezz/osf.io,TomBaxter/osf.io,mattclark/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,chennan47/osf.io,chrisseto/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,acshi/osf.io,pattisdr/osf.io,alexschiller/osf.io,baylee-d/osf.io,acshi/osf.io,alexschiller/osf.io,mluo613/osf.io,rdhyee/osf.io,HalcyonChimera/osf.io,Nesiehr/osf.io,alexschiller/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,aaxelb/osf.io,monikagrabowska/osf.io,hmoco/osf.io,laurenrevere/osf.io,cslzchen/osf.io,rdhyee/osf.io,mluo613/osf.io,laurenrevere/osf.io,cwisecarver/osf.io,icereval/osf.io,binoculars/osf.io,hmoco/osf.io,rdhyee/osf.io,chrisseto/osf.io,laurenrevere/osf.io,mluo613/osf.io,Nesiehr/osf.io,acshi/osf.io,aaxelb/osf.io,baylee-d/osf.io,binoculars/osf.io,felliott/osf.io,HalcyonChimera/osf.io,acshi/osf.io,adlius/osf.io,caneruguz/osf.io,mfraezz/osf.io,leb2dg/osf.io,monikagrabowska/osf.io,cslzchen/osf.io,crcresearch/osf.io,sloria/osf.io,aaxelb/osf.io,Nesiehr/osf.io,erinspace/osf.io,adlius/osf.io,hmoco/osf.io,binoculars/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,sloria/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,sloria/osf.io,caneruguz/osf.io,felliott/osf.io,TomBaxter/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,mluo613/osf.io,erinspace/osf.io,baylee-d/osf.io,Nesiehr/osf.io,mfraezz/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,icereval/osf.io,cwisecarver/osf.io,pattisdr/osf.io,leb2dg/osf.io,caneruguz/osf.io,erinspace/osf.io,icereval/osf.io
|
Add acceptable subjects to provider script
|
from modularodm import Q
from website.app import init_app
from website.models import Subject, PreprintProvider
def find_child_and_grandchild(grandpa, childIndex=0):
    parent = Subject.find(Q('parents', 'eq', grandpa))[childIndex]
try:
child = Subject.find(Q('parents', 'eq', parent))[0]
except IndexError:
return find_child_and_grandchild(grandpa, childIndex=childIndex+1)
return parent, child
def create_subject_rules():
top_levels = Subject.find(Q('parents', 'eq', []))
subA = top_levels[0]
subB = top_levels[1]
subC = top_levels[2]
children_of_A = Subject.find(Q('parents', 'eq', subA))
subD = children_of_A[0]
subE = children_of_A[1]
subF, subG = find_child_and_grandchild(subB)
rules = [
([subA._id, subD._id], False),
([subA._id, subE._id], True),
([subB._id, subF._id, subG._id], True),
([subC._id], True)
]
return rules
def main():
    provider = PreprintProvider.find()[0]
provider.subjects_acceptable = create_subject_rules()
provider.save()
if __name__ == '__main__':
init_app(set_backends=True)
main()
|
<commit_before><commit_msg>Add acceptable subjects to provider script<commit_after>
|
from modularodm import Q
from website.app import init_app
from website.models import Subject, PreprintProvider
def find_child_and_grandchild(grandpa, childIndex=0):
    parent = Subject.find(Q('parents', 'eq', grandpa))[childIndex]
try:
child = Subject.find(Q('parents', 'eq', parent))[0]
except IndexError:
return find_child_and_grandchild(grandpa, childIndex=childIndex+1)
return parent, child
def create_subject_rules():
top_levels = Subject.find(Q('parents', 'eq', []))
subA = top_levels[0]
subB = top_levels[1]
subC = top_levels[2]
children_of_A = Subject.find(Q('parents', 'eq', subA))
subD = children_of_A[0]
subE = children_of_A[1]
subF, subG = find_child_and_grandchild(subB)
rules = [
([subA._id, subD._id], False),
([subA._id, subE._id], True),
([subB._id, subF._id, subG._id], True),
([subC._id], True)
]
return rules
def main():
    provider = PreprintProvider.find()[0]
provider.subjects_acceptable = create_subject_rules()
provider.save()
if __name__ == '__main__':
init_app(set_backends=True)
main()
|
Add acceptable subjects to provider scriptfrom modularodm import Q
from website.app import init_app
from website.models import Subject, PreprintProvider
def find_child_and_grandchild(grandpa, childIndex=0):
    parent = Subject.find(Q('parents', 'eq', grandpa))[childIndex]
try:
child = Subject.find(Q('parents', 'eq', parent))[0]
except IndexError:
return find_child_and_grandchild(grandpa, childIndex=childIndex+1)
return parent, child
def create_subject_rules():
top_levels = Subject.find(Q('parents', 'eq', []))
subA = top_levels[0]
subB = top_levels[1]
subC = top_levels[2]
children_of_A = Subject.find(Q('parents', 'eq', subA))
subD = children_of_A[0]
subE = children_of_A[1]
subF, subG = find_child_and_grandchild(subB)
rules = [
([subA._id, subD._id], False),
([subA._id, subE._id], True),
([subB._id, subF._id, subG._id], True),
([subC._id], True)
]
return rules
def main():
    provider = PreprintProvider.find()[0]
provider.subjects_acceptable = create_subject_rules()
provider.save()
if __name__ == '__main__':
init_app(set_backends=True)
main()
|
<commit_before><commit_msg>Add acceptable subjects to provider script<commit_after>from modularodm import Q
from website.app import init_app
from website.models import Subject, PreprintProvider
def find_child_and_grandchild(grandpa, childIndex=0):
    parent = Subject.find(Q('parents', 'eq', grandpa))[childIndex]
try:
child = Subject.find(Q('parents', 'eq', parent))[0]
except IndexError:
return find_child_and_grandchild(grandpa, childIndex=childIndex+1)
return parent, child
def create_subject_rules():
top_levels = Subject.find(Q('parents', 'eq', []))
subA = top_levels[0]
subB = top_levels[1]
subC = top_levels[2]
children_of_A = Subject.find(Q('parents', 'eq', subA))
subD = children_of_A[0]
subE = children_of_A[1]
subF, subG = find_child_and_grandchild(subB)
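# Each rule pairs a list of subject ids (a hierarchy path) with a boolean; the
# flag presumably marks whether descendants of the last subject are also acceptable.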
rules = [
([subA._id, subD._id], False),
([subA._id, subE._id], True),
([subB._id, subF._id, subG._id], True),
([subC._id], True)
]
return rules
def main():
provider = PreprintProvider.find()[0]  # find() returns a queryset, so take the first provider
provider.subjects_acceptable = create_subject_rules()
provider.save()
if __name__ == '__main__':
init_app(set_backends=True)
main()
|
|
0db90f8cd74661a973578f393163125eabeefb7e
|
src/info/name_table_util.py
|
src/info/name_table_util.py
|
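# Name IDs and their display labels, following the OpenType 'name' table specification.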
_name_ids = {
0: { 'short': 'Copyright', 'name': 'Copyright notice' },
1: { 'short': 'Family', 'name': 'Font Family name' },
2: { 'short': 'Subfamily', 'name': 'Font Subfamily name' },
3: { 'short': 'Unique ID', 'name': 'Unique font identifier' },
4: { 'short': 'Full name', 'name': 'Full font name' },
5: { 'short': 'Version', 'name': 'Version string' },
6: { 'short': 'PS name', 'name': 'Postscript name' },
7: { 'short': 'Trademark', 'name': 'Trademark' },
8: { 'short': 'Manufacturer', 'name': 'Manufacturer Name' },
9: { 'short': 'Designer', 'name': 'Designer' },
10: { 'short': 'Desc.', 'name': 'Description' },
11: { 'short': 'Vendor URL', 'name': 'URL Vendor' },
12: { 'short': 'Designer URL', 'name': 'URL Designer' },
13: { 'short': 'License', 'name': 'License Description' },
14: { 'short': 'License URL', 'name': 'License Info URL' },
15: { 'short': 'Reserved', 'name': 'Reserved' },
16: { 'short': 'Preferred Fam', 'name': 'Preferred Family' },
17: { 'short': 'Preferred Subfam', 'name': 'Preferred Subfamily' },
18: { 'short': 'Compatible', 'name': 'Compatible Full' },
19: { 'short': 'Sample text', 'name': 'Sample text' },
20: { 'short': 'CID', 'name': 'PostScript CID' },
21: { 'short': 'WWS Family', 'name': 'WWS Family Name' },
22: { 'short': 'WWS Subfamily', 'name': 'WWS Subfamily Name' }
}
def name_id_to_name(name_id):
info = _name_ids.get(name_id, { 'name': ''})
return info['name']
def name_id_to_short_name(name_id):
info = _name_ids.get(name_id, { 'short': ''})
return info['short']
|
Add ability to display the name table.
|
Add ability to display the name table.
|
Python
|
apache-2.0
|
googlei18n/fontuley,googlei18n/fontuley,wskplho/fontuley,wskplho/fontuley,googlei18n/fontuley
|
Add ability to display the name table.
|
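# Name IDs and their display labels, following the OpenType 'name' table specification.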
_name_ids = {
0: { 'short': 'Copyright', 'name': 'Copyright notice' },
1: { 'short': 'Family', 'name': 'Font Family name' },
2: { 'short': 'Subfamily', 'name': 'Font Subfamily name' },
3: { 'short': 'Unique ID', 'name': 'Unique font identifier' },
4: { 'short': 'Full name', 'name': 'Full font name' },
5: { 'short': 'Version', 'name': 'Version string' },
6: { 'short': 'PS name', 'name': 'Postscript name' },
7: { 'short': 'Trademark', 'name': 'Trademark' },
8: { 'short': 'Manufacturer', 'name': 'Manufacturer Name' },
9: { 'short': 'Designer', 'name': 'Designer' },
10: { 'short': 'Desc.', 'name': 'Description' },
11: { 'short': 'Vendor URL', 'name': 'URL Vendor' },
12: { 'short': 'Designer URL', 'name': 'URL Designer' },
13: { 'short': 'License', 'name': 'License Description' },
14: { 'short': 'License URL', 'name': 'License Info URL' },
15: { 'short': 'Reserved', 'name': 'Reserved' },
16: { 'short': 'Preferred Fam', 'name': 'Preferred Family' },
17: { 'short': 'Preferred Subfam', 'name': 'Preferred Subfamily' },
18: { 'short': 'Compatible', 'name': 'Compatible Full' },
19: { 'short': 'Sample text', 'name': 'Sample text' },
20: { 'short': 'CID', 'name': 'PostScript CID' },
21: { 'short': 'WWS Family', 'name': 'WWS Family Name' },
22: { 'short': 'WWS Subfamily', 'name': 'WWS Subfamily Name' }
}
def name_id_to_name(name_id):
info = _name_ids.get(name_id, { 'name': ''})
return info['name']
def name_id_to_short_name(name_id):
info = _name_ids.get(name_id, { 'short': ''})
return info['short']
|
<commit_before><commit_msg>Add ability to display the name table.<commit_after>
|
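# Name IDs and their display labels, following the OpenType 'name' table specification.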
_name_ids = {
0: { 'short': 'Copyright', 'name': 'Copyright notice' },
1: { 'short': 'Family', 'name': 'Font Family name' },
2: { 'short': 'Subfamily', 'name': 'Font Subfamily name' },
3: { 'short': 'Unique ID', 'name': 'Unique font identifier' },
4: { 'short': 'Full name', 'name': 'Full font name' },
5: { 'short': 'Version', 'name': 'Version string' },
6: { 'short': 'PS name', 'name': 'Postscript name' },
7: { 'short': 'Trademark', 'name': 'Trademark' },
8: { 'short': 'Manufacturer', 'name': 'Manufacturer Name' },
9: { 'short': 'Designer', 'name': 'Designer' },
10: { 'short': 'Desc.', 'name': 'Description' },
11: { 'short': 'Vendor URL', 'name': 'URL Vendor' },
12: { 'short': 'Designer URL', 'name': 'URL Designer' },
13: { 'short': 'License', 'name': 'License Description' },
14: { 'short': 'License URL', 'name': 'License Info URL' },
15: { 'short': 'Reserved', 'name': 'Reserved' },
16: { 'short': 'Preferred Fam', 'name': 'Preferred Family' },
17: { 'short': 'Preferred Subfam', 'name': 'Preferred Subfamily' },
18: { 'short': 'Compatible', 'name': 'Compatible Full' },
19: { 'short': 'Sample text', 'name': 'Sample text' },
20: { 'short': 'CID', 'name': 'PostScript CID' },
21: { 'short': 'WWS Family', 'name': 'WWS Family Name' },
22: { 'short': 'WWS Subfamily', 'name': 'WWS Subfamily Name' }
}
def name_id_to_name(name_id):
info = _name_ids.get(name_id, { 'name': ''})
return info['name']
def name_id_to_short_name(name_id):
info = _name_ids.get(name_id, { 'short': ''})
return info['short']
|
Add ability to display the name table.
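# Name IDs and their display labels, following the OpenType 'name' table specification.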
_name_ids = {
0: { 'short': 'Copyright', 'name': 'Copyright notice' },
1: { 'short': 'Family', 'name': 'Font Family name' },
2: { 'short': 'Subfamily', 'name': 'Font Subfamily name' },
3: { 'short': 'Unique ID', 'name': 'Unique font identifier' },
4: { 'short': 'Full name', 'name': 'Full font name' },
5: { 'short': 'Version', 'name': 'Version string' },
6: { 'short': 'PS name', 'name': 'Postscript name' },
7: { 'short': 'Trademark', 'name': 'Trademark' },
8: { 'short': 'Manufacturer', 'name': 'Manufacturer Name' },
9: { 'short': 'Designer', 'name': 'Designer' },
10: { 'short': 'Desc.', 'name': 'Description' },
11: { 'short': 'Vendor URL', 'name': 'URL Vendor' },
12: { 'short': 'Designer URL', 'name': 'URL Designer' },
13: { 'short': 'License', 'name': 'License Description' },
14: { 'short': 'License URL', 'name': 'License Info URL' },
15: { 'short': 'Reserved', 'name': 'Reserved' },
16: { 'short': 'Preferred Fam', 'name': 'Preferred Family' },
17: { 'short': 'Preferred Subfam', 'name': 'Preferred Subfamily' },
18: { 'short': 'Compatible', 'name': 'Compatible Full' },
19: { 'short': 'Sample text', 'name': 'Sample text' },
20: { 'short': 'CID', 'name': 'PostScript CID' },
21: { 'short': 'WWS Family', 'name': 'WWS Family Name' },
22: { 'short': 'WWS Subfamily', 'name': 'WWS Subfamily Name' }
}
def name_id_to_name(name_id):
info = _name_ids.get(name_id, { 'name': ''})
return info['name']
def name_id_to_short_name(name_id):
info = _name_ids.get(name_id, { 'short': ''})
return info['short']
|
<commit_before><commit_msg>Add ability to display the name table.<commit_after>
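# Name IDs and their display labels, following the OpenType 'name' table specification.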
_name_ids = {
0: { 'short': 'Copyright', 'name': 'Copyright notice' },
1: { 'short': 'Family', 'name': 'Font Family name' },
2: { 'short': 'Subfamily', 'name': 'Font Subfamily name' },
3: { 'short': 'Unique ID', 'name': 'Unique font identifier' },
4: { 'short': 'Full name', 'name': 'Full font name' },
5: { 'short': 'Version', 'name': 'Version string' },
6: { 'short': 'PS name', 'name': 'Postscript name' },
7: { 'short': 'Trademark', 'name': 'Trademark' },
8: { 'short': 'Manufacturer', 'name': 'Manufacturer Name' },
9: { 'short': 'Designer', 'name': 'Designer' },
10: { 'short': 'Desc.', 'name': 'Description' },
11: { 'short': 'Vendor URL', 'name': 'URL Vendor' },
12: { 'short': 'Designer URL', 'name': 'URL Designer' },
13: { 'short': 'License', 'name': 'License Description' },
14: { 'short': 'License URL', 'name': 'License Info URL' },
15: { 'short': 'Reserved', 'name': 'Reserved' },
16: { 'short': 'Preferred Fam', 'name': 'Preferred Family' },
17: { 'short': 'Preferred Subfam', 'name': 'Preferred Subfamily' },
18: { 'short': 'Compatible', 'name': 'Compatible Full' },
19: { 'short': 'Sample text', 'name': 'Sample text' },
20: { 'short': 'CID', 'name': 'PostScript CID' },
21: { 'short': 'WWS Family', 'name': 'WWS Family Name' },
22: { 'short': 'WWS Subfamily', 'name': 'WWS Subfamily Name' }
}
def name_id_to_name(name_id):
info = _name_ids.get(name_id, { 'name': ''})
return info['name']
def name_id_to_short_name(name_id):
info = _name_ids.get(name_id, { 'short': ''})
return info['short']
|
|
ffca8058de944543cf25606ec1989da8f206e4bb
|
takeout_inspector/tests/__init__.py
|
takeout_inspector/tests/__init__.py
|
"""takeout_inspector/tests/__init__.py
Tests for the main Takeout Inspector module.
Copyright (c) 2016 Christopher Charbonneau Wells
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Christopher Charbonneau Wells'
__copyright__ = 'Copyright (c) 2016 Christopher Charbonneau Wells'
__license__ = 'MIT'
|
Create base files for adding tests to the project.
|
Create base files for adding tests to the project.
|
Python
|
mit
|
cdubz/takeout-inspector
|
Create base files for adding tests to the project.
|
"""takeout_inspector/tests/__init__.py
Tests for the main Takeout Inspector module.
Copyright (c) 2016 Christopher Charbonneau Wells
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Christopher Charbonneau Wells'
__copyright__ = 'Copyright (c) 2016 Christopher Charbonneau Wells'
__license__ = 'MIT'
|
<commit_before><commit_msg>Create base files for adding tests to the project.<commit_after>
|
"""takeout_inspector/tests/__init__.py
Tests for the main Takeout Inspector module.
Copyright (c) 2016 Christopher Charbonneau Wells
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Christopher Charbonneau Wells'
__copyright__ = 'Copyright (c) 2016 Christopher Charbonneau Wells'
__license__ = 'MIT'
|
Create base files for adding tests to the project."""takeout_inspector/tests/__init__.py
Tests for the main Takeout Inspector module.
Copyright (c) 2016 Christopher Charbonneau Wells
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Christopher Charbonneau Wells'
__copyright__ = 'Copyright (c) 2016 Christopher Charbonneau Wells'
__license__ = 'MIT'
|
<commit_before><commit_msg>Create base files for adding tests to the project.<commit_after>"""takeout_inspector/tests/__init__.py
Tests for the main Takeout Inspector module.
Copyright (c) 2016 Christopher Charbonneau Wells
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Christopher Charbonneau Wells'
__copyright__ = 'Copyright (c) 2016 Christopher Charbonneau Wells'
__license__ = 'MIT'
|
|
5d9e624b69a38e826a5591b0976a29c61ebd20f3
|
tests/smoketests/test_components.py
|
tests/smoketests/test_components.py
|
"""
Create an actual instance of one of the client/server components and make sure
we can talk to it.
"""
from twisted.internet import reactor, task
from twisted.internet.defer import Deferred
from src.client.backend import Backend as ClientBackend
from src.shared.message_infrastructure import deserializeMessage
from src.shared.messages import YourIdIs, RequestQuit
def test_backend_components():
testDone = getMainDeferred()
clientDone = Deferred()
backend = ClientBackend(clientDone)
# Give it some components.
graphics_dummy = DummyComponent(backend.graphicsMessage)
stdio_dummy = DummyComponent(backend.stdioMessage)
network_dummy = DummyComponent(backend.networkMessage)
# Tell it they're all ready. A fancier test might randomize the order of
# these and insert some delays, but this is just a simple smoketest.
backend.stdioReady(stdio_dummy)
backend.networkReady(network_dummy)
backend.graphicsInterfaceReady(graphics_dummy)
def sendMessages():
network_dummy.sendMessage(YourIdIs(42).serialize())
graphics_dummy.sendMessage(RequestQuit().serialize())
def finalChecks(x):
# Check that the YourIdIs(42) was successfully forwarded to the
# graphics interface.
gotId = False
for msgData in graphics_dummy.messageLog:
msg = deserializeMessage(msgData)
if isinstance(msg, YourIdIs):
assert msg.playerId == 42
gotId = True
assert gotId
clientDone.addCallback(finalChecks)
clientDone.chainDeferred(testDone)
# Make sure to chain the main Deferred's errback to that of this
# deferLater, so that if something goes wrong in sendMessage then it will
# be reported (rather than silently dropped).
d = task.deferLater(reactor, 0.05, sendMessages)
d.addErrback(testDone.errback)
return testDone
# TODO: Move this to a shared file.
def getMainDeferred(timeout=1.0):
"""
Create and return a Deferred object suitable for returning to
pytest-twisted. Use this function instead of creating a Deferred directly,
because this function does things like making sure the test will fail with
a timeout if it doesn't otherwise finish quickly. Arguments:
timeout -- duration until timeout, in seconds
"""
mainDeferred = Deferred()
# Schedule a timeout after 'timeout' seconds.
timeoutError = RuntimeError("Test timed out after {} seconds."
.format(timeout))
task.deferLater(reactor, timeout, mainDeferred.errback, timeoutError)
return mainDeferred
class DummyComponent:
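# Minimal test double for a component: it keeps the backend's send callable
# and logs every message the backend delivers to it.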
def __init__(self, sendMessage):
self.sendMessage = sendMessage
self.messageLog = []
def backendMessage(self, msg):
self.messageLog.append(msg)
def cleanup(self):
pass
|
Add a test that interacts with the client backend.
|
Add a test that interacts with the client backend.
|
Python
|
mit
|
CheeseLord/warts,CheeseLord/warts
|
Add a test that interacts with the client backend.
|
"""
Create an actual instance of one of the client/server components and make sure
we can talk to it.
"""
from twisted.internet import reactor, task
from twisted.internet.defer import Deferred
from src.client.backend import Backend as ClientBackend
from src.shared.message_infrastructure import deserializeMessage
from src.shared.messages import YourIdIs, RequestQuit
def test_backend_components():
testDone = getMainDeferred()
clientDone = Deferred()
backend = ClientBackend(clientDone)
# Give it some components.
graphics_dummy = DummyComponent(backend.graphicsMessage)
stdio_dummy = DummyComponent(backend.stdioMessage)
network_dummy = DummyComponent(backend.networkMessage)
# Tell it they're all ready. A fancier test might randomize the order of
# these and insert some delays, but this is just a simple smoketest.
backend.stdioReady(stdio_dummy)
backend.networkReady(network_dummy)
backend.graphicsInterfaceReady(graphics_dummy)
def sendMessages():
network_dummy.sendMessage(YourIdIs(42).serialize())
graphics_dummy.sendMessage(RequestQuit().serialize())
def finalChecks(x):
# Check that the YourIdIs(42) was successfully forwarded to the
# graphics interface.
gotId = False
for msgData in graphics_dummy.messageLog:
msg = deserializeMessage(msgData)
if isinstance(msg, YourIdIs):
assert msg.playerId == 42
gotId = True
assert gotId
clientDone.addCallback(finalChecks)
clientDone.chainDeferred(testDone)
# Make sure to chain the main Deferred's errback to that of this
# deferLater, so that if something goes wrong in sendMessage then it will
# be reported (rather than silently dropped).
d = task.deferLater(reactor, 0.05, sendMessages)
d.addErrback(testDone.errback)
return testDone
# TODO: Move this to a shared file.
def getMainDeferred(timeout=1.0):
"""
Create and return a Deferred object suitable for returning to
pytest-twisted. Use this function instead of creating a Deferred directly,
because this function does things like making sure the test will fail with
a timeout if it doesn't otherwise finish quickly. Arguments:
timeout -- duration until timeout, in seconds
"""
mainDeferred = Deferred()
# Schedule a timeout after 'timeout' seconds.
timeoutError = RuntimeError("Test timed out after {} seconds."
.format(timeout))
task.deferLater(reactor, timeout, mainDeferred.errback, timeoutError)
return mainDeferred
class DummyComponent:
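# Minimal test double for a component: it keeps the backend's send callable
# and logs every message the backend delivers to it.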
def __init__(self, sendMessage):
self.sendMessage = sendMessage
self.messageLog = []
def backendMessage(self, msg):
self.messageLog.append(msg)
def cleanup(self):
pass
|
<commit_before><commit_msg>Add a test that interacts with the client backend.<commit_after>
|
"""
Create an actual instance of one of the client/server components and make sure
we can talk to it.
"""
from twisted.internet import reactor, task
from twisted.internet.defer import Deferred
from src.client.backend import Backend as ClientBackend
from src.shared.message_infrastructure import deserializeMessage
from src.shared.messages import YourIdIs, RequestQuit
def test_backend_components():
testDone = getMainDeferred()
clientDone = Deferred()
backend = ClientBackend(clientDone)
# Give it some components.
graphics_dummy = DummyComponent(backend.graphicsMessage)
stdio_dummy = DummyComponent(backend.stdioMessage)
network_dummy = DummyComponent(backend.networkMessage)
# Tell it they're all ready. A fancier test might randomize the order of
# these and insert some delays, but this is just a simple smoketest.
backend.stdioReady(stdio_dummy)
backend.networkReady(network_dummy)
backend.graphicsInterfaceReady(graphics_dummy)
def sendMessages():
network_dummy.sendMessage(YourIdIs(42).serialize())
graphics_dummy.sendMessage(RequestQuit().serialize())
def finalChecks(x):
# Check that the YourIdIs(42) was successfully forwarded to the
# graphics interface.
gotId = False
for msgData in graphics_dummy.messageLog:
msg = deserializeMessage(msgData)
if isinstance(msg, YourIdIs):
assert msg.playerId == 42
gotId = True
assert gotId
clientDone.addCallback(finalChecks)
clientDone.chainDeferred(testDone)
# Make sure to chain the main Deferred's errback to that of this
# deferLater, so that if something goes wrong in sendMessage then it will
# be reported (rather than silently dropped).
d = task.deferLater(reactor, 0.05, sendMessages)
d.addErrback(testDone.errback)
return testDone
# TODO: Move this to a shared file.
def getMainDeferred(timeout=1.0):
"""
Create and return a Deferred object suitable for returning to
pytest-twisted. Use this function instead of creating a Deferred directly,
because this function does things like making sure the test will fail with
a timeout if it doesn't otherwise finish quickly. Arguments:
timeout -- duration until timeout, in seconds
"""
mainDeferred = Deferred()
# Schedule a timeout after 'timeout' seconds.
timeoutError = RuntimeError("Test timed out after {} seconds."
.format(timeout))
task.deferLater(reactor, timeout, mainDeferred.errback, timeoutError)
return mainDeferred
class DummyComponent:
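# Minimal test double for a component: it keeps the backend's send callable
# and logs every message the backend delivers to it.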
def __init__(self, sendMessage):
self.sendMessage = sendMessage
self.messageLog = []
def backendMessage(self, msg):
self.messageLog.append(msg)
def cleanup(self):
pass
|
Add a test that interacts with the client backend."""
Create an actual instance of one of the client/server components and make sure
we can talk to it.
"""
from twisted.internet import reactor, task
from twisted.internet.defer import Deferred
from src.client.backend import Backend as ClientBackend
from src.shared.message_infrastructure import deserializeMessage
from src.shared.messages import YourIdIs, RequestQuit
def test_backend_components():
testDone = getMainDeferred()
clientDone = Deferred()
backend = ClientBackend(clientDone)
# Give it some components.
graphics_dummy = DummyComponent(backend.graphicsMessage)
stdio_dummy = DummyComponent(backend.stdioMessage)
network_dummy = DummyComponent(backend.networkMessage)
# Tell it they're all ready. A fancier test might randomize the order of
# these and insert some delays, but this is just a simple smoketest.
backend.stdioReady(stdio_dummy)
backend.networkReady(network_dummy)
backend.graphicsInterfaceReady(graphics_dummy)
def sendMessages():
network_dummy.sendMessage(YourIdIs(42).serialize())
graphics_dummy.sendMessage(RequestQuit().serialize())
def finalChecks(x):
# Check that the YourIdIs(42) was successfully forwarded to the
# graphics interface.
gotId = False
for msgData in graphics_dummy.messageLog:
msg = deserializeMessage(msgData)
if isinstance(msg, YourIdIs):
assert msg.playerId == 42
gotId = True
assert gotId
clientDone.addCallback(finalChecks)
clientDone.chainDeferred(testDone)
# Make sure to chain the main Deferred's errback to that of this
# deferLater, so that if something goes wrong in sendMessage then it will
# be reported (rather than silently dropped).
d = task.deferLater(reactor, 0.05, sendMessages)
d.addErrback(testDone.errback)
return testDone
# TODO: Move this to a shared file.
def getMainDeferred(timeout=1.0):
"""
Create and return a Deferred object suitable for returning to
pytest-twisted. Use this function instead of creating a Deferred directly,
because this function does things like making sure the test will fail with
a timeout if it doesn't otherwise finish quickly. Arguments:
timeout -- duration until timeout, in seconds
"""
mainDeferred = Deferred()
# Schedule a timeout after 'timeout' seconds.
timeoutError = RuntimeError("Test timed out after {} seconds."
.format(timeout))
task.deferLater(reactor, timeout, mainDeferred.errback, timeoutError)
return mainDeferred
class DummyComponent:
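# Minimal test double for a component: it keeps the backend's send callable
# and logs every message the backend delivers to it.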
def __init__(self, sendMessage):
self.sendMessage = sendMessage
self.messageLog = []
def backendMessage(self, msg):
self.messageLog.append(msg)
def cleanup(self):
pass
|
<commit_before><commit_msg>Add a test that interacts with the client backend.<commit_after>"""
Create an actual instance of one of the client/server components and make sure
we can talk to it.
"""
from twisted.internet import reactor, task
from twisted.internet.defer import Deferred
from src.client.backend import Backend as ClientBackend
from src.shared.message_infrastructure import deserializeMessage
from src.shared.messages import YourIdIs, RequestQuit
def test_backend_components():
testDone = getMainDeferred()
clientDone = Deferred()
backend = ClientBackend(clientDone)
# Give it some components.
graphics_dummy = DummyComponent(backend.graphicsMessage)
stdio_dummy = DummyComponent(backend.stdioMessage)
network_dummy = DummyComponent(backend.networkMessage)
# Tell it they're all ready. A fancier test might randomize the order of
# these and insert some delays, but this is just a simple smoketest.
backend.stdioReady(stdio_dummy)
backend.networkReady(network_dummy)
backend.graphicsInterfaceReady(graphics_dummy)
def sendMessages():
network_dummy.sendMessage(YourIdIs(42).serialize())
graphics_dummy.sendMessage(RequestQuit().serialize())
def finalChecks(x):
# Check that the YourIdIs(42) was successfully forwarded to the
# graphics interface.
gotId = False
for msgData in graphics_dummy.messageLog:
msg = deserializeMessage(msgData)
if isinstance(msg, YourIdIs):
assert msg.playerId == 42
gotId = True
assert gotId
clientDone.addCallback(finalChecks)
clientDone.chainDeferred(testDone)
# Make sure to chain the main Deferred's errback to that of this
# deferLater, so that if something goes wrong in sendMessage then it will
# be reported (rather than silently dropped).
d = task.deferLater(reactor, 0.05, sendMessages)
d.addErrback(testDone.errback)
return testDone
# TODO: Move this to a shared file.
def getMainDeferred(timeout=1.0):
"""
Create and return a Deferred object suitable for returning to
pytest-twisted. Use this function instead of creating a Deferred directly,
because this function does things like making sure the test will fail with
a timeout if it doesn't otherwise finish quickly. Arguments:
timeout -- duration until timeout, in seconds
"""
mainDeferred = Deferred()
# Schedule a timeout after 'timeout' seconds.
timeoutError = RuntimeError("Test timed out after {} seconds."
.format(timeout))
task.deferLater(reactor, timeout, mainDeferred.errback, timeoutError)
return mainDeferred
class DummyComponent:
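# Minimal test double for a component: it keeps the backend's send callable
# and logs every message the backend delivers to it.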
def __init__(self, sendMessage):
self.sendMessage = sendMessage
self.messageLog = []
def backendMessage(self, msg):
self.messageLog.append(msg)
def cleanup(self):
pass
|
|
eb3d5c78255e3b9d60f7b1f1a1379017058c0417
|
migrations/versions/190_eas_add_device_retirement.py
|
migrations/versions/190_eas_add_device_retirement.py
|
"""eas_add_device_retirement
Revision ID: 246a6bf050bc
Revises: 3b093f2d7419
Create Date: 2015-07-17 02:46:47.842573
"""
# revision identifiers, used by Alembic.
revision = '246a6bf050bc'
down_revision = '3b093f2d7419'
from alembic import op
import sqlalchemy as sa
def upgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
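# The easdevice table presumably only exists in deployments that ship the
# Exchange (EAS) module, so skip the migration otherwise.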
if not engine.has_table('easdevice'):
return
op.add_column('easdevice',
sa.Column('retired', sa.Boolean(),
server_default=sa.sql.expression.false(),
nullable=False))
def downgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
if not engine.has_table('easdevice'):
return
op.drop_column('easdevice', 'retired')
|
Add migration for Exchange bookkeeping.
|
Add migration for Exchange bookkeeping.
|
Python
|
agpl-3.0
|
wakermahmud/sync-engine,jobscore/sync-engine,wakermahmud/sync-engine,Eagles2F/sync-engine,closeio/nylas,jobscore/sync-engine,gale320/sync-engine,ErinCall/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,nylas/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,wakermahmud/sync-engine,ErinCall/sync-engine,gale320/sync-engine,closeio/nylas,ErinCall/sync-engine,gale320/sync-engine,wakermahmud/sync-engine,jobscore/sync-engine,jobscore/sync-engine,closeio/nylas,Eagles2F/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,wakermahmud/sync-engine,Eagles2F/sync-engine,gale320/sync-engine,gale320/sync-engine,closeio/nylas
|
Add migration for Exchange bookkeeping.
|
"""eas_add_device_retirement
Revision ID: 246a6bf050bc
Revises: 3b093f2d7419
Create Date: 2015-07-17 02:46:47.842573
"""
# revision identifiers, used by Alembic.
revision = '246a6bf050bc'
down_revision = '3b093f2d7419'
from alembic import op
import sqlalchemy as sa
def upgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
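# The easdevice table presumably only exists in deployments that ship the
# Exchange (EAS) module, so skip the migration otherwise.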
if not engine.has_table('easdevice'):
return
op.add_column('easdevice',
sa.Column('retired', sa.Boolean(),
server_default=sa.sql.expression.false(),
nullable=False))
def downgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
if not engine.has_table('easdevice'):
return
op.drop_column('easdevice', 'retired')
|
<commit_before><commit_msg>Add migration for Exchange bookkeeping.<commit_after>
|
"""eas_add_device_retirement
Revision ID: 246a6bf050bc
Revises: 3b093f2d7419
Create Date: 2015-07-17 02:46:47.842573
"""
# revision identifiers, used by Alembic.
revision = '246a6bf050bc'
down_revision = '3b093f2d7419'
from alembic import op
import sqlalchemy as sa
def upgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
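# The easdevice table presumably only exists in deployments that ship the
# Exchange (EAS) module, so skip the migration otherwise.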
if not engine.has_table('easdevice'):
return
op.add_column('easdevice',
sa.Column('retired', sa.Boolean(),
server_default=sa.sql.expression.false(),
nullable=False))
def downgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
if not engine.has_table('easdevice'):
return
op.drop_column('easdevice', 'retired')
|
Add migration for Exchange bookkeeping."""eas_add_device_retirement
Revision ID: 246a6bf050bc
Revises: 3b093f2d7419
Create Date: 2015-07-17 02:46:47.842573
"""
# revision identifiers, used by Alembic.
revision = '246a6bf050bc'
down_revision = '3b093f2d7419'
from alembic import op
import sqlalchemy as sa
def upgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
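# The easdevice table presumably only exists in deployments that ship the
# Exchange (EAS) module, so skip the migration otherwise.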
if not engine.has_table('easdevice'):
return
op.add_column('easdevice',
sa.Column('retired', sa.Boolean(),
server_default=sa.sql.expression.false(),
nullable=False))
def downgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
if not engine.has_table('easdevice'):
return
op.drop_column('easdevice', 'retired')
|
<commit_before><commit_msg>Add migration for Exchange bookkeeping.<commit_after>"""eas_add_device_retirement
Revision ID: 246a6bf050bc
Revises: 3b093f2d7419
Create Date: 2015-07-17 02:46:47.842573
"""
# revision identifiers, used by Alembic.
revision = '246a6bf050bc'
down_revision = '3b093f2d7419'
from alembic import op
import sqlalchemy as sa
def upgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
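# The easdevice table presumably only exists in deployments that ship the
# Exchange (EAS) module, so skip the migration otherwise.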
if not engine.has_table('easdevice'):
return
op.add_column('easdevice',
sa.Column('retired', sa.Boolean(),
server_default=sa.sql.expression.false(),
nullable=False))
def downgrade():
from inbox.ignition import main_engine
engine = main_engine(pool_size=1, max_overflow=0)
if not engine.has_table('easdevice'):
return
op.drop_column('easdevice', 'retired')
|
|
24d4c412d7655d2f8c5d98c80741864fc1068418
|
quickstats/tests/test_query.py
|
quickstats/tests/test_query.py
|
import time
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from quickstats import models, shortcuts
class SamplesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username="SamplesTest")
self.date = "2019-11-06T11:42:53.800762+00:00"
def test_manager(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
models.Widget.objects.lookup_or_create(
labels={"A": 1, "B": 2, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
time.sleep(1)
models.Widget.objects.lookup_or_create(
labels={"B": 2, "A": 1, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
def test_shortcut(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"A": 1, "B": 2},
timestamp=timezone.now(),
)
time.sleep(1)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"B": 2, "A": 1},
timestamp=timezone.now(),
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
|
Add test for our manager and shortcut
|
Add test for our manager and shortcut
|
Python
|
mit
|
kfdm/django-simplestats,kfdm/django-simplestats
|
Add test for our manager and shortcut
|
import time
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from quickstats import models, shortcuts
class SamplesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username="SamplesTest")
self.date = "2019-11-06T11:42:53.800762+00:00"
def test_manager(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
models.Widget.objects.lookup_or_create(
labels={"A": 1, "B": 2, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
time.sleep(1)
models.Widget.objects.lookup_or_create(
labels={"B": 2, "A": 1, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
def test_shortcut(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"A": 1, "B": 2},
timestamp=timezone.now(),
)
time.sleep(1)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"B": 2, "A": 1},
timestamp=timezone.now(),
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
|
<commit_before><commit_msg>Add test for our manager and shortcut<commit_after>
|
import time
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from quickstats import models, shortcuts
class SamplesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username="SamplesTest")
self.date = "2019-11-06T11:42:53.800762+00:00"
def test_manager(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
models.Widget.objects.lookup_or_create(
labels={"A": 1, "B": 2, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
time.sleep(1)
models.Widget.objects.lookup_or_create(
labels={"B": 2, "A": 1, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
def test_shortcut(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"A": 1, "B": 2},
timestamp=timezone.now(),
)
time.sleep(1)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"B": 2, "A": 1},
timestamp=timezone.now(),
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
|
Add test for our manager and shortcutimport time
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from quickstats import models, shortcuts
class SamplesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username="SamplesTest")
self.date = "2019-11-06T11:42:53.800762+00:00"
def test_manager(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
models.Widget.objects.lookup_or_create(
labels={"A": 1, "B": 2, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
time.sleep(1)
models.Widget.objects.lookup_or_create(
labels={"B": 2, "A": 1, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
def test_shortcut(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"A": 1, "B": 2},
timestamp=timezone.now(),
)
time.sleep(1)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"B": 2, "A": 1},
timestamp=timezone.now(),
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
|
<commit_before><commit_msg>Add test for our manager and shortcut<commit_after>import time
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from quickstats import models, shortcuts
class SamplesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username="SamplesTest")
self.date = "2019-11-06T11:42:53.800762+00:00"
def test_manager(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
models.Widget.objects.lookup_or_create(
labels={"A": 1, "B": 2, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
time.sleep(1)
models.Widget.objects.lookup_or_create(
labels={"B": 2, "A": 1, "__name__": "test"},
owner=self.user,
defaults={"timestamp": timezone.now(), "title": "test"},
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
def test_shortcut(self):
print()
self.assertEqual(models.Widget.objects.count(), 0)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"A": 1, "B": 2},
timestamp=timezone.now(),
)
time.sleep(1)
shortcuts.quick_record(
self.user,
value=1,
metric="test",
labels={"B": 2, "A": 1},
timestamp=timezone.now(),
)
self.assertEqual(
models.Widget.objects.count(), 1, "Should only be one new widget"
)
|
|
534558d30b5afb2d14dd119cf27c42ae3d6aed39
|
starter_project/normalize_breton_test.py
|
starter_project/normalize_breton_test.py
|
import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
|
Add example unit test for the Breton starter project.
|
Add example unit test for the Breton starter project.
|
Python
|
apache-2.0
|
googleinterns/text-norm-for-low-resource-languages,googleinterns/text-norm-for-low-resource-languages
|
Add example unit test for the Breton starter project.
|
import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add example unit test for the Breton starter project.<commit_after>
|
import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
|
Add example unit test for the Breton starter project.import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add example unit test for the Breton starter project.<commit_after>import unittest
class TestStringMethods(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
|
|
0505061e9fef8120a02443d5e8dabcdaadc08c56
|
stateMessageParser/stateMessageParser.py
|
stateMessageParser/stateMessageParser.py
|
#!/usr/bin/env python3
description = """State Message Parser
Parses all the code in the MINDS-i-Drone project looking for comments formatted
to contain /*# followed by a state name (no spaces). All text following the
first space after the state name is considered the description.
/*#STATE_NAME ...description...*/
The names and description will be dumped to an XML file the Dashboard can use
to provide more detailed information about errors it receives containing the
state name string.
The script requires two arguments:
the folder path to search within,
followed by the destination file path for the XML message database"""
import sys, os, re
import xml.etree.ElementTree as ET
FILETYPES = {'.ino', '.h', '.c', '.hpp', '.cpp'}
MSG_REGEX = re.compile(r"/\*#(\w*) (.*)\*/")
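# group 1 captures the state name token, group 2 the free-text description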
def hasExtension(name, extList):
""" Returns true if `name` has an extension that appears in `extList` """
_, foundExt = os.path.splitext(name)
return any(map(lambda e: foundExt == e, extList))
def filesUnder(directory):
""" Returns a list of files found recursivly under `directory` """
files = list()
for root,_,names in os.walk(directory):
files += [os.path.join(root,n) for n in names]
return files
def getMatches(file, regex):
""" Returns a list of all passages in `file` matching `regex` """
source = open(file, "r")
return regex.findall(source.read())
class Message:
def __init__(self, path, match):
self.name = match[0]
self.path = path
self.text = match[1]
# check args
if len(sys.argv) != 3:
print(description)
exit(1)
searchDir = sys.argv[1]
destFile = sys.argv[2]
# find messages
filesToScan = [f for f in filesUnder(searchDir) if hasExtension(f,FILETYPES)]
messages = list()
for f in filesToScan:
path = f[len(searchDir):] # show path relative to search folder
messages += [Message(path,m) for m in getMatches(f, MSG_REGEX)]
# check for name collisions
usedNames = set()
for m in messages:
if m.name in usedNames:
print("Name Collision Detected on "+m.name)
for col in messages:
if col.name==m.name: print("Used in "+m.path)
exit(1)
usedNames.add(m.name)
# dump to XML
xmlRoot = ET.Element('MessageDB')
for m in messages:
e = ET.SubElement(xmlRoot, "Message")
e.set("path",m.path)
e.set("name",m.name)
e.text = m.text
ET.ElementTree(xmlRoot).write(destFile)
|
Implement doc comment => XML DB script for state message descriptions
|
Implement doc comment => XML DB script for state message descriptions
|
Python
|
apache-2.0
|
MINDS-i/Dashboard,MINDS-i/Dashboard,MINDS-i/Dashboard
|
Implement doc comment => XML DB script for state message descriptions
|
#!/usr/bin/env python3
description = """State Message Parser
Parses all the code in the MINDS-i-Drone project looking for comments formatted
to contain /*# followed by a state name (no spaces). All text following the
first space after the state name is considered the description.
/*#STATE_NAME ...description...*/
The names and description will be dumped to an XML file the Dashboard can use
to provide more detailed information about errors it receives containing the
state name string.
The script requires two arguments:
the folder path to search within,
followed by the destination file path for the XML message database"""
import sys, os, re
import xml.etree.ElementTree as ET
FILETYPES = {'.ino', '.h', '.c', '.hpp', '.cpp'}
MSG_REGEX = re.compile(r"/\*#(\w*) (.*)\*/")
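# group 1 captures the state name token, group 2 the free-text description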
def hasExtension(name, extList):
""" Returns true if `name` has an extension that appears in `extList` """
_, foundExt = os.path.splitext(name)
return any(map(lambda e: foundExt == e, extList))
def filesUnder(directory):
""" Returns a list of files found recursivly under `directory` """
files = list()
for root,_,names in os.walk(directory):
files += [os.path.join(root,n) for n in names]
return files
def getMatches(file, regex):
""" Returns a list of all passages in `file` matching `regex` """
source = open(file, "r")
return regex.findall(source.read())
class Message:
def __init__(self, path, match):
self.name = match[0]
self.path = path
self.text = match[1]
# check args
if len(sys.argv) != 3:
print(description)
exit(1)
searchDir = sys.argv[1]
destFile = sys.argv[2]
# find messages
filesToScan = [f for f in filesUnder(searchDir) if hasExtension(f,FILETYPES)]
messages = list()
for f in filesToScan:
path = f[len(searchDir):] # show path relative to search folder
messages += [Message(path,m) for m in getMatches(f, MSG_REGEX)]
# check for name collisions
usedNames = set()
for m in messages:
if m.name in usedNames:
print("Name Collision Detected on "+m.name)
for col in messages:
if col.name==m.name: print("Used in "+m.path)
exit(1)
usedNames.add(m.name)
# dump to XML
xmlRoot = ET.Element('MessageDB')
for m in messages:
e = ET.SubElement(xmlRoot, "Message")
e.set("path",m.path)
e.set("name",m.name)
e.text = m.text
ET.ElementTree(xmlRoot).write(destFile)
|
<commit_before><commit_msg>Implement doc comment => XML DB script for state message descriptions<commit_after>
|
#!/usr/bin/env python3
description = """State Message Parser
Parses all the code in the MINDS-i-Drone project looking for comments formatted
to contain /*# followed by a state name (no spaces). All text following the
first space after the state name is considered the description.
/*#STATE_NAME ...description...*/
The names and description will be dumped to an XML file the Dashboard can use
to provide more detailed information about errors it receives containing the
state name string.
The script requires two arguments:
the folder path to search within,
followed by the destination file path for the XML message database"""
import sys, os, re
import xml.etree.ElementTree as ET
FILETYPES = {'.ino', '.h', '.c', '.hpp', '.cpp'}
MSG_REGEX = re.compile(r"/\*#(\w*) (.*)\*/")
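# group 1 captures the state name token, group 2 the free-text description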
def hasExtension(name, extList):
""" Returns true if `name` has an extension that appears in `extList` """
_, foundExt = os.path.splitext(name)
return any(map(lambda e: foundExt == e, extList))
def filesUnder(directory):
""" Returns a list of files found recursivly under `directory` """
files = list()
for root,_,names in os.walk(directory):
files += [os.path.join(root,n) for n in names]
return files
def getMatches(file, regex):
""" Returns a list of all passages in `file` matching `regex` """
source = open(file, "r")
return regex.findall(source.read())
class Message:
def __init__(self, path, match):
self.name = match[0]
self.path = path
self.text = match[1]
# check args
if len(sys.argv) != 3:
print(description)
exit(1)
searchDir = sys.argv[1]
destFile = sys.argv[2]
# find messages
filesToScan = [f for f in filesUnder(searchDir) if hasExtension(f,FILETYPES)]
messages = list()
for f in filesToScan:
path = f[len(searchDir):] # show path relative to search folder
messages += [Message(path,m) for m in getMatches(f, MSG_REGEX)]
# check for name collisions
usedNames = set()
for m in messages:
if m.name in usedNames:
print("Name Collision Detected on "+m.name)
for col in messages:
if col.name==m.name: print("Used in "+m.path)
exit(1)
usedNames.add(m.name)
# dump to XML
xmlRoot = ET.Element('MessageDB')
for m in messages:
e = ET.SubElement(xmlRoot, "Message")
e.set("path",m.path)
e.set("name",m.name)
e.text = m.text
ET.ElementTree(xmlRoot).write(destFile)
|
Implement doc comment => XML DB script for state message descriptions#!/usr/bin/env python3
description = """State Message Parser
Parses all the code in the MINDS-i-Drone project looking for comments formatted
to contain /*# followed by a state name (no spaces). All text following the
first space after the state name is considered the description.
/*#STATE_NAME ...description...*/
The names and description will be dumped to an XML file the Dashboard can use
to provide more detailed information about errors it receives containing the
state name string.
The script requires two arguments:
the folder path to search within,
followed by the destination file path for the XML message database"""
import sys, os, re
import xml.etree.ElementTree as ET
FILETYPES = {'.ino', '.h', '.c', '.hpp', '.cpp'}
MSG_REGEX = re.compile(r"/\*#(\w*) (.*)\*/")
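# group 1 captures the state name token, group 2 the free-text description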
def hasExtension(name, extList):
""" Returns true if `name` has an extension that appears in `extList` """
_, foundExt = os.path.splitext(name)
return any(map(lambda e: foundExt == e, extList))
def filesUnder(directory):
""" Returns a list of files found recursivly under `directory` """
files = list()
for root,_,names in os.walk(directory):
files += [os.path.join(root,n) for n in names]
return files
def getMatches(file, regex):
""" Returns a list of all passages in `file` matching `regex` """
source = open(file, "r")
return regex.findall(source.read())
class Message:
def __init__(self, path, match):
self.name = match[0]
self.path = path
self.text = match[1]
# check args
if len(sys.argv) != 3:
print(description)
exit(1)
searchDir = sys.argv[1]
destFile = sys.argv[2]
# find messages
filesToScan = [f for f in filesUnder(searchDir) if hasExtension(f,FILETYPES)]
messages = list()
for f in filesToScan:
path = f[len(searchDir):] # show path relative to search folder
messages += [Message(path,m) for m in getMatches(f, MSG_REGEX)]
# check for name collisions
usedNames = set()
for m in messages:
if m.name in usedNames:
print("Name Collision Detected on "+m.name)
for col in messages:
if col.name==m.name: print("Used in "+m.path)
exit(1)
usedNames.add(m.name)
# dump to XML
xmlRoot = ET.Element('MessageDB')
for m in messages:
e = ET.SubElement(xmlRoot, "Message")
e.set("path",m.path)
e.set("name",m.name)
e.text = m.text
ET.ElementTree(xmlRoot).write(destFile)
|
<commit_before><commit_msg>Implement doc comment => XML DB script for state message descriptions<commit_after>#!/usr/bin/env python3
description = """State Message Parser
Parses all the code in the MINDS-i-Drone project looking for comments formatted
to contain /*# followed by a state name (no spaces). All text following the
first space after the state name is considered the description.
/*#STATE_NAME ...description...*/
The names and description will be dumped to an XML file the Dashboard can use
to provide more detailed information about errors it receives containing the
state name string.
The script requires two arguments:
the folder path to search within,
followed by the destination file path for the XML message database"""
import sys, os, re
import xml.etree.ElementTree as ET
FILETYPES = {'.ino', '.h', '.c', '.hpp', '.cpp'}
MSG_REGEX = re.compile(r"/\*#(\w*) (.*)\*/")
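# group 1 captures the state name token, group 2 the free-text description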
def hasExtension(name, extList):
""" Returns true if `name` has an extension that appears in `extList` """
_, foundExt = os.path.splitext(name)
return any(map(lambda e: foundExt == e, extList))
def filesUnder(directory):
""" Returns a list of files found recursivly under `directory` """
files = list()
for root,_,names in os.walk(directory):
files += [os.path.join(root,n) for n in names]
return files
def getMatches(file, regex):
""" Returns a list of all passages in `file` matching `regex` """
source = open(file, "r")
return regex.findall(source.read())
class Message:
def __init__(self, path, match):
self.name = match[0]
self.path = path
self.text = match[1]
# check args
if len(sys.argv) != 3:
print(description)
exit(1)
searchDir = sys.argv[1]
destFile = sys.argv[2]
# find messages
filesToScan = [f for f in filesUnder(searchDir) if hasExtension(f,FILETYPES)]
messages = list()
for f in filesToScan:
path = f[len(searchDir):] # show path relative to search folder
messages += [Message(path,m) for m in getMatches(f, MSG_REGEX)]
# check for name collisions
usedNames = set()
for m in messages:
if m.name in usedNames:
print("Name Collision Detected on "+m.name)
for col in messages:
if col.name==m.name: print("Used in "+m.path)
exit(1)
usedNames.add(m.name)
# dump to XML
xmlRoot = ET.Element('MessageDB')
for m in messages:
e = ET.SubElement(xmlRoot, "Message")
e.set("path",m.path)
e.set("name",m.name)
e.text = m.text
ET.ElementTree(xmlRoot).write(destFile)
|
|
e4b8275a745d88ec8763e94ea7fa06c3cf152388
|
examples/tornado.py
|
examples/tornado.py
|
import tornado.web
from tornado.ioloop import IOLoop
from pushka import AmazonSESService
# NOTE: make sure your Amazon SES user has `ses:SendEmail` permission!
# Here's user policy example:
#
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Effect": "Allow",
# "Action": [
# "ses:SendRawEmail",
# "ses:SendEmail"
# ],
# "Resource": "*"
# }
# ]
# }
access_id='***'
secret_key='***'
mail_to = ['rudy@05bit.com']
mail_from = 'rudy@05bit.com'
subject = "Pushka SES test"
text = "La-la-la! La-la-la!"
class AsyncMailHandler(tornado.web.RequestHandler):
async def get(self):
resp = await self.application.mailer.send_mail(
text=text,
subject=subject,
recipients=mail_to,
sender=mail_from)
self.write(resp)
if __name__ == "__main__":
IOLoop.configure('tornado.platform.asyncio.AsyncIOMainLoop')
app = tornado.web.Application([
(r"/", AsyncMailHandler),
], debug=True)
app.mailer = AmazonSESService(
access_id=access_id,
secret_key=secret_key,
loop=IOLoop.current())
print("Running server at http://%s:%s" % ('127.0.0.1', 8888))
app.listen(8888)
IOLoop.current().start()
|
Add examples dir with Tornado example source
|
Add examples dir with Tornado example source
|
Python
|
apache-2.0
|
rudyryk/pushka
|
Add examples dir with Tornado example source
|
import tornado.web
from tornado.ioloop import IOLoop
from pushka import AmazonSESService
# NOTE: make sure your Amazon SES user has `ses:SendEmail` permission!
# Here's user policy example:
#
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Effect": "Allow",
# "Action": [
# "ses:SendRawEmail",
# "ses:SendEmail"
# ],
# "Resource": "*"
# }
# ]
# }
access_id='***'
secret_key='***'
mail_to = ['rudy@05bit.com']
mail_from = 'rudy@05bit.com'
subject = "Pushka SES test"
text = "La-la-la! La-la-la!"
class AsyncMailHandler(tornado.web.RequestHandler):
async def get(self):
resp = await self.application.mailer.send_mail(
text=text,
subject=subject,
recipients=mail_to,
sender=mail_from)
self.write(resp)
if __name__ == "__main__":
IOLoop.configure('tornado.platform.asyncio.AsyncIOMainLoop')
app = tornado.web.Application([
(r"/", AsyncMailHandler),
], debug=True)
app.mailer = AmazonSESService(
access_id=access_id,
secret_key=secret_key,
loop=IOLoop.current())
print("Running server at http://%s:%s" % ('127.0.0.1', 8888))
app.listen(8888)
IOLoop.current().start()
|
<commit_before><commit_msg>Add examples dir with Tornado example source<commit_after>
|
import tornado.web
from tornado.ioloop import IOLoop
from pushka import AmazonSESService
# NOTE: make sure your Amazon SES user has `ses:SendEmail` permission!
# Here's user policy example:
#
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Effect": "Allow",
# "Action": [
# "ses:SendRawEmail",
# "ses:SendEmail"
# ],
# "Resource": "*"
# }
# ]
# }
access_id='***'
secret_key='***'
mail_to = ['rudy@05bit.com']
mail_from = 'rudy@05bit.com'
subject = "Pushka SES test"
text = "La-la-la! La-la-la!"
class AsyncMailHandler(tornado.web.RequestHandler):
async def get(self):
resp = await self.application.mailer.send_mail(
text=text,
subject=subject,
recipients=mail_to,
sender=mail_from)
self.write(resp)
if __name__ == "__main__":
IOLoop.configure('tornado.platform.asyncio.AsyncIOMainLoop')
app = tornado.web.Application([
(r"/", AsyncMailHandler),
], debug=True)
app.mailer = AmazonSESService(
access_id=access_id,
secret_key=secret_key,
loop=IOLoop.current())
print("Running server at http://%s:%s" % ('127.0.0.1', 8888))
app.listen(8888)
IOLoop.current().start()
|
Add examples dir with Tornado example sourceimport tornado.web
from tornado.ioloop import IOLoop
from pushka import AmazonSESService
# NOTE: make sure your Amazon SES user has `ses:SendEmail` permission!
# Here's user policy example:
#
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Effect": "Allow",
# "Action": [
# "ses:SendRawEmail",
# "ses:SendEmail"
# ],
# "Resource": "*"
# }
# ]
# }
access_id='***'
secret_key='***'
mail_to = ['rudy@05bit.com']
mail_from = 'rudy@05bit.com'
subject = "Pushka SES test"
text = "La-la-la! La-la-la!"
class AsyncMailHandler(tornado.web.RequestHandler):
async def get(self):
resp = await self.application.mailer.send_mail(
text=text,
subject=subject,
recipients=mail_to,
sender=mail_from)
self.write(resp)
if __name__ == "__main__":
IOLoop.configure('tornado.platform.asyncio.AsyncIOMainLoop')
app = tornado.web.Application([
(r"/", AsyncMailHandler),
], debug=True)
app.mailer = AmazonSESService(
access_id=access_id,
secret_key=secret_key,
loop=IOLoop.current())
print("Running server at http://%s:%s" % ('127.0.0.1', 8888))
app.listen(8888)
IOLoop.current().start()
|
<commit_before><commit_msg>Add examples dir with Tornado example source<commit_after>import tornado.web
from tornado.ioloop import IOLoop
from pushka import AmazonSESService
# NOTE: make sure your Amazon SES user has `ses:SendEmail` permission!
# Here's user policy example:
#
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Effect": "Allow",
# "Action": [
# "ses:SendRawEmail",
# "ses:SendEmail"
# ],
# "Resource": "*"
# }
# ]
# }
access_id='***'
secret_key='***'
mail_to = ['rudy@05bit.com']
mail_from = 'rudy@05bit.com'
subject = "Pushka SES test"
text = "La-la-la! La-la-la!"
class AsyncMailHandler(tornado.web.RequestHandler):
async def get(self):
resp = await self.application.mailer.send_mail(
text=text,
subject=subject,
recipients=mail_to,
sender=mail_from)
self.write(resp)
if __name__ == "__main__":
IOLoop.configure('tornado.platform.asyncio.AsyncIOMainLoop')
app = tornado.web.Application([
(r"/", AsyncMailHandler),
], debug=True)
app.mailer = AmazonSESService(
access_id=access_id,
secret_key=secret_key,
loop=IOLoop.current())
print("Running server at http://%s:%s" % ('127.0.0.1', 8888))
app.listen(8888)
IOLoop.current().start()
|
|
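A quick smoke test for the example server above, assuming it is already running locally with real SES credentials filled in (the URL matches the listen address in the example):

import tornado.ioloop
from tornado.httpclient import AsyncHTTPClient

async def check():
    # GET / triggers AsyncMailHandler.get, which sends the mail via SES
    resp = await AsyncHTTPClient().fetch('http://127.0.0.1:8888/')
    print(resp.code, resp.body[:200])

tornado.ioloop.IOLoop.current().run_sync(check)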
d5609f7110f31a5d046fd09106a6d143c88ac54d
|
scripts/renew_user_sessions.py
|
scripts/renew_user_sessions.py
|
#!/usr/bin/env python
"""Log out users by renewing their session tokens.
This is meant to be used when new terms of service are published so
users have to log in again and are presented the form to accept the new
terms of service.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
import click
from byceps.database import db
from byceps.services.authentication.session import service as session_service
from byceps.services.user.models.user import User
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
user_ids = {u.id for u in find_enabled_users()}
renew_session_tokens(user_ids)
click.secho('Done.', fg='green')
def find_enabled_users():
return User.query.filter_by(enabled=True).all()
def renew_session_tokens(user_ids):
now = datetime.utcnow()
for user_id in user_ids:
renew_session_token(user_id, now)
print('.', end='', flush=True)
print()
db.session.commit()
def renew_session_token(user_id, updated_at):
token = session_service.find_session_token_for_user(user_id)
session_service.update_session_token(token, updated_at)
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to renew user sessions
|
Add script to renew user sessions
|
Python
|
bsd-3-clause
|
m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps
|
Add script to renew user sessions
|
#!/usr/bin/env python
"""Log out users by renewing their session tokens.
This is meant to be used when new terms of service are published so
users have to log in again and are presented the form to accept the new
terms of service.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
import click
from byceps.database import db
from byceps.services.authentication.session import service as session_service
from byceps.services.user.models.user import User
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
user_ids = {u.id for u in find_enabled_users()}
renew_session_tokens(user_ids)
click.secho('Done.', fg='green')
def find_enabled_users():
return User.query.filter_by(enabled=True).all()
def renew_session_tokens(user_ids):
now = datetime.utcnow()
for user_id in user_ids:
renew_session_token(user_id, now)
print('.', end='', flush=True)
print()
db.session.commit()
def renew_session_token(user_id, updated_at):
token = session_service.find_session_token_for_user(user_id)
session_service.update_session_token(token, updated_at)
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to renew user sessions<commit_after>
|
#!/usr/bin/env python
"""Log out users by renewing their session tokens.
This is meant to be used when new terms of service are published so
users have to log in again and are presented the form to accept the new
terms of service.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
import click
from byceps.database import db
from byceps.services.authentication.session import service as session_service
from byceps.services.user.models.user import User
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
user_ids = {u.id for u in find_enabled_users()}
renew_session_tokens(user_ids)
click.secho('Done.', fg='green')
def find_enabled_users():
return User.query.filter_by(enabled=True).all()
def renew_session_tokens(user_ids):
now = datetime.utcnow()
for user_id in user_ids:
renew_session_token(user_id, now)
print('.', end='', flush=True)
print()
db.session.commit()
def renew_session_token(user_id, updated_at):
token = session_service.find_session_token_for_user(user_id)
session_service.update_session_token(token, updated_at)
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to renew user sessions#!/usr/bin/env python
"""Log out users by renewing their session tokens.
This is meant to be used when new terms of service are published so
users have to log in again and are presented the form to accept the new
terms of service.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
import click
from byceps.database import db
from byceps.services.authentication.session import service as session_service
from byceps.services.user.models.user import User
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
user_ids = {u.id for u in find_enabled_users()}
renew_session_tokens(user_ids)
click.secho('Done.', fg='green')
def find_enabled_users():
return User.query.filter_by(enabled=True).all()
def renew_session_tokens(user_ids):
now = datetime.utcnow()
for user_id in user_ids:
renew_session_token(user_id, now)
print('.', end='', flush=True)
print()
db.session.commit()
def renew_session_token(user_id, updated_at):
token = session_service.find_session_token_for_user(user_id)
session_service.update_session_token(token, updated_at)
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to renew user sessions<commit_after>#!/usr/bin/env python
"""Log out users by renewing their session tokens.
This is meant to be used when new terms of service are published so
users have to log in again and are presented the form to accept the new
terms of service.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
import click
from byceps.database import db
from byceps.services.authentication.session import service as session_service
from byceps.services.user.models.user import User
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
@click.command()
def execute():
user_ids = {u.id for u in find_enabled_users()}
renew_session_tokens(user_ids)
click.secho('Done.', fg='green')
def find_enabled_users():
return User.query.filter_by(enabled=True).all()
def renew_session_tokens(user_ids):
now = datetime.utcnow()
for user_id in user_ids:
renew_session_token(user_id, now)
print('.', end='', flush=True)
print()
db.session.commit()
def renew_session_token(user_id, updated_at):
token = session_service.find_session_token_for_user(user_id)
session_service.update_session_token(token, updated_at)
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
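The script above holds every token update in a single transaction; on a large install it may be preferable to commit in batches. A sketch of that variation using only the calls already shown (the batch size is arbitrary):

def renew_session_tokens_batched(user_ids, batch_size=500):
    now = datetime.utcnow()
    for i, user_id in enumerate(user_ids, start=1):
        renew_session_token(user_id, now)
        if i % batch_size == 0:
            db.session.commit()  # keep individual transactions small
    db.session.commit()  # flush the final partial batch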
4a405d5884dee29604cdc0f8908a0c2a6c74168e
|
classes/killmails_cache.py
|
classes/killmails_cache.py
|
# -*- coding: utf-8 -*-
import sqlite3
import json
from classes.sitecfg import SiteConfig
class KillMailsCache:
def __init__(self, siteconfig: SiteConfig):
self._conn = sqlite3.connect(siteconfig.ZKB_CACHE_DIR + '/killmails.db', check_same_thread=False)
self.check_tables()
def check_tables(self):
cur = self._conn.cursor()
cur.execute('SELECT * FROM sqlite_master WHERE type=\'table\'')
rows = cur.fetchall()
tables = []
for row in rows:
            tables.append(str(row[1]))  # sqlite_master row: (type, name, tbl_name, ...)
        if 'killmails' not in tables:
            q = 'CREATE TABLE killmails (kill_id TEXT, kill_hash TEXT, ' \
                'json TEXT, PRIMARY KEY (kill_id, kill_hash))'
cur.execute(q)
self._conn.commit()
cur.close()
def get_killmail(self, kill_id: str, kill_hash: str) -> dict:
ret = {}
cur = self._conn.cursor()
cur.execute('SELECT json FROM killmails WHERE kill_id=? AND kill_hash=?', (kill_id, kill_hash))
rows = cur.fetchall()
if len(rows) > 0:
json_str = rows[0][0]
if json_str != '':
try:
ret = json.loads(json_str)
except json.JSONDecodeError:
pass
cur.close()
return ret
def save_killmail(self, kill_id: str, kill_hash: str, killmail: object):
json_text = json.dumps(killmail)
cur = self._conn.cursor()
        cur.execute('INSERT OR REPLACE INTO killmails (kill_id, kill_hash, json) VALUES (?,?,?)',
                    (kill_id, kill_hash, json_text))
self._conn.commit()
cur.close()
|
Add new class to cache killmails indefinitely, forever
|
Add new class to cache killmails indefinitely, forever
|
Python
|
mit
|
minlexx/whdbx_web,minlexx/whdbx_web,minlexx/whdbx_web,minlexx/whdbx_web,minlexx/whdbx_web
|
Add new class to cache killmails indefinitely, forever
|
# -*- coding: utf-8 -*-
import sqlite3
import json
from classes.sitecfg import SiteConfig
class KillMailsCache:
def __init__(self, siteconfig: SiteConfig):
self._conn = sqlite3.connect(siteconfig.ZKB_CACHE_DIR + '/killmails.db', check_same_thread=False)
self.check_tables()
def check_tables(self):
cur = self._conn.cursor()
cur.execute('SELECT * FROM sqlite_master WHERE type=\'table\'')
rows = cur.fetchall()
tables = []
for row in rows:
            tables.append(str(row[1]))  # sqlite_master row: (type, name, tbl_name, ...)
        if 'killmails' not in tables:
            q = 'CREATE TABLE killmails (kill_id TEXT, kill_hash TEXT, ' \
                'json TEXT, PRIMARY KEY (kill_id, kill_hash))'
cur.execute(q)
self._conn.commit()
cur.close()
def get_killmail(self, kill_id: str, kill_hash: str) -> dict:
ret = {}
cur = self._conn.cursor()
cur.execute('SELECT json FROM killmails WHERE kill_id=? AND kill_hash=?', (kill_id, kill_hash))
rows = cur.fetchall()
if len(rows) > 0:
json_str = rows[0][0]
if json_str != '':
try:
ret = json.loads(json_str)
except json.JSONDecodeError:
pass
cur.close()
return ret
def save_killmail(self, kill_id: str, kill_hash: str, killmail: object):
json_text = json.dumps(killmail)
cur = self._conn.cursor()
        cur.execute('INSERT OR REPLACE INTO killmails (kill_id, kill_hash, json) VALUES (?,?,?)',
                    (kill_id, kill_hash, json_text))
self._conn.commit()
cur.close()
|
<commit_before><commit_msg>Add new class to cache killmails indefinitely, forever<commit_after>
|
# -*- coding: utf-8 -*-
import sqlite3
import json
from classes.sitecfg import SiteConfig
class KillMailsCache:
def __init__(self, siteconfig: SiteConfig):
self._conn = sqlite3.connect(siteconfig.ZKB_CACHE_DIR + '/killmails.db', check_same_thread=False)
self.check_tables()
def check_tables(self):
cur = self._conn.cursor()
cur.execute('SELECT * FROM sqlite_master WHERE type=\'table\'')
rows = cur.fetchall()
tables = []
for row in rows:
            tables.append(str(row[1]))  # sqlite_master row: (type, name, tbl_name, ...)
        if 'killmails' not in tables:
            q = 'CREATE TABLE killmails (kill_id TEXT, kill_hash TEXT, ' \
                'json TEXT, PRIMARY KEY (kill_id, kill_hash))'
cur.execute(q)
self._conn.commit()
cur.close()
def get_killmail(self, kill_id: str, kill_hash: str) -> dict:
ret = {}
cur = self._conn.cursor()
cur.execute('SELECT json FROM killmails WHERE kill_id=? AND kill_hash=?', (kill_id, kill_hash))
rows = cur.fetchall()
if len(rows) > 0:
json_str = rows[0][0]
if json_str != '':
try:
ret = json.loads(json_str)
except json.JSONDecodeError:
pass
cur.close()
return ret
def save_killmail(self, kill_id: str, kill_hash: str, killmail: object):
json_text = json.dumps(killmail)
cur = self._conn.cursor()
        cur.execute('INSERT OR REPLACE INTO killmails (kill_id, kill_hash, json) VALUES (?,?,?)',
                    (kill_id, kill_hash, json_text))
self._conn.commit()
cur.close()
|
Add new class to cache killmails indefinitely, forever# -*- coding: utf-8 -*-
import sqlite3
import json
from classes.sitecfg import SiteConfig
class KillMailsCache:
def __init__(self, siteconfig: SiteConfig):
self._conn = sqlite3.connect(siteconfig.ZKB_CACHE_DIR + '/killmails.db', check_same_thread=False)
self.check_tables()
def check_tables(self):
cur = self._conn.cursor()
cur.execute('SELECT * FROM sqlite_master WHERE type=\'table\'')
rows = cur.fetchall()
tables = []
for row in rows:
            tables.append(str(row[1]))  # sqlite_master row: (type, name, tbl_name, ...)
        if 'killmails' not in tables:
            q = 'CREATE TABLE killmails (kill_id TEXT, kill_hash TEXT, ' \
                'json TEXT, PRIMARY KEY (kill_id, kill_hash))'
cur.execute(q)
self._conn.commit()
cur.close()
def get_killmail(self, kill_id: str, kill_hash: str) -> dict:
ret = {}
cur = self._conn.cursor()
cur.execute('SELECT json FROM killmails WHERE kill_id=? AND kill_hash=?', (kill_id, kill_hash))
rows = cur.fetchall()
if len(rows) > 0:
json_str = rows[0][0]
if json_str != '':
try:
ret = json.loads(json_str)
except json.JSONDecodeError:
pass
cur.close()
return ret
def save_killmail(self, kill_id: str, kill_hash: str, killmail: object):
json_text = json.dumps(killmail)
cur = self._conn.cursor()
        cur.execute('INSERT OR REPLACE INTO killmails (kill_id, kill_hash, json) VALUES (?,?,?)',
                    (kill_id, kill_hash, json_text))
self._conn.commit()
cur.close()
|
<commit_before><commit_msg>Add new class to cache killmails indefinitely, forever<commit_after># -*- coding: utf-8 -*-
import sqlite3
import json
from classes.sitecfg import SiteConfig
class KillMailsCache:
def __init__(self, siteconfig: SiteConfig):
self._conn = sqlite3.connect(siteconfig.ZKB_CACHE_DIR + '/killmails.db', check_same_thread=False)
self.check_tables()
def check_tables(self):
cur = self._conn.cursor()
cur.execute('SELECT * FROM sqlite_master WHERE type=\'table\'')
rows = cur.fetchall()
tables = []
for row in rows:
            tables.append(str(row[1]))  # sqlite_master row: (type, name, tbl_name, ...)
        if 'killmails' not in tables:
            q = 'CREATE TABLE killmails (kill_id TEXT, kill_hash TEXT, ' \
                'json TEXT, PRIMARY KEY (kill_id, kill_hash))'
cur.execute(q)
self._conn.commit()
cur.close()
def get_killmail(self, kill_id: str, kill_hash: str) -> dict:
ret = {}
cur = self._conn.cursor()
cur.execute('SELECT json FROM killmails WHERE kill_id=? AND kill_hash=?', (kill_id, kill_hash))
rows = cur.fetchall()
if len(rows) > 0:
json_str = rows[0][0]
if json_str != '':
try:
ret = json.loads(json_str)
except json.JSONDecodeError:
pass
cur.close()
return ret
def save_killmail(self, kill_id: str, kill_hash: str, killmail: object):
json_text = json.dumps(killmail)
cur = self._conn.cursor()
        cur.execute('INSERT OR REPLACE INTO killmails (kill_id, kill_hash, json) VALUES (?,?,?)',
                    (kill_id, kill_hash, json_text))
self._conn.commit()
cur.close()
|
|
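A hypothetical round trip through the cache; the SiteConfig construction is not shown in the original, so the cfg line below is a placeholder:

from classes.sitecfg import SiteConfig
from classes.killmails_cache import KillMailsCache

cfg = SiteConfig()  # assumed to expose ZKB_CACHE_DIR
cache = KillMailsCache(cfg)

killmail = {'killmail_id': 12345, 'victim': {'ship_type_id': 670}}
cache.save_killmail('12345', 'abcdef', killmail)
assert cache.get_killmail('12345', 'abcdef') == killmail  # a miss returns {}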
dcf8d5dc9e36043e27e207a308da1e6a1f0d00d6
|
memefarm/fontutil.py
|
memefarm/fontutil.py
|
""" PIL doesn't have a built-in method for drawing text with a border """
from PIL import ImageFont
def drawTextWithBorder(draw, text, coords,
fontname="Impact", fontsize=80, strokewidth=3,
color="#fff", strokecolor="#000"):
""" Draw text with a border. Although PIL doesn't support this, it can be
faked by drawing the text in the border color, with offsets, and then
drawing the text in the center on top. See
http://stackoverflow.com/a/8050556/4414003 """
font = ImageFont.truetype(fontname, fontsize)
x, y = coords
# Draw background
for c in ((x - strokewidth, y - strokewidth),
(x + strokewidth, y - strokewidth),
(x - strokewidth, y + strokewidth),
(x + strokewidth, y + strokewidth)):
draw.text(c, text, font=font, fill=strokecolor)
draw.text(coords, text, font=font, fill=color)
if __name__ == "__main__":
from PIL import Image, ImageDraw
i = Image.new("RGB", (500, 500), "#abcdef")
d = ImageDraw.Draw(i)
drawTextWithBorder(d, "SUCH FONT", (10, 10))
i.show()
|
Add function for drawing font with border
|
Add function for drawing font with border
|
Python
|
mit
|
The-Penultimate-Defenestrator/memefarm
|
Add function for drawing font with border
|
""" PIL doesn't have a built-in method for drawing text with a border """
from PIL import ImageFont
def drawTextWithBorder(draw, text, coords,
fontname="Impact", fontsize=80, strokewidth=3,
color="#fff", strokecolor="#000"):
""" Draw text with a border. Although PIL doesn't support this, it can be
faked by drawing the text in the border color, with offsets, and then
drawing the text in the center on top. See
http://stackoverflow.com/a/8050556/4414003 """
font = ImageFont.truetype(fontname, fontsize)
x, y = coords
# Draw background
for c in ((x - strokewidth, y - strokewidth),
(x + strokewidth, y - strokewidth),
(x - strokewidth, y + strokewidth),
(x + strokewidth, y + strokewidth)):
draw.text(c, text, font=font, fill=strokecolor)
draw.text(coords, text, font=font, fill=color)
if __name__ == "__main__":
from PIL import Image, ImageDraw
i = Image.new("RGB", (500, 500), "#abcdef")
d = ImageDraw.Draw(i)
drawTextWithBorder(d, "SUCH FONT", (10, 10))
i.show()
|
<commit_before><commit_msg>Add function for drawing font with border<commit_after>
|
""" PIL doesn't have a built-in method for drawing text with a border """
from PIL import ImageFont
def drawTextWithBorder(draw, text, coords,
fontname="Impact", fontsize=80, strokewidth=3,
color="#fff", strokecolor="#000"):
""" Draw text with a border. Although PIL doesn't support this, it can be
faked by drawing the text in the border color, with offsets, and then
drawing the text in the center on top. See
http://stackoverflow.com/a/8050556/4414003 """
font = ImageFont.truetype(fontname, fontsize)
x, y = coords
# Draw background
for c in ((x - strokewidth, y - strokewidth),
(x + strokewidth, y - strokewidth),
(x - strokewidth, y + strokewidth),
(x + strokewidth, y + strokewidth)):
draw.text(c, text, font=font, fill=strokecolor)
draw.text(coords, text, font=font, fill=color)
if __name__ == "__main__":
from PIL import Image, ImageDraw
i = Image.new("RGB", (500, 500), "#abcdef")
d = ImageDraw.Draw(i)
drawTextWithBorder(d, "SUCH FONT", (10, 10))
i.show()
|
Add function for drawing font with border""" PIL doesn't have a built-in method for drawing text with a border """
from PIL import ImageFont
def drawTextWithBorder(draw, text, coords,
fontname="Impact", fontsize=80, strokewidth=3,
color="#fff", strokecolor="#000"):
""" Draw text with a border. Although PIL doesn't support this, it can be
faked by drawing the text in the border color, with offsets, and then
drawing the text in the center on top. See
http://stackoverflow.com/a/8050556/4414003 """
font = ImageFont.truetype(fontname, fontsize)
x, y = coords
# Draw background
for c in ((x - strokewidth, y - strokewidth),
(x + strokewidth, y - strokewidth),
(x - strokewidth, y + strokewidth),
(x + strokewidth, y + strokewidth)):
draw.text(c, text, font=font, fill=strokecolor)
draw.text(coords, text, font=font, fill=color)
if __name__ == "__main__":
from PIL import Image, ImageDraw
i = Image.new("RGB", (500, 500), "#abcdef")
d = ImageDraw.Draw(i)
drawTextWithBorder(d, "SUCH FONT", (10, 10))
i.show()
|
<commit_before><commit_msg>Add function for drawing font with border<commit_after>""" PIL doesn't have a built-in method for drawing text with a border """
from PIL import ImageFont
def drawTextWithBorder(draw, text, coords,
fontname="Impact", fontsize=80, strokewidth=3,
color="#fff", strokecolor="#000"):
""" Draw text with a border. Although PIL doesn't support this, it can be
faked by drawing the text in the border color, with offsets, and then
drawing the text in the center on top. See
http://stackoverflow.com/a/8050556/4414003 """
font = ImageFont.truetype(fontname, fontsize)
x, y = coords
# Draw background
for c in ((x - strokewidth, y - strokewidth),
(x + strokewidth, y - strokewidth),
(x - strokewidth, y + strokewidth),
(x + strokewidth, y + strokewidth)):
draw.text(c, text, font=font, fill=strokecolor)
draw.text(coords, text, font=font, fill=color)
if __name__ == "__main__":
from PIL import Image, ImageDraw
i = Image.new("RGB", (500, 500), "#abcdef")
d = ImageDraw.Draw(i)
drawTextWithBorder(d, "SUCH FONT", (10, 10))
i.show()
|
|
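At large stroke widths the four diagonal offsets can leave thin gaps on the cardinal directions; a sketch of the same trick with all eight neighbouring offsets (same signature and assumptions as the function above):

from PIL import ImageFont

def drawTextWithBorder8(draw, text, coords, fontname="Impact", fontsize=80,
                        strokewidth=3, color="#fff", strokecolor="#000"):
    font = ImageFont.truetype(fontname, fontsize)
    x, y = coords
    for dx in (-strokewidth, 0, strokewidth):
        for dy in (-strokewidth, 0, strokewidth):
            if (dx, dy) != (0, 0):  # skip the center; it is drawn last
                draw.text((x + dx, y + dy), text, font=font, fill=strokecolor)
    draw.text(coords, text, font=font, fill=color)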
4ee0f8776b4bf74b20df5f0a69e68f8aa1d82a6d
|
tests/unit/express_checkout/facade_tests.py
|
tests/unit/express_checkout/facade_tests.py
|
from decimal import Decimal as D
from unittest.mock import patch
from django.test import TestCase
from paypalhttp.http_response import construct_object
from paypal.express_checkout.facade import refund_order
from paypal.express_checkout.models import ExpressCheckoutTransaction
from .mocked_data import REFUND_ORDER_DATA_MINIMAL
class FacadeTests(TestCase):
def setUp(self):
super().setUp()
        # The transaction record must exist before the refund can be applied
self.txn = ExpressCheckoutTransaction.objects.create(
order_id='4MW805572N795704B',
capture_id='45315376249711632',
amount=D('0.99'),
currency='GBP',
status=ExpressCheckoutTransaction.CREATED,
intent=ExpressCheckoutTransaction.CAPTURE,
)
def test_refund_order(self):
with patch('paypal.express_checkout.facade.PaymentProcessor.refund_order') as mocked_refund_order:
mocked_refund_order.return_value = construct_object('Result', REFUND_ORDER_DATA_MINIMAL)
refund_order('4MW805572N795704B')
self.txn.refresh_from_db()
assert self.txn.refund_id == '0SM71185A67927728'
mocked_refund_order.assert_called_once_with('45315376249711632', D('0.99'), 'GBP')
|
Add test for `refund_order` function of `facade` module
|
Add test for `refund_order` function of `facade` module
|
Python
|
bsd-3-clause
|
lpakula/django-oscar-paypal,lpakula/django-oscar-paypal,django-oscar/django-oscar-paypal,st8st8/django-oscar-paypal,st8st8/django-oscar-paypal,django-oscar/django-oscar-paypal,lpakula/django-oscar-paypal,evonove/django-oscar-paypal,evonove/django-oscar-paypal,st8st8/django-oscar-paypal,evonove/django-oscar-paypal,django-oscar/django-oscar-paypal
|
Add test for `refund_order` function of `facade` module
|
from decimal import Decimal as D
from unittest.mock import patch
from django.test import TestCase
from paypalhttp.http_response import construct_object
from paypal.express_checkout.facade import refund_order
from paypal.express_checkout.models import ExpressCheckoutTransaction
from .mocked_data import REFUND_ORDER_DATA_MINIMAL
class FacadeTests(TestCase):
def setUp(self):
super().setUp()
        # The transaction record must exist before the refund can be applied
self.txn = ExpressCheckoutTransaction.objects.create(
order_id='4MW805572N795704B',
capture_id='45315376249711632',
amount=D('0.99'),
currency='GBP',
status=ExpressCheckoutTransaction.CREATED,
intent=ExpressCheckoutTransaction.CAPTURE,
)
def test_refund_order(self):
with patch('paypal.express_checkout.facade.PaymentProcessor.refund_order') as mocked_refund_order:
mocked_refund_order.return_value = construct_object('Result', REFUND_ORDER_DATA_MINIMAL)
refund_order('4MW805572N795704B')
self.txn.refresh_from_db()
assert self.txn.refund_id == '0SM71185A67927728'
mocked_refund_order.assert_called_once_with('45315376249711632', D('0.99'), 'GBP')
|
<commit_before><commit_msg>Add test for `refund_order` function of `facade` module<commit_after>
|
from decimal import Decimal as D
from unittest.mock import patch
from django.test import TestCase
from paypalhttp.http_response import construct_object
from paypal.express_checkout.facade import refund_order
from paypal.express_checkout.models import ExpressCheckoutTransaction
from .mocked_data import REFUND_ORDER_DATA_MINIMAL
class FacadeTests(TestCase):
def setUp(self):
super().setUp()
        # The transaction record must exist before the refund can be applied
self.txn = ExpressCheckoutTransaction.objects.create(
order_id='4MW805572N795704B',
capture_id='45315376249711632',
amount=D('0.99'),
currency='GBP',
status=ExpressCheckoutTransaction.CREATED,
intent=ExpressCheckoutTransaction.CAPTURE,
)
def test_refund_order(self):
with patch('paypal.express_checkout.facade.PaymentProcessor.refund_order') as mocked_refund_order:
mocked_refund_order.return_value = construct_object('Result', REFUND_ORDER_DATA_MINIMAL)
refund_order('4MW805572N795704B')
self.txn.refresh_from_db()
assert self.txn.refund_id == '0SM71185A67927728'
mocked_refund_order.assert_called_once_with('45315376249711632', D('0.99'), 'GBP')
|
Add test for `refund_order` function of `facade` modulefrom decimal import Decimal as D
from unittest.mock import patch
from django.test import TestCase
from paypalhttp.http_response import construct_object
from paypal.express_checkout.facade import refund_order
from paypal.express_checkout.models import ExpressCheckoutTransaction
from .mocked_data import REFUND_ORDER_DATA_MINIMAL
class FacadeTests(TestCase):
def setUp(self):
super().setUp()
        # The transaction record must exist before the refund can be applied
self.txn = ExpressCheckoutTransaction.objects.create(
order_id='4MW805572N795704B',
capture_id='45315376249711632',
amount=D('0.99'),
currency='GBP',
status=ExpressCheckoutTransaction.CREATED,
intent=ExpressCheckoutTransaction.CAPTURE,
)
def test_refund_order(self):
with patch('paypal.express_checkout.facade.PaymentProcessor.refund_order') as mocked_refund_order:
mocked_refund_order.return_value = construct_object('Result', REFUND_ORDER_DATA_MINIMAL)
refund_order('4MW805572N795704B')
self.txn.refresh_from_db()
assert self.txn.refund_id == '0SM71185A67927728'
mocked_refund_order.assert_called_once_with('45315376249711632', D('0.99'), 'GBP')
|
<commit_before><commit_msg>Add test for `refund_order` function of `facade` module<commit_after>from decimal import Decimal as D
from unittest.mock import patch
from django.test import TestCase
from paypalhttp.http_response import construct_object
from paypal.express_checkout.facade import refund_order
from paypal.express_checkout.models import ExpressCheckoutTransaction
from .mocked_data import REFUND_ORDER_DATA_MINIMAL
class FacadeTests(TestCase):
def setUp(self):
super().setUp()
        # The transaction record must exist before the refund can be applied
self.txn = ExpressCheckoutTransaction.objects.create(
order_id='4MW805572N795704B',
capture_id='45315376249711632',
amount=D('0.99'),
currency='GBP',
status=ExpressCheckoutTransaction.CREATED,
intent=ExpressCheckoutTransaction.CAPTURE,
)
def test_refund_order(self):
with patch('paypal.express_checkout.facade.PaymentProcessor.refund_order') as mocked_refund_order:
mocked_refund_order.return_value = construct_object('Result', REFUND_ORDER_DATA_MINIMAL)
refund_order('4MW805572N795704B')
self.txn.refresh_from_db()
assert self.txn.refund_id == '0SM71185A67927728'
mocked_refund_order.assert_called_once_with('45315376249711632', D('0.99'), 'GBP')
|
|
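For reference, a plausible shape for REFUND_ORDER_DATA_MINIMAL inferred from the assertion above; the real fixture in .mocked_data may carry more fields, and the status key is an assumption:

REFUND_ORDER_DATA_MINIMAL = {
    'id': '0SM71185A67927728',  # surfaced by construct_object and stored as refund_id
    'status': 'COMPLETED',
}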
8f078438c34f845c71b45571d02d09a34bd04ded
|
dimagi/utils/rate_limit.py
|
dimagi/utils/rate_limit.py
|
from dimagi.utils.couch.cache.cache_core import get_redis_client
def rate_limit(key, actions_allowed=60, how_often=60):
"""
A simple util to be used for rate limiting, using redis as a backend.
key - a unique key which describes the action you are rate limiting
actions_allowed - the number of actions to allow for key every how_often
seconds before returning False
returns True to proceed with the action, or False to not proceed
For example, to only allow a single project space to send 100 SMS max every
30 seconds:
if rate_limit('send-sms-for-projectname', actions_allowed=100, how_often=30):
<perform action>
else:
<delay action>
"""
# We need access to the raw redis client because calling incr on
# a django_redis RedisCache object raises an error if the key
# doesn't exist.
client = get_redis_client().client.get_client()
    # Increment the key. If the key doesn't exist (or already expired),
# redis sets the value to 0 before incrementing.
value = client.incr(key)
if value == 1 or client.ttl(key) == -1:
# Set the key's expiration if it's the first action we're granting.
        # As a precaution, we also check to make sure that the key actually has
# an expiration set in case an error occurred the first time we tried to
# set the expiration. If it doesn't have an expiration (ttl == -1), then
# we'll set it here again.
client.expire(key, how_often)
return value <= actions_allowed
|
Add simple rate limiting util
|
Add simple rate limiting util
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add simple rate limiting util
|
from dimagi.utils.couch.cache.cache_core import get_redis_client
def rate_limit(key, actions_allowed=60, how_often=60):
"""
A simple util to be used for rate limiting, using redis as a backend.
key - a unique key which describes the action you are rate limiting
actions_allowed - the number of actions to allow for key every how_often
seconds before returning False
returns True to proceed with the action, or False to not proceed
For example, to only allow a single project space to send 100 SMS max every
30 seconds:
if rate_limit('send-sms-for-projectname', actions_allowed=100, how_often=30):
<perform action>
else:
<delay action>
"""
# We need access to the raw redis client because calling incr on
# a django_redis RedisCache object raises an error if the key
# doesn't exist.
client = get_redis_client().client.get_client()
    # Increment the key. If the key doesn't exist (or already expired),
# redis sets the value to 0 before incrementing.
value = client.incr(key)
if value == 1 or client.ttl(key) == -1:
# Set the key's expiration if it's the first action we're granting.
        # As a precaution, we also check to make sure that the key actually has
# an expiration set in case an error occurred the first time we tried to
# set the expiration. If it doesn't have an expiration (ttl == -1), then
# we'll set it here again.
client.expire(key, how_often)
return value <= actions_allowed
|
<commit_before><commit_msg>Add simple rate limiting util<commit_after>
|
from dimagi.utils.couch.cache.cache_core import get_redis_client
def rate_limit(key, actions_allowed=60, how_often=60):
"""
A simple util to be used for rate limiting, using redis as a backend.
key - a unique key which describes the action you are rate limiting
actions_allowed - the number of actions to allow for key every how_often
seconds before returning False
returns True to proceed with the action, or False to not proceed
For example, to only allow a single project space to send 100 SMS max every
30 seconds:
if rate_limit('send-sms-for-projectname', actions_allowed=100, how_often=30):
<perform action>
else:
<delay action>
"""
# We need access to the raw redis client because calling incr on
# a django_redis RedisCache object raises an error if the key
# doesn't exist.
client = get_redis_client().client.get_client()
    # Increment the key. If the key doesn't exist (or already expired),
# redis sets the value to 0 before incrementing.
value = client.incr(key)
if value == 1 or client.ttl(key) == -1:
# Set the key's expiration if it's the first action we're granting.
        # As a precaution, we also check to make sure that the key actually has
# an expiration set in case an error occurred the first time we tried to
# set the expiration. If it doesn't have an expiration (ttl == -1), then
# we'll set it here again.
client.expire(key, how_often)
return value <= actions_allowed
|
Add simple rate limiting utilfrom dimagi.utils.couch.cache.cache_core import get_redis_client
def rate_limit(key, actions_allowed=60, how_often=60):
"""
A simple util to be used for rate limiting, using redis as a backend.
key - a unique key which describes the action you are rate limiting
actions_allowed - the number of actions to allow for key every how_often
seconds before returning False
returns True to proceed with the action, or False to not proceed
For example, to only allow a single project space to send 100 SMS max every
30 seconds:
if rate_limit('send-sms-for-projectname', actions_allowed=100, how_often=30):
<perform action>
else:
<delay action>
"""
# We need access to the raw redis client because calling incr on
# a django_redis RedisCache object raises an error if the key
# doesn't exist.
client = get_redis_client().client.get_client()
    # Increment the key. If the key doesn't exist (or already expired),
# redis sets the value to 0 before incrementing.
value = client.incr(key)
if value == 1 or client.ttl(key) == -1:
# Set the key's expiration if it's the first action we're granting.
        # As a precaution, we also check to make sure that the key actually has
# an expiration set in case an error occurred the first time we tried to
# set the expiration. If it doesn't have an expiration (ttl == -1), then
# we'll set it here again.
client.expire(key, how_often)
return value <= actions_allowed
|
<commit_before><commit_msg>Add simple rate limiting util<commit_after>from dimagi.utils.couch.cache.cache_core import get_redis_client
def rate_limit(key, actions_allowed=60, how_often=60):
"""
A simple util to be used for rate limiting, using redis as a backend.
key - a unique key which describes the action you are rate limiting
actions_allowed - the number of actions to allow for key every how_often
seconds before returning False
returns True to proceed with the action, or False to not proceed
For example, to only allow a single project space to send 100 SMS max every
30 seconds:
if rate_limit('send-sms-for-projectname', actions_allowed=100, how_often=30):
<perform action>
else:
<delay action>
"""
# We need access to the raw redis client because calling incr on
# a django_redis RedisCache object raises an error if the key
# doesn't exist.
client = get_redis_client().client.get_client()
    # Increment the key. If the key doesn't exist (or already expired),
# redis sets the value to 0 before incrementing.
value = client.incr(key)
if value == 1 or client.ttl(key) == -1:
# Set the key's expiration if it's the first action we're granting.
        # As a precaution, we also check to make sure that the key actually has
# an expiration set in case an error occurred the first time we tried to
# set the expiration. If it doesn't have an expiration (ttl == -1), then
# we'll set it here again.
client.expire(key, how_often)
return value <= actions_allowed
|
|
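Example wiring for the util above, following the docstring's SMS scenario; the key name, limits, and callbacks are illustrative only:

from dimagi.utils.rate_limit import rate_limit

def try_send_sms(domain, send, delay):
    if rate_limit('send-sms-for-%s' % domain, actions_allowed=100, how_often=30):
        send()   # proceed with the action
    else:
        delay()  # back off and retry later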
520146d0aac1adf6f377e02b4412c9f7f0619bf7
|
guess_language.py
|
guess_language.py
|
import click
import os
import codecs
import pandas as pd
from xtas.tasks.single import guess_language
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def guess(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data = {}
data['language'] = []
txt_files = os.listdir(input_dir)
for txt_file in txt_files:
with codecs.open(os.path.join(input_dir, txt_file)) as f:
txt = f.read()
data['language'].append(guess_language(txt)[0])
df = pd.DataFrame(data=data, index=txt_files)
df.to_csv(output_file)
if __name__ == '__main__':
guess()
|
Add script to guess the language of all documents
|
Add script to guess the language of all documents
Input: directory with text files
Output: csv file that specifies the language of each document in
the input dir
|
Python
|
apache-2.0
|
WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln
|
Add script to guess the language of all documents
Input: directory with text files
Output: csv file that specifies the language of each document in
the input dir
|
import click
import os
import codecs
import pandas as pd
from xtas.tasks.single import guess_language
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def guess(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data = {}
data['language'] = []
txt_files = os.listdir(input_dir)
for txt_file in txt_files:
with codecs.open(os.path.join(input_dir, txt_file)) as f:
txt = f.read()
data['language'].append(guess_language(txt)[0])
df = pd.DataFrame(data=data, index=txt_files)
df.to_csv(output_file)
if __name__ == '__main__':
guess()
|
<commit_before><commit_msg>Add script to guess the language of all documents
Input: directory with text files
Output: csv file that specifies the language of each document in
the input dir<commit_after>
|
import click
import os
import codecs
import pandas as pd
from xtas.tasks.single import guess_language
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def guess(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data = {}
data['language'] = []
txt_files = os.listdir(input_dir)
for txt_file in txt_files:
with codecs.open(os.path.join(input_dir, txt_file)) as f:
txt = f.read()
data['language'].append(guess_language(txt)[0])
df = pd.DataFrame(data=data, index=txt_files)
df.to_csv(output_file)
if __name__ == '__main__':
guess()
|
Add script to guess the language of all documents
Input: directory with text files
Output: csv file that specifies the language of each document in
the input dirimport click
import os
import codecs
import pandas as pd
from xtas.tasks.single import guess_language
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def guess(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data = {}
data['language'] = []
txt_files = os.listdir(input_dir)
for txt_file in txt_files:
with codecs.open(os.path.join(input_dir, txt_file)) as f:
txt = f.read()
data['language'].append(guess_language(txt)[0])
df = pd.DataFrame(data=data, index=txt_files)
df.to_csv(output_file)
if __name__ == '__main__':
guess()
|
<commit_before><commit_msg>Add script to guess the language of all documents
Input: directory with text files
Output: csv file that specifies the language of each document in
the input dir<commit_after>import click
import os
import codecs
import pandas as pd
from xtas.tasks.single import guess_language
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def guess(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
data = {}
data['language'] = []
txt_files = os.listdir(input_dir)
for txt_file in txt_files:
with codecs.open(os.path.join(input_dir, txt_file)) as f:
txt = f.read()
data['language'].append(guess_language(txt)[0])
df = pd.DataFrame(data=data, index=txt_files)
df.to_csv(output_file)
if __name__ == '__main__':
guess()
|
|
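A quick invocation sketch using click's test runner; the input directory and output path are placeholders:

from click.testing import CliRunner
from guess_language import guess

result = CliRunner().invoke(guess, ['txt_in/', 'out/languages.csv'])
print(result.exit_code, result.output)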
b487df165bb257b327fdaf0588240f48f0ded0db
|
ipython_config.py
|
ipython_config.py
|
c = get_config()
# Kernel config
c.IPKernelApp.pylab = 'inline' # if you want plotting support always
# Notebook config
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8080
c.NotebookApp.notebook_dir = '/work'
c.NotebookApp.trust_xheaders = True
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' https://*.terminal.com"
}
}
|
Add ipython config file with notebook dir set to /work
|
Add ipython config file with notebook dir set to /work
|
Python
|
mit
|
louisdorard/bml-base,louisdorard/bml-base
|
Add ipython config file with notebook dir set to /work
|
c = get_config()
# Kernel config
c.IPKernelApp.pylab = 'inline' # if you want plotting support always
# Notebook config
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8080
c.NotebookApp.notebook_dir = '/work'
c.NotebookApp.trust_xheaders = True
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' https://*.terminal.com"
}
}
|
<commit_before><commit_msg>Add ipython config file with notebook dir set to /work<commit_after>
|
c = get_config()
# Kernel config
c.IPKernelApp.pylab = 'inline' # if you want plotting support always
# Notebook config
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8080
c.NotebookApp.notebook_dir = '/work'
c.NotebookApp.trust_xheaders = True
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' https://*.terminal.com"
}
}
|
Add ipython config file with notebook dir set to /workc = get_config()
# Kernel config
c.IPKernelApp.pylab = 'inline' # if you want plotting support always
# Notebook config
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8080
c.NotebookApp.notebook_dir = '/work'
c.NotebookApp.trust_xheaders = True
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' https://*.terminal.com"
}
}
|
<commit_before><commit_msg>Add ipython config file with notebook dir set to /work<commit_after>c = get_config()
# Kernel config
c.IPKernelApp.pylab = 'inline' # if you want plotting support always
# Notebook config
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8080
c.NotebookApp.notebook_dir = '/work'
c.NotebookApp.trust_xheaders = True
c.NotebookApp.tornado_settings = {
'headers': {
'Content-Security-Policy': "frame-ancestors 'self' https://*.terminal.com"
}
}
|
|
59f8b80d6a5e37b9d91ee53fd900d9499f0391ca
|
src/collectors/KVMCollector/KVMCollector.py
|
src/collectors/KVMCollector/KVMCollector.py
|
from diamond import *
import diamond.collector
import os
class KVMCollector(diamond.collector.Collector):
"""
Collects /sys/kernel/debug/kvm/*
"""
PROC = '/sys/kernel/debug/kvm'
def get_default_config(self):
"""
Returns the default collector settings
"""
return {
'path' : 'kvm',
}
def collect(self):
if not os.path.isdir(self.PROC):
self.log.error('/sys/kernel/debug/kvm is missing. Did you "mount -t debugfs debugfs /sys/kernel/debug"?')
return {}
for file in os.listdir(self.PROC):
filepath = os.path.abspath(os.path.join(self.PROC, file))
            with open(filepath, 'r') as fh:
                metric_value = self.derivative(file, float(fh.readline()), 4294967295)
            self.publish(file, metric_value)
|
Add in a kvm collector that collects the same stats as kvm_stat does
|
Add in a kvm collector that collects the same stats as kvm_stat does
|
Python
|
mit
|
Netuitive/Diamond,skbkontur/Diamond,hamelg/Diamond,joel-airspring/Diamond,hamelg/Diamond,tuenti/Diamond,CYBERBUGJR/Diamond,Slach/Diamond,MediaMath/Diamond,tusharmakkar08/Diamond,anandbhoraskar/Diamond,mzupan/Diamond,disqus/Diamond,tuenti/Diamond,datafiniti/Diamond,mfriedenhagen/Diamond,jumping/Diamond,anandbhoraskar/Diamond,Nihn/Diamond-1,Netuitive/netuitive-diamond,russss/Diamond,EzyInsights/Diamond,tusharmakkar08/Diamond,CYBERBUGJR/Diamond,Ensighten/Diamond,Basis/Diamond,timchenxiaoyu/Diamond,jriguera/Diamond,acquia/Diamond,tusharmakkar08/Diamond,jaingaurav/Diamond,gg7/diamond,dcsquared13/Diamond,stuartbfox/Diamond,TinLe/Diamond,szibis/Diamond,cannium/Diamond,codepython/Diamond,hvnsweeting/Diamond,krbaker/Diamond,joel-airspring/Diamond,skbkontur/Diamond,disqus/Diamond,acquia/Diamond,Clever/Diamond,socialwareinc/Diamond,thardie/Diamond,zoidbergwill/Diamond,sebbrandt87/Diamond,tellapart/Diamond,cannium/Diamond,janisz/Diamond-1,python-diamond/Diamond,mzupan/Diamond,Ensighten/Diamond,bmhatfield/Diamond,Basis/Diamond,saucelabs/Diamond,Precis/Diamond,mfriedenhagen/Diamond,thardie/Diamond,eMerzh/Diamond-1,krbaker/Diamond,krbaker/Diamond,sebbrandt87/Diamond,mzupan/Diamond,eMerzh/Diamond-1,saucelabs/Diamond,hvnsweeting/Diamond,Slach/Diamond,stuartbfox/Diamond,Nihn/Diamond-1,Basis/Diamond,jaingaurav/Diamond,EzyInsights/Diamond,Nihn/Diamond-1,krbaker/Diamond,codepython/Diamond,Netuitive/Diamond,python-diamond/Diamond,jaingaurav/Diamond,socialwareinc/Diamond,tusharmakkar08/Diamond,metamx/Diamond,hamelg/Diamond,codepython/Diamond,python-diamond/Diamond,gg7/diamond,timchenxiaoyu/Diamond,eMerzh/Diamond-1,Ssawa/Diamond,h00dy/Diamond,zoidbergwill/Diamond,Ormod/Diamond,TAKEALOT/Diamond,acquia/Diamond,Ssawa/Diamond,ramjothikumar/Diamond,datafiniti/Diamond,Precis/Diamond,janisz/Diamond-1,metamx/Diamond,EzyInsights/Diamond,datafiniti/Diamond,Clever/Diamond,codepython/Diamond,MediaMath/Diamond,gg7/diamond,Slach/Diamond,Netuitive/Diamond,Precis/Diamond,mfriedenhagen/Diamond,MichaelDoyle/Diamond,anandbhoraskar/Diamond,Nihn/Diamond-1,h00dy/Diamond,h00dy/Diamond,signalfx/Diamond,MichaelDoyle/Diamond,stuartbfox/Diamond,zoidbergwill/Diamond,skbkontur/Diamond,actmd/Diamond,actmd/Diamond,janisz/Diamond-1,cannium/Diamond,thardie/Diamond,socialwareinc/Diamond,mfriedenhagen/Diamond,anandbhoraskar/Diamond,hvnsweeting/Diamond,tuenti/Diamond,cannium/Diamond,rtoma/Diamond,hvnsweeting/Diamond,TAKEALOT/Diamond,jumping/Diamond,Ensighten/Diamond,TAKEALOT/Diamond,dcsquared13/Diamond,tellapart/Diamond,szibis/Diamond,acquia/Diamond,saucelabs/Diamond,works-mobile/Diamond,MediaMath/Diamond,sebbrandt87/Diamond,CYBERBUGJR/Diamond,dcsquared13/Diamond,Precis/Diamond,Ensighten/Diamond,gg7/diamond,russss/Diamond,skbkontur/Diamond,signalfx/Diamond,Basis/Diamond,actmd/Diamond,bmhatfield/Diamond,rtoma/Diamond,Ssawa/Diamond,TinLe/Diamond,Ormod/Diamond,szibis/Diamond,Ormod/Diamond,Netuitive/netuitive-diamond,jriguera/Diamond,hamelg/Diamond,rtoma/Diamond,disqus/Diamond,stuartbfox/Diamond,bmhatfield/Diamond,Netuitive/netuitive-diamond,joel-airspring/Diamond,works-mobile/Diamond,rtoma/Diamond,Slach/Diamond,metamx/Diamond,signalfx/Diamond,bmhatfield/Diamond,Netuitive/netuitive-diamond,tellapart/Diamond,TinLe/Diamond,datafiniti/Diamond,CYBERBUGJR/Diamond,thardie/Diamond,dcsquared13/Diamond,tuenti/Diamond,timchenxiaoyu/Diamond,sebbrandt87/Diamond,Netuitive/Diamond,jaingaurav/Diamond,works-mobile/Diamond,mzupan/Diamond,jumping/Diamond,russss/Diamond,signalfx/Diamond,ceph/Diamond,MichaelDoyle/Diamond,ceph/Diamond,works-mobile/Diamond,Ormod/Diamond,tell
apart/Diamond,Clever/Diamond,ramjothikumar/Diamond,h00dy/Diamond,jumping/Diamond,ceph/Diamond,ramjothikumar/Diamond,TinLe/Diamond,eMerzh/Diamond-1,MediaMath/Diamond,zoidbergwill/Diamond,Clever/Diamond,Ssawa/Diamond,socialwareinc/Diamond,jriguera/Diamond,jriguera/Diamond,TAKEALOT/Diamond,timchenxiaoyu/Diamond,janisz/Diamond-1,russss/Diamond,MichaelDoyle/Diamond,EzyInsights/Diamond,szibis/Diamond,ceph/Diamond,actmd/Diamond,joel-airspring/Diamond,ramjothikumar/Diamond,saucelabs/Diamond
|
Add in a kvm collector that collects the same stats as kvm_stat does
|
from diamond import *
import diamond.collector
import os
class KVMCollector(diamond.collector.Collector):
"""
Collects /sys/kernel/debug/kvm/*
"""
PROC = '/sys/kernel/debug/kvm'
def get_default_config(self):
"""
Returns the default collector settings
"""
return {
'path' : 'kvm',
}
def collect(self):
if not os.path.isdir(self.PROC):
self.log.error('/sys/kernel/debug/kvm is missing. Did you "mount -t debugfs debugfs /sys/kernel/debug"?')
return {}
for file in os.listdir(self.PROC):
filepath = os.path.abspath(os.path.join(self.PROC, file))
            with open(filepath, 'r') as fh:
                metric_value = self.derivative(file, float(fh.readline()), 4294967295)
            self.publish(file, metric_value)
|
<commit_before><commit_msg>Add in a kvm collector that collects the same stats as kvm_stat does<commit_after>
|
from diamond import *
import diamond.collector
import os
class KVMCollector(diamond.collector.Collector):
"""
Collects /sys/kernel/debug/kvm/*
"""
PROC = '/sys/kernel/debug/kvm'
def get_default_config(self):
"""
Returns the default collector settings
"""
return {
'path' : 'kvm',
}
def collect(self):
if not os.path.isdir(self.PROC):
self.log.error('/sys/kernel/debug/kvm is missing. Did you "mount -t debugfs debugfs /sys/kernel/debug"?')
return {}
for file in os.listdir(self.PROC):
filepath = os.path.abspath(os.path.join(self.PROC, file))
            with open(filepath, 'r') as fh:
                metric_value = self.derivative(file, float(fh.readline()), 4294967295)
            self.publish(file, metric_value)
|
Add in a kvm collector that collects the same stats as kvm_stat does
from diamond import *
import diamond.collector
import os
class KVMCollector(diamond.collector.Collector):
"""
Collects /sys/kernel/debug/kvm/*
"""
PROC = '/sys/kernel/debug/kvm'
def get_default_config(self):
"""
Returns the default collector settings
"""
return {
'path' : 'kvm',
}
def collect(self):
if not os.path.isdir(self.PROC):
self.log.error('/sys/kernel/debug/kvm is missing. Did you "mount -t debugfs debugfs /sys/kernel/debug"?')
return {}
for file in os.listdir(self.PROC):
filepath = os.path.abspath(os.path.join(self.PROC, file))
            with open(filepath, 'r') as fh:
                metric_value = self.derivative(file, float(fh.readline()), 4294967295)
            self.publish(file, metric_value)
|
<commit_before><commit_msg>Add in a kvm collector that collects the same stats as kvm_stat does<commit_after>
from diamond import *
import diamond.collector
import os
class KVMCollector(diamond.collector.Collector):
"""
Collects /sys/kernel/debug/kvm/*
"""
PROC = '/sys/kernel/debug/kvm'
def get_default_config(self):
"""
Returns the default collector settings
"""
return {
'path' : 'kvm',
}
def collect(self):
if not os.path.isdir(self.PROC):
self.log.error('/sys/kernel/debug/kvm is missing. Did you "mount -t debugfs debugfs /sys/kernel/debug"?')
return {}
for file in os.listdir(self.PROC):
filepath = os.path.abspath(os.path.join(self.PROC, file))
            with open(filepath, 'r') as fh:
                metric_value = self.derivative(file, float(fh.readline()), 4294967295)
            self.publish(file, metric_value)
|
|
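A simplified standalone sketch of the counter-to-rate logic Collector.derivative applies here; it ignores the framework's per-metric state handling and the collection interval, but shows the wrap-around behaviour the 4294967295 argument enables:

_last = {}

def derivative(name, new_value, max_value=4294967295):
    old_value = _last.get(name)
    _last[name] = new_value
    if old_value is None:
        return 0  # first reading: no delta yet
    if new_value < old_value:
        return max_value - old_value + new_value  # counter wrapped
    return new_value - old_value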
309df37b7381027bcd2691a86b935e2ea0f8ffce
|
python_scripts/WordCount.py
|
python_scripts/WordCount.py
|
#!/usr/bin/python
import re
# this one in honor of 4th July, or pick text file you have!!!!!!!
filename = 'out.txt'
# create list of lower case words, \s+ --> match any whitespace(s)
# you can replace file(filename).read() with given string
word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)
# create dictionary of word:frequency pairs
freq_dic = {}
# punctuation marks to be removed
punctuation = re.compile(r'[.?!,":;]')
for word in word_list:
# remove punctuation marks
word = punctuation.sub("", word)
# form dictionary
try:
freq_dic[word] += 1
    except KeyError:
freq_dic[word] = 1
print 'Unique words:', len(freq_dic)
# create list of (key, val) tuple pairs
freq_list = freq_dic.items()
# sort by key or word
freq_list.sort()
# display result
for word, freq in freq_list:
print word, freq
|
Add initial word counting file.
|
Add initial word counting file.
|
Python
|
agpl-3.0
|
AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud
|
Add initial word counting file.
|
#!/usr/bin/python
import re
# this one in honor of 4th July, or pick text file you have!!!!!!!
filename = 'out.txt'
# create list of lower case words, \s+ --> match any whitespace(s)
# you can replace file(filename).read() with given string
word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)
# create dictionary of word:frequency pairs
freq_dic = {}
# punctuation marks to be removed
punctuation = re.compile(r'[.?!,":;]')
for word in word_list:
# remove punctuation marks
word = punctuation.sub("", word)
# form dictionary
try:
freq_dic[word] += 1
    except KeyError:
freq_dic[word] = 1
print 'Unique words:', len(freq_dic)
# create list of (key, val) tuple pairs
freq_list = freq_dic.items()
# sort by key or word
freq_list.sort()
# display result
for word, freq in freq_list:
print word, freq
|
<commit_before><commit_msg>Add initial word counting file.<commit_after>
|
#!/usr/bin/python
import re
# this one in honor of 4th July, or pick text file you have!!!!!!!
filename = 'out.txt'
# create list of lower case words, \s+ --> match any whitespace(s)
# you can replace file(filename).read() with given string
word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)
# create dictionary of word:frequency pairs
freq_dic = {}
# punctuation marks to be removed
punctuation = re.compile(r'[.?!,":;]')
for word in word_list:
# remove punctuation marks
word = punctuation.sub("", word)
# form dictionary
try:
freq_dic[word] += 1
    except KeyError:
freq_dic[word] = 1
print 'Unique words:', len(freq_dic)
# create list of (key, val) tuple pairs
freq_list = freq_dic.items()
# sort by key or word
freq_list.sort()
# display result
for word, freq in freq_list:
print word, freq
|
Add initial word counting file.#!/usr/bin/python
import re
# this one in honor of 4th July, or pick text file you have!!!!!!!
filename = 'out.txt'
# create list of lower case words, \s+ --> match any whitespace(s)
# you can replace file(filename).read() with given string
word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)
# create dictionary of word:frequency pairs
freq_dic = {}
# punctuation marks to be removed
punctuation = re.compile(r'[.?!,":;]')
for word in word_list:
# remove punctuation marks
word = punctuation.sub("", word)
# form dictionary
try:
freq_dic[word] += 1
    except KeyError:
freq_dic[word] = 1
print 'Unique words:', len(freq_dic)
# create list of (key, val) tuple pairs
freq_list = freq_dic.items()
# sort by key or word
freq_list.sort()
# display result
for word, freq in freq_list:
print word, freq
|
<commit_before><commit_msg>Add initial word counting file.<commit_after>#!/usr/bin/python
import re
# this one in honor of 4th July, or pick text file you have!!!!!!!
filename = 'out.txt'
# create list of lower case words, \s+ --> match any whitespace(s)
# you can replace file(filename).read() with given string
word_list = re.split('\s+', file(filename).read().lower())
print 'Words in text:', len(word_list)
# create dictionary of word:frequency pairs
freq_dic = {}
# punctuation marks to be removed
punctuation = re.compile(r'[.?!,":;]')
for word in word_list:
# remove punctuation marks
word = punctuation.sub("", word)
# form dictionary
try:
freq_dic[word] += 1
    except KeyError:
freq_dic[word] = 1
print 'Unique words:', len(freq_dic)
# create list of (key, val) tuple pairs
freq_list = freq_dic.items()
# sort by key or word
freq_list.sort()
# display result
for word, freq in freq_list:
print word, freq
|
|
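A Python 3 equivalent of the script above using collections.Counter ('out.txt' is the same assumed input file):

import re
from collections import Counter

with open('out.txt') as f:
    word_list = re.split(r'\s+', f.read().lower())

punctuation = re.compile(r'[.?!,":;]')
freq = Counter(punctuation.sub('', word) for word in word_list if word)

print('Words in text:', len(word_list))
print('Unique words:', len(freq))
for word, count in sorted(freq.items()):
    print(word, count)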
b2dceb40d8e04771098a3aad47c7656071c00b74
|
rxpy-test.py
|
rxpy-test.py
|
#!/usr/bin/env python3
from auth_tokens import *
from rx import Observable
try:
import json
except ImportError:
import simplejson as json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_stream = TwitterStream(auth=oauth)
iterator = twitter_stream.statuses.sample()
observable = Observable.from_iterable(iterator)
observable.subscribe(print)
tweet_count = 1
|
Add sample use of RxPy.
|
Add sample use of RxPy.
|
Python
|
mit
|
Pysellus/streaming-api-test,Pysellus/streaming-api-test
|
Add sample use of RxPy.
|
#!/usr/bin/env python3
from auth_tokens import *
from rx import Observable
try:
import json
except ImportError:
import simplejson as json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_stream = TwitterStream(auth=oauth)
iterator = twitter_stream.statuses.sample()
observable = Observable.from_iterable(iterator)
observable.subscribe(print)
tweet_count = 1
|
<commit_before><commit_msg>Add sample use of RxPy.<commit_after>
|
#!/usr/bin/env python3
from auth_tokens import *
from rx import Observable
try:
import json
except ImportError:
import simplejson as json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_stream = TwitterStream(auth=oauth)
iterator = twitter_stream.statuses.sample()
observable = Observable.from_iterable(iterator)
observable.subscribe(print)
tweet_count = 1
|
Add sample use of RxPy.#!/usr/bin/env python3
from auth_tokens import *
from rx import Observable
try:
import json
except ImportError:
import simplejson as json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_stream = TwitterStream(auth=oauth)
iterator = twitter_stream.statuses.sample()
observable = Observable.from_iterable(iterator)
observable.subscribe(print)
tweet_count = 1
|
<commit_before><commit_msg>Add sample use of RxPy.<commit_after>#!/usr/bin/env python3
from auth_tokens import *
from rx import Observable
try:
import json
except ImportError:
import simplejson as json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_stream = TwitterStream(auth=oauth)
iterator = twitter_stream.statuses.sample()
observable = Observable.from_iterable(iterator)
observable.subscribe(print)
tweet_count = 1
|
|
c95869f3438268c757638ac1d857360e80070ccf
|
sksos/cli.py
|
sksos/cli.py
|
#!/usr/bin/env python
import argparse
import logging
import sys
import numpy as np
from sos import SOS
def get_stdout():
if sys.version_info.major < 3:
return sys.stdout
else:
return sys.stdout.buffer
def main():
parser = argparse.ArgumentParser(description="Stochastic Outlier Selection")
parser.add_argument('-t', '--threshold', type=float, default=None,
help=("Float between 0.0 and 1.0 to use as threshold for selecting "
"outliers. By default, this is not set, causing the outlier "
"probabilities instead of the classification to be outputted"))
parser.add_argument('-d', '--delimiter', type=str, default=',', help=(
"String to use to separate values. By default, this is a comma."))
parser.add_argument('-i', '--input', type=argparse.FileType('rb'),
default=sys.stdin, help=("File to read data set from. By default, "
"this is <stdin>."))
parser.add_argument('-m', '--metric', type=str, default='euclidean', help=(
"String indicating the metric to use to compute the dissimilarity "
"matrix. By default, this is 'euclidean'. Use 'none' if the data set "
"is a dissimilarity matrix."))
parser.add_argument('-o', '--output', type=argparse.FileType('wb'),
default=get_stdout(), help=("File to write the computed outlier "
"probabilities to. By default, this is <stdout>."))
parser.add_argument('-p', '--perplexity', type=float, default=30.0,
help="Float to use as perpexity. By default, this is 30.0.")
args = parser.parse_args()
X = np.loadtxt(args.input, delimiter=args.delimiter, ndmin=2)
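# O holds one outlier probability in [0, 1] per row of X.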
O = SOS(args.perplexity, args.metric).predict(X)
if args.threshold is None:
np.savetxt(args.output, O, '%1.8f')
else:
np.savetxt(args.output, O>=args.threshold, b'%1d')
return 0
if __name__ == '__main__':
exit(main())
|
Put command-line code into separate file
|
Put command-line code into separate file
|
Python
|
bsd-3-clause
|
jeroenjanssens/sos
|
Put command-line code into separate file
|
#!/usr/bin/env python
import argparse
import logging
import sys
import numpy as np
from sos import SOS
def get_stdout():
if sys.version_info.major < 3:
return sys.stdout
else:
return sys.stdout.buffer
def main():
parser = argparse.ArgumentParser(description="Stochastic Outlier Selection")
parser.add_argument('-t', '--threshold', type=float, default=None,
help=("Float between 0.0 and 1.0 to use as threshold for selecting "
"outliers. By default, this is not set, causing the outlier "
"probabilities instead of the classification to be outputted"))
parser.add_argument('-d', '--delimiter', type=str, default=',', help=(
"String to use to separate values. By default, this is a comma."))
parser.add_argument('-i', '--input', type=argparse.FileType('rb'),
default=sys.stdin, help=("File to read data set from. By default, "
"this is <stdin>."))
parser.add_argument('-m', '--metric', type=str, default='euclidean', help=(
"String indicating the metric to use to compute the dissimilarity "
"matrix. By default, this is 'euclidean'. Use 'none' if the data set "
"is a dissimilarity matrix."))
parser.add_argument('-o', '--output', type=argparse.FileType('wb'),
default=get_stdout(), help=("File to write the computed outlier "
"probabilities to. By default, this is <stdout>."))
parser.add_argument('-p', '--perplexity', type=float, default=30.0,
help="Float to use as perpexity. By default, this is 30.0.")
args = parser.parse_args()
X = np.loadtxt(args.input, delimiter=args.delimiter, ndmin=2)
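# O holds one outlier probability in [0, 1] per row of X.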
O = SOS(args.perplexity, args.metric).predict(X)
if args.threshold is None:
np.savetxt(args.output, O, '%1.8f')
else:
np.savetxt(args.output, O>=args.threshold, b'%1d')
return 0
if __name__ == '__main__':
exit(main())
|
<commit_before><commit_msg>Put command-line code into separate file<commit_after>
|
#!/usr/bin/env python
import argparse
import logging
import sys
import numpy as np
from sos import SOS
def get_stdout():
if sys.version_info.major < 3:
return sys.stdout
else:
return sys.stdout.buffer
def main():
parser = argparse.ArgumentParser(description="Stochastic Outlier Selection")
parser.add_argument('-t', '--threshold', type=float, default=None,
help=("Float between 0.0 and 1.0 to use as threshold for selecting "
"outliers. By default, this is not set, causing the outlier "
"probabilities instead of the classification to be outputted"))
parser.add_argument('-d', '--delimiter', type=str, default=',', help=(
"String to use to separate values. By default, this is a comma."))
parser.add_argument('-i', '--input', type=argparse.FileType('rb'),
default=sys.stdin, help=("File to read data set from. By default, "
"this is <stdin>."))
parser.add_argument('-m', '--metric', type=str, default='euclidean', help=(
"String indicating the metric to use to compute the dissimilarity "
"matrix. By default, this is 'euclidean'. Use 'none' if the data set "
"is a dissimilarity matrix."))
parser.add_argument('-o', '--output', type=argparse.FileType('wb'),
default=get_stdout(), help=("File to write the computed outlier "
"probabilities to. By default, this is <stdout>."))
parser.add_argument('-p', '--perplexity', type=float, default=30.0,
help="Float to use as perpexity. By default, this is 30.0.")
args = parser.parse_args()
X = np.loadtxt(args.input, delimiter=args.delimiter, ndmin=2)
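# O holds one outlier probability in [0, 1] per row of X.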
O = SOS(args.perplexity, args.metric).predict(X)
if args.threshold is None:
np.savetxt(args.output, O, '%1.8f')
else:
np.savetxt(args.output, O>=args.threshold, b'%1d')
return 0
if __name__ == '__main__':
exit(main())
|
Put command-line code into separate file#!/usr/bin/env python
import argparse
import logging
import sys
import numpy as np
from sos import SOS
def get_stdout():
if sys.version_info.major < 3:
return sys.stdout
else:
return sys.stdout.buffer
def main():
parser = argparse.ArgumentParser(description="Stochastic Outlier Selection")
parser.add_argument('-t', '--threshold', type=float, default=None,
help=("Float between 0.0 and 1.0 to use as threshold for selecting "
"outliers. By default, this is not set, causing the outlier "
"probabilities instead of the classification to be outputted"))
parser.add_argument('-d', '--delimiter', type=str, default=',', help=(
"String to use to separate values. By default, this is a comma."))
parser.add_argument('-i', '--input', type=argparse.FileType('rb'),
default=sys.stdin, help=("File to read data set from. By default, "
"this is <stdin>."))
parser.add_argument('-m', '--metric', type=str, default='euclidean', help=(
"String indicating the metric to use to compute the dissimilarity "
"matrix. By default, this is 'euclidean'. Use 'none' if the data set "
"is a dissimilarity matrix."))
parser.add_argument('-o', '--output', type=argparse.FileType('wb'),
default=get_stdout(), help=("File to write the computed outlier "
"probabilities to. By default, this is <stdout>."))
parser.add_argument('-p', '--perplexity', type=float, default=30.0,
help="Float to use as perpexity. By default, this is 30.0.")
args = parser.parse_args()
X = np.loadtxt(args.input, delimiter=args.delimiter, ndmin=2)
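# O holds one outlier probability in [0, 1] per row of X.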
O = SOS(args.perplexity, args.metric).predict(X)
if args.threshold is None:
np.savetxt(args.output, O, '%1.8f')
else:
np.savetxt(args.output, O>=args.threshold, b'%1d')
return 0
if __name__ == '__main__':
exit(main())
|
<commit_before><commit_msg>Put command-line code into separate file<commit_after>#!/usr/bin/env python
import argparse
import logging
import sys
import numpy as np
from sos import SOS
def get_stdout():
if sys.version_info.major < 3:
return sys.stdout
else:
return sys.stdout.buffer
def main():
parser = argparse.ArgumentParser(description="Stochastic Outlier Selection")
parser.add_argument('-t', '--threshold', type=float, default=None,
help=("Float between 0.0 and 1.0 to use as threshold for selecting "
"outliers. By default, this is not set, causing the outlier "
"probabilities instead of the classification to be outputted"))
parser.add_argument('-d', '--delimiter', type=str, default=',', help=(
"String to use to separate values. By default, this is a comma."))
parser.add_argument('-i', '--input', type=argparse.FileType('rb'),
default=sys.stdin, help=("File to read data set from. By default, "
"this is <stdin>."))
parser.add_argument('-m', '--metric', type=str, default='euclidean', help=(
"String indicating the metric to use to compute the dissimilarity "
"matrix. By default, this is 'euclidean'. Use 'none' if the data set "
"is a dissimilarity matrix."))
parser.add_argument('-o', '--output', type=argparse.FileType('wb'),
default=get_stdout(), help=("File to write the computed outlier "
"probabilities to. By default, this is <stdout>."))
parser.add_argument('-p', '--perplexity', type=float, default=30.0,
help="Float to use as perpexity. By default, this is 30.0.")
args = parser.parse_args()
X = np.loadtxt(args.input, delimiter=args.delimiter, ndmin=2)
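# O holds one outlier probability in [0, 1] per row of X.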
O = SOS(args.perplexity, args.metric).predict(X)
if args.threshold is None:
np.savetxt(args.output, O, '%1.8f')
else:
np.savetxt(args.output, O>=args.threshold, b'%1d')
return 0
if __name__ == '__main__':
exit(main())
|
|
84a6465b5b36989091245e2039912867a27c2773
|
migrations/versions/760_brief_response_submitted_at.py
|
migrations/versions/760_brief_response_submitted_at.py
|
"""brief response submitted at
Revision ID: 760
Revises: 750
Create Date: 2016-10-24 14:16:29.951023
"""
# revision identifiers, used by Alembic.
revision = '760'
down_revision = '750'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('brief_responses', sa.Column('submitted_at', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('brief_responses', 'submitted_at')
|
Add `submitted_at` migration to `brief_responses` table
|
Add `submitted_at` migration to `brief_responses` table
Represents when a brief response has been submitted and is therefore
a complete response, as opposed to one that may be in a draft form.
Note, we choose not to use the word "published" as a brief response
is not published publicly.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add `submitted_at` migration to `brief_responses` table
Represents when a brief response has been submitted and is therefore
a complete response, as opposed to one that may be in a draft form.
Note, we choose not to use the word "published" as a brief response
is not published publicly.
|
"""brief response submitted at
Revision ID: 760
Revises: 750
Create Date: 2016-10-24 14:16:29.951023
"""
# revision identifiers, used by Alembic.
revision = '760'
down_revision = '750'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('brief_responses', sa.Column('submitted_at', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('brief_responses', 'submitted_at')
|
<commit_before><commit_msg>Add `submitted_at` migration to `brief_responses` table
Represents when a brief response has been submitted and is therefore
a complete response, as opposed to one that may be in a draft form.
Note, we choose not to use the word "published" as a brief response
is not published publicly.<commit_after>
|
"""brief response submitted at
Revision ID: 760
Revises: 750
Create Date: 2016-10-24 14:16:29.951023
"""
# revision identifiers, used by Alembic.
revision = '760'
down_revision = '750'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('brief_responses', sa.Column('submitted_at', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('brief_responses', 'submitted_at')
|
Add `submitted_at` migration to `brief_responses` table
Represents when a brief response has been submitted and is therefore
a complete response, as opposed to one that may be in a draft form.
Note, we choose not to use the word "published" as a brief response
is not published publicly."""brief response submitted at
Revision ID: 760
Revises: 750
Create Date: 2016-10-24 14:16:29.951023
"""
# revision identifiers, used by Alembic.
revision = '760'
down_revision = '750'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('brief_responses', sa.Column('submitted_at', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('brief_responses', 'submitted_at')
|
<commit_before><commit_msg>Add `submitted_at` migration to `brief_responses` table
Represents when a brief response has been submitted and is therefore
a complete response, as opposed to one that may be in a draft form.
Note, we choose not to use the word "published" as a brief response
is not published publicly.<commit_after>"""brief response submitted at
Revision ID: 760
Revises: 750
Create Date: 2016-10-24 14:16:29.951023
"""
# revision identifiers, used by Alembic.
revision = '760'
down_revision = '750'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('brief_responses', sa.Column('submitted_at', sa.DateTime(), nullable=True))
def downgrade():
op.drop_column('brief_responses', 'submitted_at')
|
|
a8eaee817f91dec66b186bf9e8ac94084b2e4190
|
nagios/check_b548_temps.py
|
nagios/check_b548_temps.py
|
"""
Gateway for B548 temps to nagios, this way I can setup alerts via it
Array("In Air Handler", "Out Air Handler", "Out Rack", "In Rack")
0 70.25
1 57.88
2 88.25
3 62.04
"""
import sys
data = open('/tmp/onewire.txt', 'r').readlines()
if len(data) != 4:
print 'WARNING - Could not parse file!'
sys.exit(1)
v = []
for line in data:
tokens = line.strip().split()
if len(tokens) == 2:
v.append( float(tokens[1]) )
else:
v.append( -99 )
ds = ""
ks = ["in_handler", "out_handler", "out_rack", "in_rack"]
for k,d in zip(ks, v):
ds += "%s=%s;95;100;105 " % (k, d)
if v[2] < 95:
print 'OK - room %s |%s' % (v[0], ds)
sys.exit(0)
elif v[2] < 100:
print 'WARNING - room %s |%s' % (v[0], ds)
sys.exit(1)
else:
print 'CRITICAL - room %s |%s' % (v[0], ds)
sys.exit(2)
|
Add nagios script for room temps
|
Add nagios script for room temps
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add nagios script for room temps
|
"""
Gateway for B548 temps to nagios, this way I can setup alerts via it
Array("In Air Handler", "Out Air Handler", "Out Rack", "In Rack")
0 70.25
1 57.88
2 88.25
3 62.04
"""
import sys
data = open('/tmp/onewire.txt', 'r').readlines()
if len(data) != 4:
print 'WARNING - Could not parse file!'
sys.exit(1)
v = []
for line in data:
tokens = line.strip().split()
if len(tokens) == 2:
v.append( float(tokens[1]) )
else:
v.append( -99 )
ds = ""
ks = ["in_handler", "out_handler", "out_rack", "in_rack"]
for k,d in zip(ks, v):
ds += "%s=%s;95;100;105 " % (k, d)
if v[2] < 95:
print 'OK - room %s |%s' % (v[0], ds)
sys.exit(0)
elif v[2] < 100:
print 'WARNING - room %s |%s' % (v[0], ds)
sys.exit(1)
else:
print 'CRITICAL - room %s |%s' % (v[0], ds)
sys.exit(2)
|
<commit_before><commit_msg>Add nagios script for room temps<commit_after>
|
"""
Gateway for B548 temps to nagios, this way I can setup alerts via it
Array("In Air Handler", "Out Air Handler", "Out Rack", "In Rack")
0 70.25
1 57.88
2 88.25
3 62.04
"""
import sys
data = open('/tmp/onewire.txt', 'r').readlines()
if len(data) != 4:
print 'WARNING - Could not parse file!'
sys.exit(1)
v = []
for line in data:
tokens = line.strip().split()
if len(tokens) == 2:
v.append( float(tokens[1]) )
else:
v.append( -99 )
ds = ""
ks = ["in_handler", "out_handler", "out_rack", "in_rack"]
for k,d in zip(ks, v):
ds += "%s=%s;95;100;105 " % (k, d)
if v[2] < 95:
print 'OK - room %s |%s' % (v[0], ds)
sys.exit(0)
elif v[2] < 100:
print 'WARNING - room %s |%s' % (v[0], ds)
sys.exit(1)
else:
print 'CRITICAL - room %s |%s' % (v[0], ds)
sys.exit(2)
|
Add nagios script for room temps"""
Gateway for B548 temps to nagios, this way I can setup alerts via it
Array("In Air Handler", "Out Air Handler", "Out Rack", "In Rack")
0 70.25
1 57.88
2 88.25
3 62.04
"""
import sys
data = open('/tmp/onewire.txt', 'r').readlines()
if len(data) != 4:
print 'WARNING - Could not parse file!'
sys.exit(1)
v = []
for line in data:
tokens = line.strip().split()
if len(tokens) == 2:
v.append( float(tokens[1]) )
else:
v.append( -99 )
ds = ""
ks = ["in_handler", "out_handler", "out_rack", "in_rack"]
for k,d in zip(ks, v):
ds += "%s=%s;95;100;105 " % (k, d)
if v[2] < 95:
print 'OK - room %s |%s' % (v[0], ds)
sys.exit(0)
elif v[2] < 100:
print 'WARNING - room %s |%s' % (v[0], ds)
sys.exit(1)
else:
print 'CRITICAL - room %s |%s' % (v[0], ds)
sys.exit(2)
|
<commit_before><commit_msg>Add nagios script for room temps<commit_after>"""
Gateway for B548 temps to nagios, this way I can setup alerts via it
Array("In Air Handler", "Out Air Handler", "Out Rack", "In Rack")
0 70.25
1 57.88
2 88.25
3 62.04
"""
import sys
data = open('/tmp/onewire.txt', 'r').readlines()
if len(data) != 4:
print 'WARNING - Could not parse file!'
sys.exit(1)
v = []
for line in data:
tokens = line.strip().split()
if len(tokens) == 2:
v.append( float(tokens[1]) )
else:
v.append( -99 )
ds = ""
ks = ["in_handler", "out_handler", "out_rack", "in_rack"]
for k,d in zip(ks, v):
ds += "%s=%s;95;100;105 " % (k, d)
if v[2] < 95:
print 'OK - room %s |%s' % (v[0], ds)
sys.exit(0)
elif v[2] < 100:
print 'WARNING - room %s |%s' % (v[0], ds)
sys.exit(1)
else:
print 'CRITICAL - room %s |%s' % (v[0], ds)
sys.exit(2)
|
|
46af9016a2a349c0ea7c1f2aec6a3d67eeef3c86
|
tests/st/calicoctl/test_convert.py
|
tests/st/calicoctl/test_convert.py
|
# Copyright (c) 2015-2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import copy
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.utils import calicoctl
from tests.st.utils.data import *
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)
filebase = "test-data/v1/"
class TestCalicoctlConvert(TestBase):
"""
Test calicoctl convert
1) Test that v1 resource manifests convert without error.
2) Test that the converted output can be applied.
"""
@parameterized.expand([
("bgppeer-global.yaml",),
("bgppeer-node.yaml",),
("bgppeer-node2.yaml",),
("multi-resource.yaml",),
("node.yaml",),
("test3.yaml",),
("migration/bgppeer.yaml",),
("migration/hostendpoint.yaml",),
("migration/ippool.yaml",),
("migration/node.yaml",),
("migration/policy.yaml",),
("migration/profile.yaml",),
("migration/workloadendpoint.yaml",),
])
def test_convert_successful(self, filename):
"""
Test convert successfully
"""
# Convert the file
rc = calicoctl("convert -f %s" % filebase+filename)
rc.assert_no_error()
# Write the converted data to a temp file
with open("/tmp/converted", 'w') as f:
f.write(rc.output)
# Load the converted data
rc = calicoctl("apply -f /tmp/converted")
rc.assert_no_error()
|
Add ST to run manifest convert tests
|
Add ST to run manifest convert tests
|
Python
|
apache-2.0
|
Metaswitch/calico-docker,projectcalico/calico-docker,projectcalico/calico-containers,insequent/calico-docker,insequent/calico-docker,projectcalico/calico-containers,projectcalico/calico-containers,projectcalico/calico-docker,Metaswitch/calico-docker
|
Add ST to run manifest convert tests
|
# Copyright (c) 2015-2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import copy
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.utils import calicoctl
from tests.st.utils.data import *
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)
filebase = "test-data/v1/"
class TestCalicoctlConvert(TestBase):
"""
Test calicoctl convert
1) Test that v1 resource manifests convert without error.
2) Test that the converted output can be applied.
"""
@parameterized.expand([
("bgppeer-global.yaml",),
("bgppeer-node.yaml",),
("bgppeer-node2.yaml",),
("multi-resource.yaml",),
("node.yaml",),
("test3.yaml",),
("migration/bgppeer.yaml",),
("migration/hostendpoint.yaml",),
("migration/ippool.yaml",),
("migration/node.yaml",),
("migration/policy.yaml",),
("migration/profile.yaml",),
("migration/workloadendpoint.yaml",),
])
def test_convert_successful(self, filename):
"""
Test convert successfully
"""
# Convert the file
rc = calicoctl("convert -f %s" % filebase+filename)
rc.assert_no_error()
# Write the converted data to a temp file
with open("/tmp/converted", 'w') as f:
f.write(rc.output)
# Load the converted data
rc = calicoctl("apply -f /tmp/converted")
rc.assert_no_error()
|
<commit_before><commit_msg>Add ST to run manifest convert tests<commit_after>
|
# Copyright (c) 2015-2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import copy
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.utils import calicoctl
from tests.st.utils.data import *
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)
filebase = "test-data/v1/"
class TestCalicoctlConvert(TestBase):
"""
Test calicoctl convert
1) Test that v1 resource manifests convert without error.
2) Test that the converted output can be applied.
"""
@parameterized.expand([
("bgppeer-global.yaml",),
("bgppeer-node.yaml",),
("bgppeer-node2.yaml",),
("multi-resource.yaml",),
("node.yaml",),
("test3.yaml",),
("migration/bgppeer.yaml",),
("migration/hostendpoint.yaml",),
("migration/ippool.yaml",),
("migration/node.yaml",),
("migration/policy.yaml",),
("migration/profile.yaml",),
("migration/workloadendpoint.yaml",),
])
def test_convert_successful(self, filename):
"""
Test convert successfully
"""
# Convert the file
rc = calicoctl("convert -f %s" % filebase+filename)
rc.assert_no_error()
# Write the converted data to a temp file
with open("/tmp/converted", 'w') as f:
f.write(rc.output)
# Load the converted data
rc = calicoctl("apply -f /tmp/converted")
rc.assert_no_error()
|
Add ST to run manifest convert tests# Copyright (c) 2015-2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import copy
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.utils import calicoctl
from tests.st.utils.data import *
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)
filebase = "test-data/v1/"
class TestCalicoctlConvert(TestBase):
"""
Test calicoctl convert
1) Test that v1 resource manifests convert without error.
2) Test that the converted output can be applied.
"""
@parameterized.expand([
("bgppeer-global.yaml",),
("bgppeer-node.yaml",),
("bgppeer-node2.yaml",),
("multi-resource.yaml",),
("node.yaml",),
("test3.yaml",),
("migration/bgppeer.yaml",),
("migration/hostendpoint.yaml",),
("migration/ippool.yaml",),
("migration/node.yaml",),
("migration/policy.yaml",),
("migration/profile.yaml",),
("migration/workloadendpoint.yaml",),
])
def test_convert_successful(self, filename):
"""
Test convert successfully
"""
# Convert the file
rc = calicoctl("convert -f %s" % filebase+filename)
rc.assert_no_error()
# Write the converted data to a temp file
with open("/tmp/converted", 'w') as f:
f.write(rc.output)
# Load the converted data
rc = calicoctl("apply -f /tmp/converted")
rc.assert_no_error()
|
<commit_before><commit_msg>Add ST to run manifest convert tests<commit_after># Copyright (c) 2015-2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import copy
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.utils import calicoctl
from tests.st.utils.data import *
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)
filebase = "test-data/v1/"
class TestCalicoctlConvert(TestBase):
"""
Test calicoctl convert
1) Test that v1 resource manifests convert without error.
2) Test that the converted output can be applied.
"""
@parameterized.expand([
("bgppeer-global.yaml",),
("bgppeer-node.yaml",),
("bgppeer-node2.yaml",),
("multi-resource.yaml",),
("node.yaml",),
("test3.yaml",),
("migration/bgppeer.yaml",),
("migration/hostendpoint.yaml",),
("migration/ippool.yaml",),
("migration/node.yaml",),
("migration/policy.yaml",),
("migration/profile.yaml",),
("migration/workloadendpoint.yaml",),
])
def test_convert_successful(self, filename):
"""
Test convert successfully
"""
# Convert the file
rc = calicoctl("convert -f %s" % filebase+filename)
rc.assert_no_error()
# Write the converted data to a temp file
with open("/tmp/converted", 'w') as f:
f.write(rc.output)
# Load the converted data
rc = calicoctl("apply -f /tmp/converted")
rc.assert_no_error()
|
|
3cbc45e6496770bb16d3eac01897f23fbc001491
|
turbopump_test.py
|
turbopump_test.py
|
import unittest
import turbopump
class TestTurbopump(unittest.TestCase):
def test_m_dot2gpm(self):
# 100 kg s**-1, 1000 kg m**-3 --> 1584 gal min**-1
self.assertAlmostEqual(1584, turbopump.m_dot2gpm(100, 1000), delta=2)
def test_gpm_m_dot_inverse(self):
for m_dot in [1, 100, 1000, 2434]:
for rho in [1, 100, 1000, 2000]:
self.assertAlmostEqual(m_dot, turbopump.gpm2m_dot(
turbopump.m_dot2gpm(m_dot, rho), rho))
def test_rpm_radsec_inverse(self):
for rpm in [1, 1e3, 1e6]:
self.assertAlmostEqual(rpm, turbopump.radsec2rpm(
turbopump.rpm2radsec(rpm)))
def test_sample61(self):
# Check against sample problem 6-1 from Huzel and Huang.
rho_ox = 1143 # 71.38 lbm ft**-3
rho_fuel = 808.1 # 50.45 lbm ft**-3
dp_ox = 9.997e6 # 1450 psi
dp_fuel = 11.55e6 # 1675 psi
m_dot_ox = 894 # 1971 lbm s**-1
m_dot_fuel = 405 # 892 lbm s**-1
N = turbopump.rpm2radsec(7000)
# Check speed
self.assertAlmostEqual(7000, turbopump.radsec2rpm(N))
# Check pump head
self.assertAlmostEqual(2930, turbopump.dp2head(dp_ox, rho_ox), delta=10)
self.assertAlmostEqual(4790, turbopump.dp2head(dp_fuel, rho_fuel), delta=10)
# Check volume flow rate
self.assertAlmostEqual(12420, turbopump.m_dot2gpm(m_dot_ox, rho_ox), delta=40)
self.assertAlmostEqual(7960, turbopump.m_dot2gpm(m_dot_fuel, rho_fuel), delta=20)
# Check specific speed
self.assertAlmostEqual(1980, turbopump.pump_specific_speed_us(
dp_ox, m_dot_ox, rho_ox, N),
delta=30)
self.assertAlmostEqual(1083, turbopump.pump_specific_speed_us(
dp_fuel, m_dot_fuel, rho_fuel, N),
delta=20)
if __name__ == '__main__':
unittest.main()
|
Add unit tests for turbopump.
|
Add unit tests for turbopump.
|
Python
|
mit
|
mvernacc/proptools
|
Add unit tests for turbopump.
|
import unittest
import turbopump
class TestTurbopump(unittest.TestCase):
def test_m_dot2gpm(self):
# 100 kg s**-1, 1000 kg m**-3 --> 1584 gal min**-1
self.assertAlmostEqual(1584, turbopump.m_dot2gpm(100, 1000), delta=2)
def test_gpm_m_dot_inverse(self):
for m_dot in [1, 100, 1000, 2434]:
for rho in [1, 100, 1000, 2000]:
self.assertAlmostEqual(m_dot, turbopump.gpm2m_dot(
turbopump.m_dot2gpm(m_dot, rho), rho))
def test_rpm_radsec_inverse(self):
for rpm in [1, 1e3, 1e6]:
self.assertAlmostEqual(rpm, turbopump.radsec2rpm(
turbopump.rpm2radsec(rpm)))
def test_sample61(self):
# Check against sample problem 6-1 from Huzel and Huang.
rho_ox = 1143 # 71.38 lbm ft**-3
rho_fuel = 808.1 # 50.45 lbm ft**-3
dp_ox = 9.997e6 # 1450 psi
dp_fuel = 11.55e6 # 1675 psi
m_dot_ox = 894 # 1971 lbm s**-1
m_dot_fuel = 405 # 892 lbm s**-1
N = turbopump.rpm2radsec(7000)
# Check speed
self.assertAlmostEqual(7000, turbopump.radsec2rpm(N))
# Check pump head
self.assertAlmostEqual(2930, turbopump.dp2head(dp_ox, rho_ox), delta=10)
self.assertAlmostEqual(4790, turbopump.dp2head(dp_fuel, rho_fuel), delta=10)
# Check volume flow rate
self.assertAlmostEqual(12420, turbopump.m_dot2gpm(m_dot_ox, rho_ox), delta=40)
self.assertAlmostEqual(7960, turbopump.m_dot2gpm(m_dot_fuel, rho_fuel), delta=20)
# Check specific speed
self.assertAlmostEqual(1980, turbopump.pump_specific_speed_us(
dp_ox, m_dot_ox, rho_ox, N),
delta=30)
self.assertAlmostEqual(1083, turbopump.pump_specific_speed_us(
dp_fuel, m_dot_fuel, rho_fuel, N),
delta=20)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for turbopump.<commit_after>
|
import unittest
import turbopump
class TestTurbopump(unittest.TestCase):
def test_m_dot2gpm(self):
# 100 kg s**-1, 1000 kg m**-3 --> 1584 gal min**-1
self.assertAlmostEqual(1584, turbopump.m_dot2gpm(100, 1000), delta=2)
def test_gpm_m_dot_inverse(self):
for m_dot in [1, 100, 1000, 2434]:
for rho in [1, 100, 1000, 2000]:
self.assertAlmostEqual(m_dot, turbopump.gpm2m_dot(
turbopump.m_dot2gpm(m_dot, rho), rho))
def test_rpm_radsec_inverse(self):
for rpm in [1, 1e3, 1e6]:
self.assertAlmostEqual(rpm, turbopump.radsec2rpm(
turbopump.rpm2radsec(rpm)))
def test_sample61(self):
# Check against sample problem 6-1 from Huzel and Huang.
rho_ox = 1143 # 71.38 lbm ft**-3
rho_fuel = 808.1 # 50.45 lbm ft**-3
dp_ox = 9.997e6 # 1450 psi
dp_fuel = 11.55e6 # 1675 psi
m_dot_ox = 894 # 1971 lbm s**-1
m_dot_fuel = 405 # 892 lbm s**-1
N = turbopump.rpm2radsec(7000)
# Check speed
self.assertAlmostEqual(7000, turbopump.radsec2rpm(N))
# Check pump head
self.assertAlmostEqual(2930, turbopump.dp2head(dp_ox, rho_ox), delta=10)
self.assertAlmostEqual(4790, turbopump.dp2head(dp_fuel, rho_fuel), delta=10)
# Check volume flow rate
self.assertAlmostEqual(12420, turbopump.m_dot2gpm(m_dot_ox, rho_ox), delta=40)
self.assertAlmostEqual(7960, turbopump.m_dot2gpm(m_dot_fuel, rho_fuel), delta=20)
# Check specific speed
self.assertAlmostEqual(1980, turbopump.pump_specific_speed_us(
dp_ox, m_dot_ox, rho_ox, N),
delta=30)
self.assertAlmostEqual(1083, turbopump.pump_specific_speed_us(
dp_fuel, m_dot_fuel, rho_fuel, N),
delta=20)
if __name__ == '__main__':
unittest.main()
|
Add unit tests for turbopump.import unittest
import turbopump
class TestTurbopump(unittest.TestCase):
def test_m_dot2gpm(self):
# 100 kg s**-1, 1000 kg m**-3 --> 1584 gal min**-1
self.assertAlmostEqual(1584, turbopump.m_dot2gpm(100, 1000), delta=2)
def test_gpm_m_dot_inverse(self):
for m_dot in [1, 100, 1000, 2434]:
for rho in [1, 100, 1000, 2000]:
self.assertAlmostEqual(m_dot, turbopump.gpm2m_dot(
turbopump.m_dot2gpm(m_dot, rho), rho))
def test_rpm_radsec_inverse(self):
for rpm in [1, 1e3, 1e6]:
self.assertAlmostEqual(rpm, turbopump.radsec2rpm(
turbopump.rpm2radsec(rpm)))
def test_sample61(self):
# Check against sample problem 6-1 from Huzel and Huang.
rho_ox = 1143 # 71.38 lbm ft**-3
rho_fuel = 808.1 # 50.45 lbm ft**-3
dp_ox = 9.997e6 # 1450 psi
dp_fuel = 11.55e6 # 1675 psi
m_dot_ox = 894 # 1971 lbm s**-1
m_dot_fuel = 405 # 892 lbm s**-1
N = turbopump.rpm2radsec(7000)
# Check speed
self.assertAlmostEqual(7000, turbopump.radsec2rpm(N))
# Check pump head
self.assertAlmostEqual(2930, turbopump.dp2head(dp_ox, rho_ox), delta=10)
self.assertAlmostEqual(4790, turbopump.dp2head(dp_fuel, rho_fuel), delta=10)
# Check volume flow rate
self.assertAlmostEqual(12420, turbopump.m_dot2gpm(m_dot_ox, rho_ox), delta=40)
self.assertAlmostEqual(7960, turbopump.m_dot2gpm(m_dot_fuel, rho_fuel), delta=20)
# Check specific speed
self.assertAlmostEqual(1980, turbopump.pump_specific_speed_us(
dp_ox, m_dot_ox, rho_ox, N),
delta=30)
self.assertAlmostEqual(1083, turbopump.pump_specific_speed_us(
dp_fuel, m_dot_fuel, rho_fuel, N),
delta=20)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for turbopump.<commit_after>import unittest
import turbopump
class TestTurbopump(unittest.TestCase):
def test_m_dot2gpm(self):
# 100 kg s**-1, 1000 kg m**-3 --> 1584 gal min**-1
self.assertAlmostEqual(1584, turbopump.m_dot2gpm(100, 1000), delta=2)
def test_gpm_m_dot_inverse(self):
for m_dot in [1, 100, 1000, 2434]:
for rho in [1, 100, 1000, 2000]:
self.assertAlmostEqual(m_dot, turbopump.gpm2m_dot(
turbopump.m_dot2gpm(m_dot, rho), rho))
def test_rpm_radsec_inverse(self):
for rpm in [1, 1e3, 1e6]:
self.assertAlmostEqual(rpm, turbopump.radsec2rpm(
turbopump.rpm2radsec(rpm)))
def test_sample61(self):
# Check against sample problem 6-1 from Huzel and Huang.
rho_ox = 1143 # 71.38 lbm ft**-3
rho_fuel = 808.1 # 50.45 lbm ft**-3
dp_ox = 9.997e6 # 1450 psi
dp_fuel = 11.55e6 # 1675 psi
m_dot_ox = 894 # 1971 lbm s**-1
m_dot_fuel = 405 # 892 lbm s**-1
N = turbopump.rpm2radsec(7000)
# Check speed
self.assertAlmostEqual(7000, turbopump.radsec2rpm(N))
# Check pump head
self.assertAlmostEqual(2930, turbopump.dp2head(dp_ox, rho_ox), delta=10)
self.assertAlmostEqual(4790, turbopump.dp2head(dp_fuel, rho_fuel), delta=10)
# Check volume flow rate
self.assertAlmostEqual(12420, turbopump.m_dot2gpm(m_dot_ox, rho_ox), delta=40)
self.assertAlmostEqual(7960, turbopump.m_dot2gpm(m_dot_fuel, rho_fuel), delta=20)
# Check specific speed
self.assertAlmostEqual(1980, turbopump.pump_specific_speed_us(
dp_ox, m_dot_ox, rho_ox, N),
delta=30)
self.assertAlmostEqual(1083, turbopump.pump_specific_speed_us(
dp_fuel, m_dot_fuel, rho_fuel, N),
delta=20)
if __name__ == '__main__':
unittest.main()
|
|
28c2e89343db336723b3f36bccd9bca5d6e1dfc7
|
trex/settings_production.py
|
trex/settings_production.py
|
from trex.settings_global import *
DEBUG = False
REST_FRAMEWORK = {
# don't use BrowsableAPIRenderer
"DEFAULT_RENDERER_CLASSES": {
"rest_framework.renderers.JSONRenderer"
},
# deactivate "browser enhancements"
"FORM_CONTENT_OVERRIDE": None,
"FORM_METHOD_OVERRIDE": None,
"FORM_CONTENTTYPE_OVERRIDE": None,
}
|
Add a settings module for production
|
Add a settings module for production
|
Python
|
mit
|
bjoernricks/trex,bjoernricks/trex
|
Add a settings module for production
|
from trex.settings_global import *
DEBUG = False
REST_FRAMEWORK = {
# don't use BrowsableAPIRenderer
"DEFAULT_RENDERER_CLASSES": {
"rest_framework.renderers.JSONRenderer"
},
# deactivate "browser enhancements"
"FORM_CONTENT_OVERRIDE": None,
"FORM_METHOD_OVERRIDE": None,
"FORM_CONTENTTYPE_OVERRIDE": None,
}
|
<commit_before><commit_msg>Add a settings module for production<commit_after>
|
from trex.settings_global import *
DEBUG = False
REST_FRAMEWORK = {
# don't use BrowsableAPIRenderer
"DEFAULT_RENDERER_CLASSES": {
"rest_framework.renderers.JSONRenderer"
},
# deactivate "browser enhancements"
"FORM_CONTENT_OVERRIDE": None,
"FORM_METHOD_OVERRIDE": None,
"FORM_CONTENTTYPE_OVERRIDE": None,
}
|
Add a settings module for productionfrom trex.settings_global import *
DEBUG = False
REST_FRAMEWORK = {
# don't use BrowsableAPIRenderer
"DEFAULT_RENDERER_CLASSES": {
"rest_framework.renderers.JSONRenderer"
},
# deactivate "browser enhancements"
"FORM_CONTENT_OVERRIDE": None,
"FORM_METHOD_OVERRIDE": None,
"FORM_CONTENTTYPE_OVERRIDE": None,
}
|
<commit_before><commit_msg>Add a settings module for production<commit_after>from trex.settings_global import *
DEBUG = False
REST_FRAMEWORK = {
# don't use BrowsableAPIRenderer
"DEFAULT_RENDERER_CLASSES": {
"rest_framework.renderers.JSONRenderer"
},
# deactivate "browser enhancements"
"FORM_CONTENT_OVERRIDE": None,
"FORM_METHOD_OVERRIDE": None,
"FORM_CONTENTTYPE_OVERRIDE": None,
}
|
|
29519614965e6629debcd2d08fd1fe2e0debe08f
|
test/test_paramval.py
|
test/test_paramval.py
|
import logging
import luigi
import sciluigi as sl
import os
import time
import unittest
log = logging.getLogger('sciluigi-interface')
log.setLevel(logging.WARNING)
class IntParamTask(sl.Task):
an_int_param = luigi.IntParameter()
def out_int_val(self):
return sl.TargetInfo(self, '/tmp/intparamtask_intval_%d.txt' % self.an_int_param)
def run(self):
with self.out_int_val().open('w') as outfile:
outfile.write('%d' % self.an_int_param)
class NonStringParamWF(sl.WorkflowTask):
def workflow(self):
intparam_task = self.new_task('intparam_task', IntParamTask,
an_int_param = 123)
return intparam_task
class TestNonStringParameterValues(unittest.TestCase):
def setUp(self):
self.w = luigi.worker.Worker()
self.nsp_wf = NonStringParamWF(instance_name='nonstring_param_wf')
self.w.add(self.nsp_wf)
def test_intparam_gets_set(self):
self.assertEquals(self.nsp_wf._tasks['intparam_task'].an_int_param, 123)
def test_intparam_value_written(self):
self.w.run()
with self.nsp_wf.workflow().out_int_val().open() as infile:
val = infile.read()
self.assertEquals(val, '123')
def tearDown(self):
pass
|
Add test for non-string (integer) parameter value
|
Add test for non-string (integer) parameter value
|
Python
|
mit
|
pharmbio/sciluigi,pharmbio/sciluigi,samuell/sciluigi
|
Add test for non-string (integer) parameter value
|
import logging
import luigi
import sciluigi as sl
import os
import time
import unittest
log = logging.getLogger('sciluigi-interface')
log.setLevel(logging.WARNING)
class IntParamTask(sl.Task):
an_int_param = luigi.IntParameter()
def out_int_val(self):
return sl.TargetInfo(self, '/tmp/intparamtask_intval_%d.txt' % self.an_int_param)
def run(self):
with self.out_int_val().open('w') as outfile:
outfile.write('%d' % self.an_int_param)
class NonStringParamWF(sl.WorkflowTask):
def workflow(self):
intparam_task = self.new_task('intparam_task', IntParamTask,
an_int_param = 123)
return intparam_task
class TestNonStringParameterValues(unittest.TestCase):
def setUp(self):
self.w = luigi.worker.Worker()
self.nsp_wf = NonStringParamWF(instance_name='nonstring_param_wf')
self.w.add(self.nsp_wf)
def test_intparam_gets_set(self):
self.assertEquals(self.nsp_wf._tasks['intparam_task'].an_int_param, 123)
def test_intparam_value_written(self):
self.w.run()
with self.nsp_wf.workflow().out_int_val().open() as infile:
val = infile.read()
self.assertEquals(val, '123')
def tearDown(self):
pass
|
<commit_before><commit_msg>Add test for non-string (integer) parameter value<commit_after>
|
import logging
import luigi
import sciluigi as sl
import os
import time
import unittest
log = logging.getLogger('sciluigi-interface')
log.setLevel(logging.WARNING)
class IntParamTask(sl.Task):
an_int_param = luigi.IntParameter()
def out_int_val(self):
return sl.TargetInfo(self, '/tmp/intparamtask_intval_%d.txt' % self.an_int_param)
def run(self):
with self.out_int_val().open('w') as outfile:
outfile.write('%d' % self.an_int_param)
class NonStringParamWF(sl.WorkflowTask):
def workflow(self):
intparam_task = self.new_task('intparam_task', IntParamTask,
an_int_param = 123)
return intparam_task
class TestNonStringParameterValues(unittest.TestCase):
def setUp(self):
self.w = luigi.worker.Worker()
self.nsp_wf = NonStringParamWF(instance_name='nonstring_param_wf')
self.w.add(self.nsp_wf)
def test_intparam_gets_set(self):
self.assertEquals(self.nsp_wf._tasks['intparam_task'].an_int_param, 123)
def test_intparam_value_written(self):
self.w.run()
with self.nsp_wf.workflow().out_int_val().open() as infile:
val = infile.read()
self.assertEquals(val, '123')
def tearDown(self):
pass
|
Add test for non-string (integer) parameter valueimport logging
import luigi
import sciluigi as sl
import os
import time
import unittest
log = logging.getLogger('sciluigi-interface')
log.setLevel(logging.WARNING)
class IntParamTask(sl.Task):
an_int_param = luigi.IntParameter()
def out_int_val(self):
return sl.TargetInfo(self, '/tmp/intparamtask_intval_%d.txt' % self.an_int_param)
def run(self):
with self.out_int_val().open('w') as outfile:
outfile.write('%d' % self.an_int_param)
class NonStringParamWF(sl.WorkflowTask):
def workflow(self):
intparam_task = self.new_task('intparam_task', IntParamTask,
an_int_param = 123)
return intparam_task
class TestNonStringParameterValues(unittest.TestCase):
def setUp(self):
self.w = luigi.worker.Worker()
self.nsp_wf = NonStringParamWF(instance_name='nonstring_param_wf')
self.w.add(self.nsp_wf)
def test_intparam_gets_set(self):
self.assertEquals(self.nsp_wf._tasks['intparam_task'].an_int_param, 123)
def test_intparam_value_written(self):
self.w.run()
with self.nsp_wf.workflow().out_int_val().open() as infile:
val = infile.read()
self.assertEquals(val, '123')
def tearDown(self):
pass
|
<commit_before><commit_msg>Add test for non-string (integer) parameter value<commit_after>import logging
import luigi
import sciluigi as sl
import os
import time
import unittest
log = logging.getLogger('sciluigi-interface')
log.setLevel(logging.WARNING)
class IntParamTask(sl.Task):
an_int_param = luigi.IntParameter()
def out_int_val(self):
return sl.TargetInfo(self, '/tmp/intparamtask_intval_%d.txt' % self.an_int_param)
def run(self):
with self.out_int_val().open('w') as outfile:
outfile.write('%d' % self.an_int_param)
class NonStringParamWF(sl.WorkflowTask):
def workflow(self):
intparam_task = self.new_task('intparam_task', IntParamTask,
an_int_param = 123)
return intparam_task
class TestNonStringParameterValues(unittest.TestCase):
def setUp(self):
self.w = luigi.worker.Worker()
self.nsp_wf = NonStringParamWF(instance_name='nonstring_param_wf')
self.w.add(self.nsp_wf)
def test_intparam_gets_set(self):
self.assertEquals(self.nsp_wf._tasks['intparam_task'].an_int_param, 123)
def test_intparam_value_written(self):
self.w.run()
with self.nsp_wf.workflow().out_int_val().open() as infile:
val = infile.read()
self.assertEquals(val, '123')
def tearDown(self):
pass
|
|
828a12547380091d4183bdf2ae247e6df8574fe7
|
tests/test_filters.py
|
tests/test_filters.py
|
import unittest
from pypercube.filters import Filter
from pypercube.filters import EQ
from pypercube.filters import LT
from pypercube.filters import LE
from pypercube.filters import GT
from pypercube.filters import GE
from pypercube.filters import NE
from pypercube.filters import RE
from pypercube.filters import IN
from pypercube.filters import StartsWith
from pypercube.filters import EndsWith
class TestFilter(unittest.TestCase):
def test_equality(self):
f1 = Filter('eq', 'name', 'test')
f2 = EQ('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('lt', 'name', 'test')
self.assertNotEqual(f1, f2)
f2 = LT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('le', 'name', 'test')
f2 = LE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('gt', 'name', 'test')
f2 = GT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ge', 'name', 'test')
f2 = GE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ne', 'name', 'test')
f2 = NE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', 'test')
f2 = RE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['t', 'e', 's', 't'])
f2 = IN('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['test'])
f2 = IN('name', ['test'])
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '^test')
f2 = StartsWith('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '.*test$')
f2 = EndsWith('name', 'test')
self.assertEqual(f1, f2)
def test_starts_with(self):
f = StartsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, "^test")')
def test_ends_with(self):
f = EndsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, ".*test$")')
def test_re(self):
# FIXME: Regular expressions are broken
f = RE('name', r"\s+([A-Za-z0-9]+)")
self.assertEqual("%s" % f, r'.re(name, "\s+([A-Za-z0-9]+)")')
def test_in(self):
f = IN('name', ['a', 'b', 'c'])
self.assertEqual("%s" % f, '.in(name, ["a", "b", "c"])')
|
Add Filter tests. Regular expressions are broken.
|
Add Filter tests. Regular expressions are broken.
|
Python
|
bsd-3-clause
|
sbuss/pypercube
|
Add Filter tests. Regular expressions are broken.
|
import unittest
from pypercube.filters import Filter
from pypercube.filters import EQ
from pypercube.filters import LT
from pypercube.filters import LE
from pypercube.filters import GT
from pypercube.filters import GE
from pypercube.filters import NE
from pypercube.filters import RE
from pypercube.filters import IN
from pypercube.filters import StartsWith
from pypercube.filters import EndsWith
class TestFilter(unittest.TestCase):
def test_equality(self):
f1 = Filter('eq', 'name', 'test')
f2 = EQ('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('lt', 'name', 'test')
self.assertNotEqual(f1, f2)
f2 = LT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('le', 'name', 'test')
f2 = LE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('gt', 'name', 'test')
f2 = GT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ge', 'name', 'test')
f2 = GE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ne', 'name', 'test')
f2 = NE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', 'test')
f2 = RE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['t', 'e', 's', 't'])
f2 = IN('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['test'])
f2 = IN('name', ['test'])
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '^test')
f2 = StartsWith('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '.*test$')
f2 = EndsWith('name', 'test')
self.assertEqual(f1, f2)
def test_starts_with(self):
f = StartsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, "^test")')
def test_ends_with(self):
f = EndsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, ".*test$")')
def test_re(self):
# FIXME: Regular expressions are broken
f = RE('name', r"\s+([A-Za-z0-9]+)")
self.assertEqual("%s" % f, r'.re(name, "\s+([A-Za-z0-9]+)")')
def test_in(self):
f = IN('name', ['a', 'b', 'c'])
self.assertEqual("%s" % f, '.in(name, ["a", "b", "c"])')
|
<commit_before><commit_msg>Add Filter tests. Regular expressions are broken.<commit_after>
|
import unittest
from pypercube.filters import Filter
from pypercube.filters import EQ
from pypercube.filters import LT
from pypercube.filters import LE
from pypercube.filters import GT
from pypercube.filters import GE
from pypercube.filters import NE
from pypercube.filters import RE
from pypercube.filters import IN
from pypercube.filters import StartsWith
from pypercube.filters import EndsWith
class TestFilter(unittest.TestCase):
def test_equality(self):
f1 = Filter('eq', 'name', 'test')
f2 = EQ('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('lt', 'name', 'test')
self.assertNotEqual(f1, f2)
f2 = LT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('le', 'name', 'test')
f2 = LE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('gt', 'name', 'test')
f2 = GT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ge', 'name', 'test')
f2 = GE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ne', 'name', 'test')
f2 = NE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', 'test')
f2 = RE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['t', 'e', 's', 't'])
f2 = IN('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['test'])
f2 = IN('name', ['test'])
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '^test')
f2 = StartsWith('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '.*test$')
f2 = EndsWith('name', 'test')
self.assertEqual(f1, f2)
def test_starts_with(self):
f = StartsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, "^test")')
def test_ends_with(self):
f = EndsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, ".*test$")')
def test_re(self):
# FIXME: Regular expressions are broken
f = RE('name', r"\s+([A-Za-z0-9]+)")
self.assertEqual("%s" % f, r'.re(name, "\s+([A-Za-z0-9]+)")')
def test_in(self):
f = IN('name', ['a', 'b', 'c'])
self.assertEqual("%s" % f, '.in(name, ["a", "b", "c"])')
|
Add Filter tests. Regular expressions are broken.import unittest
from pypercube.filters import Filter
from pypercube.filters import EQ
from pypercube.filters import LT
from pypercube.filters import LE
from pypercube.filters import GT
from pypercube.filters import GE
from pypercube.filters import NE
from pypercube.filters import RE
from pypercube.filters import IN
from pypercube.filters import StartsWith
from pypercube.filters import EndsWith
class TestFilter(unittest.TestCase):
def test_equality(self):
f1 = Filter('eq', 'name', 'test')
f2 = EQ('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('lt', 'name', 'test')
self.assertNotEqual(f1, f2)
f2 = LT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('le', 'name', 'test')
f2 = LE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('gt', 'name', 'test')
f2 = GT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ge', 'name', 'test')
f2 = GE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ne', 'name', 'test')
f2 = NE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', 'test')
f2 = RE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['t', 'e', 's', 't'])
f2 = IN('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['test'])
f2 = IN('name', ['test'])
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '^test')
f2 = StartsWith('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '.*test$')
f2 = EndsWith('name', 'test')
self.assertEqual(f1, f2)
def test_starts_with(self):
f = StartsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, "^test")')
def test_ends_with(self):
f = EndsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, ".*test$")')
def test_re(self):
# FIXME: Regular expressions are broken
f = RE('name', r"\s+([A-Za-z0-9]+)")
self.assertEqual("%s" % f, r'.re(name, "\s+([A-Za-z0-9]+)")')
def test_in(self):
f = IN('name', ['a', 'b', 'c'])
self.assertEqual("%s" % f, '.in(name, ["a", "b", "c"])')
|
<commit_before><commit_msg>Add Filter tests. Regular expressions are broken.<commit_after>import unittest
from pypercube.filters import Filter
from pypercube.filters import EQ
from pypercube.filters import LT
from pypercube.filters import LE
from pypercube.filters import GT
from pypercube.filters import GE
from pypercube.filters import NE
from pypercube.filters import RE
from pypercube.filters import IN
from pypercube.filters import StartsWith
from pypercube.filters import EndsWith
class TestFilter(unittest.TestCase):
def test_equality(self):
f1 = Filter('eq', 'name', 'test')
f2 = EQ('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('lt', 'name', 'test')
self.assertNotEqual(f1, f2)
f2 = LT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('le', 'name', 'test')
f2 = LE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('gt', 'name', 'test')
f2 = GT('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ge', 'name', 'test')
f2 = GE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('ne', 'name', 'test')
f2 = NE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', 'test')
f2 = RE('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['t', 'e', 's', 't'])
f2 = IN('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('in', 'name', ['test'])
f2 = IN('name', ['test'])
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '^test')
f2 = StartsWith('name', 'test')
self.assertEqual(f1, f2)
f1 = Filter('re', 'name', '.*test$')
f2 = EndsWith('name', 'test')
self.assertEqual(f1, f2)
def test_starts_with(self):
f = StartsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, "^test")')
def test_ends_with(self):
f = EndsWith('name', 'test')
self.assertEqual("%s" % f, '.re(name, ".*test$")')
def test_re(self):
# FIXME: Regular expressions are broken
f = RE('name', r"\s+([A-Za-z0-9]+)")
self.assertEqual("%s" % f, r'.re(name, "\s+([A-Za-z0-9]+)")')
def test_in(self):
f = IN('name', ['a', 'b', 'c'])
self.assertEqual("%s" % f, '.in(name, ["a", "b", "c"])')
|
|
113673a78c633ceffe40b071a8c164bd631255fa
|
tests/test_weekday.py
|
tests/test_weekday.py
|
import pytest
from recurrence import Weekday
def test_init():
assert repr(Weekday(3)) == 'TH'
assert repr(Weekday(3, -2)) == '-2TH'
assert repr(Weekday(3, 3)) == '3TH'
with pytest.raises(ValueError):
Weekday(8)
with pytest.raises(ValueError):
Weekday('fish')
def test_call():
# I'm not sure why this functionality is useful, but this is what
# calling a weekday currently does.
day = Weekday(4, -3)
assert day(2) == Weekday(4, 2)
assert day(-3) is day
assert day(None) == Weekday(4)
|
Add tests to show what the Weekday object does
|
Add tests to show what the Weekday object does
|
Python
|
bsd-3-clause
|
Nikola-K/django-recurrence,linux2400/django-recurrence,Nikola-K/django-recurrence,FrankSalad/django-recurrence,FrankSalad/django-recurrence,django-recurrence/django-recurrence,django-recurrence/django-recurrence,linux2400/django-recurrence
|
Add tests to show what the Weekday object does
|
import pytest
from recurrence import Weekday
def test_init():
assert repr(Weekday(3)) == 'TH'
assert repr(Weekday(3, -2)) == '-2TH'
assert repr(Weekday(3, 3)) == '3TH'
with pytest.raises(ValueError):
Weekday(8)
with pytest.raises(ValueError):
Weekday('fish')
def test_call():
# I'm not sure why this functionality is useful, but this is what
# calling a weekday currently does.
day = Weekday(4, -3)
assert day(2) == Weekday(4, 2)
assert day(-3) is day
assert day(None) == Weekday(4)
|
<commit_before><commit_msg>Add tests to show what the Weekday object does<commit_after>
|
import pytest
from recurrence import Weekday
def test_init():
assert repr(Weekday(3)) == 'TH'
assert repr(Weekday(3, -2)) == '-2TH'
assert repr(Weekday(3, 3)) == '3TH'
with pytest.raises(ValueError):
Weekday(8)
with pytest.raises(ValueError):
Weekday('fish')
def test_call():
# I'm not sure why this functionality is useful, but this is what
# calling a weekday currently does.
day = Weekday(4, -3)
assert day(2) == Weekday(4, 2)
assert day(-3) is day
assert day(None) == Weekday(4)
|
Add tests to show what the Weekday object doesimport pytest
from recurrence import Weekday
def test_init():
assert repr(Weekday(3)) == 'TH'
assert repr(Weekday(3, -2)) == '-2TH'
assert repr(Weekday(3, 3)) == '3TH'
with pytest.raises(ValueError):
Weekday(8)
with pytest.raises(ValueError):
Weekday('fish')
def test_call():
# I'm not sure why this functionality is useful, but this is what
# calling a weekday currently does.
day = Weekday(4, -3)
assert day(2) == Weekday(4, 2)
assert day(-3) is day
assert day(None) == Weekday(4)
|
<commit_before><commit_msg>Add tests to show what the Weekday object does<commit_after>import pytest
from recurrence import Weekday
def test_init():
assert repr(Weekday(3)) == 'TH'
assert repr(Weekday(3, -2)) == '-2TH'
assert repr(Weekday(3, 3)) == '3TH'
with pytest.raises(ValueError):
Weekday(8)
with pytest.raises(ValueError):
Weekday('fish')
def test_call():
# I'm not sure why this functionality is useful, but this is what
# calling a weekday currently does.
day = Weekday(4, -3)
assert day(2) == Weekday(4, 2)
assert day(-3) is day
assert day(None) == Weekday(4)
|
|
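Editor's note: a minimal, hypothetical Weekday that satisfies every assert in the record above. django-recurrence's real class carries more validation and features; this only illustrates the repr and __call__ contract the tests document.

WEEKDAYS = ('MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU')

class Weekday(object):
    def __init__(self, number, index=None):
        number = int(number)  # Weekday('fish') raises ValueError here
        if not 0 <= number <= 6:
            raise ValueError('invalid weekday number: %r' % number)
        self.number, self.index = number, index

    def __call__(self, index):
        # Calling re-binds the index; an unchanged index returns self,
        # mirroring the "day(-3) is day" behaviour in the test.
        if index == self.index:
            return self
        return Weekday(self.number, index)

    def __eq__(self, other):
        return (self.number, self.index) == (other.number, other.index)

    def __repr__(self):
        return '%s%s' % (self.index if self.index is not None else '',
                         WEEKDAYS[self.number])

assert repr(Weekday(3, -2)) == '-2TH'
assert Weekday(4, -3)(None) == Weekday(4)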
857750c5f2fba568c9ad3320d06b4178457be612
|
uwsgi/hello.py
|
uwsgi/hello.py
|
import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'text/plain'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
|
import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'application/json'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
|
Fix test to use proper Content-type for json test
|
uwsgi: Fix test to use proper Content-type for json test
|
Python
|
bsd-3-clause
|
nbrady-techempower/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,valyala/FrameworkBenchmarks,doom369/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,zapov/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,testn/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,dmacd/FB-try1,mfirry/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Verber/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,grob/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,actframework/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,zapov/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,methane/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,khellang/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,doom369/FrameworkBenchmarks,doom369/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,grob/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,khellang/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,khellang/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,testn/FrameworkBenchmarks,Verber/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,herloct/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,denkab/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,valyala/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jaguililla/
FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,methane/FrameworkBenchmarks,grob/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,Verber/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zloster/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,doom369/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,sgml/FrameworkBenchmarks,valyala/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,zapov/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,joshk/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,actframework/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,sxend/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jamming/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,torhve/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,joshk/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,torhve/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,denkab/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jamming/FrameworkBenchmarks,methane/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,v
alyala/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,sxend/FrameworkBenchmarks,dmacd/FB-try1,mfirry/FrameworkBenchmarks,joshk/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jamming/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,zloster/FrameworkBenchmarks,grob/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jamming/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,torhve/FrameworkBenchmarks,denkab/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,zapov/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,doom369/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,doom369/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,denkab/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,zloster/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,doom369/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,denkab/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jamming/FrameworkBenchmarks,sxend/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,Verber/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,actframework/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,jamming/FrameworkBenchmarks,sgml/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,methane/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,valyala/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,dmacd/FB-try1,kostya-sh/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,
herloct/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,dmacd/FB-try1,testn/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,methane/FrameworkBenchmarks,khellang/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,herloct/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,zloster/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,herloct/FrameworkBenchmarks,dmacd/FB-try1,s-ludwig/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,leafo/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,jamming/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,testn/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,actframework/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,methane/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,sgml/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,dmacd/FB-try1,greenlaw110/FrameworkBenchmarks,dmacd/FB-try1,F3Community/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,sxend/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,herloct/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,grob/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,doom369/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,sgml/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,herloct/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,joshk/FrameworkBenchmarks,methane/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,joshk/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,victorbriz/FrameworkBenchm
arks,methane/FrameworkBenchmarks,methane/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,testn/FrameworkBenchmarks,leafo/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,actframework/FrameworkBenchmarks,grob/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,jamming/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,doom369/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Verber/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,leafo/FrameworkBenchmarks,joshk/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,torhve/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,herloct/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zapov/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,khellang/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,denkab/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,testn/FrameworkBenchmarks,khellang/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,sxend/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,joshk/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,zapov/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,herloct/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,methane/FrameworkBenchmarks,leafo/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,sgml/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,seem-sky/Framework
Benchmarks,raziel057/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,sxend/FrameworkBenchmarks,sxend/FrameworkBenchmarks,khellang/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,grob/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,joshk/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,sgml/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,khellang/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sgml/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,valyala/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,methane/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sxend/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,torhve/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Verber/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,leafo/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,torhve/FrameworkBenchmarks,actframework/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,torhve/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,dmacd/FB-try1,zane-techempower/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,jamming/FrameworkBenchmarks,sgml/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zloster/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,denkab/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zloster/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,actframework/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,joshk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,jamming/FrameworkBenchmarks,k-r-g/Frame
workBenchmarks,testn/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,doom369/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,torhve/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,zapov/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,zloster/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,grob/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,leafo/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,jamming/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,denkab/FrameworkBenchmarks,herloct/FrameworkBenchmarks,actframework/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,leafo/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,grob/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,denkab/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,torhve/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,denkab/FrameworkBenchmarks,khellang/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,testn/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,zloster/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Verber/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,khellang/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,grob/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,zapov/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,testn/FrameworkBenchmarks,leafo/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,denkab/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,khellang/FrameworkBenchmarks,khellang/FrameworkBenchmarks,Verber/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zapov/FrameworkBenchmarks
,greg-hellings/FrameworkBenchmarks,sxend/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,doom369/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,testn/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,valyala/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,joshk/FrameworkBenchmarks,torhve/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,testn/FrameworkBenchmarks,zapov/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,testn/FrameworkBenchmarks,sxend/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,grob/FrameworkBenchmarks,dmacd/FB-try1,victorbriz/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,herloct/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,grob/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,valyala/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,joshk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,herloct/FrameworkBenchmarks,zapov/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,dmacd/FB-try1,martin-g/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,methane/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,mfirry/FrameworkBenc
hmarks,leafo/FrameworkBenchmarks,leafo/FrameworkBenchmarks,valyala/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,doom369/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,leafo/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,zapov/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,leafo/FrameworkBenchmarks,sxend/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,denkab/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zloster/FrameworkBenchmarks,torhve/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sxend/FrameworkBenchmarks,dmacd/FB-try1,donovanmuller/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sxend/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,testn/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,khellang/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,khellang/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,testn/FrameworkBenchmarks,sgml/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,actframework/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,actframework/FrameworkBenchmarks,grob/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,zapov/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,grob/FrameworkBenchmarks,circlespainter/Fr
ameworkBenchmarks,kbrock/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,actframework/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,herloct/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,valyala/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zapov/FrameworkBenchmarks,methane/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,denkab/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,denkab/FrameworkBenchmarks,dmacd/FB-try1,ashawnbandy-te-tfb/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,methane/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,herloct/FrameworkBenchmarks,torhve/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,actframework/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,joshk/FrameworkBenchmarks,sgml/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks
|
import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'text/plain'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
uwsgi: Fix test to use proper Content-type for json test
|
import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'application/json'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
|
<commit_before>import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'text/plain'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
<commit_msg>uwsgi: Fix test to use proper Content-type for json test<commit_after>
|
import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'application/json'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
|
import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'text/plain'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
uwsgi: Fix test to use proper Content-type for json testimport ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'application/json'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
|
<commit_before>import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'text/plain'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
<commit_msg>uwsgi: Fix test to use proper Content-type for json test<commit_after>import ujson
def application(environ, start_response):
response = {
"message": "Hello, World!"
}
data = ujson.dumps(response)
response_headers = [
('Content-type', 'application/json'),
('Content-Length', str(len(data)))
]
start_response('200 OK', response_headers)
return [data]
|
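Editor's note: the Content-type fix above can be checked without running uWSGI by calling the WSGI callable directly with a capturing start_response. A small, hypothetical harness (it assumes the module above is saved as hello.py; the module name is an assumption, not part of the record):

import json
from hello import application  # hypothetical module name for the code above

captured = {}

def start_response(status, headers):
    # Record what the application passes to the WSGI start_response callable.
    captured['status'] = status
    captured['headers'] = dict(headers)

body = application({}, start_response)
assert captured['status'] == '200 OK'
assert captured['headers']['Content-type'] == 'application/json'
assert json.loads(body[0]) == {'message': 'Hello, World!'}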
212aaed11103a9442745715ae88573fa8fcf3a2c
|
trac/upgrades/db43.py
|
trac/upgrades/db43.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.upgrades import backup_config_file
from trac.util.text import printout
from trac.util.translation import _
_old_default = ['ReadonlyWikiPolicy', 'DefaultPermissionPolicy',
'LegacyAttachmentPolicy']
_new_default = ['DefaultWikiPolicy', 'DefaultTicketPolicy',
'DefaultPermissionPolicy', 'LegacyAttachmentPolicy']
def do_upgrade(env, version, cursor):
"""Automatically append DefaultWikiPolicy and DefaultTicketPolicy
if permission_policies is the default value. Otherwise, echo a message
about the need to manually add DefaultWikiPolicy and DefaultTicketPolicy
to the list of permission_policies.
"""
policies = env.config.getlist('trac', 'permission_policies')
if policies == _old_default:
backup_config_file(env, '.db43.bak')
env.config.set('trac', 'permission_policies', ', '.join(_new_default))
env.config.save()
env.log.info("Enabled DefaultWikiPolicy and DefaultTicketPolicy.")
else:
print_upgrade_msg = False
if 'DefaultWikiPolicy' not in policies:
env.log.info("DefaultWikiPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *wiki* system,
trac.ini must be manually edited to add DefaultWikiPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultWikiPolicy already enabled.")
if 'DefaultTicketPolicy' not in policies:
env.log.info("DefaultTicketPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *ticket* system,
trac.ini must be manually edited to add DefaultTicketPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultTicketPolicy already enabled.")
if print_upgrade_msg:
printout(_("""\
For more details see: http://trac.edgewall.org/wiki/TracUpgrade
"""))
|
Add upgrade script missing from r15749
|
1.3.2dev: Add upgrade script missing from r15749
Refs #12719.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15765 af82e41b-90c4-0310-8c96-b1721e28e2e2
|
Python
|
bsd-3-clause
|
rbaumg/trac,rbaumg/trac,rbaumg/trac,rbaumg/trac
|
1.3.2dev: Add upgrade script missing from r15749
Refs #12719.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15765 af82e41b-90c4-0310-8c96-b1721e28e2e2
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.upgrades import backup_config_file
from trac.util.text import printout
from trac.util.translation import _
_old_default = ['ReadonlyWikiPolicy', 'DefaultPermissionPolicy',
'LegacyAttachmentPolicy']
_new_default = ['DefaultWikiPolicy', 'DefaultTicketPolicy',
'DefaultPermissionPolicy', 'LegacyAttachmentPolicy']
def do_upgrade(env, version, cursor):
"""Automatically append DefaultWikiPolicy and DefaultTicketPolicy
if permission_policies is the default value. Otherwise, echo a message
about the need to manually add DefaultWikiPolicy and DefaultTicketPolicy
to the list of permission_policies.
"""
policies = env.config.getlist('trac', 'permission_policies')
if policies == _old_default:
backup_config_file(env, '.db43.bak')
env.config.set('trac', 'permission_policies', ', '.join(_new_default))
env.config.save()
env.log.info("Enabled DefaultWikiPolicy and DefaultTicketPolicy.")
else:
print_upgrade_msg = False
if 'DefaultWikiPolicy' not in policies:
env.log.info("DefaultWikiPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *wiki* system,
trac.ini must be manually edited to add DefaultWikiPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultWikiPolicy already enabled.")
if 'DefaultTicketPolicy' not in policies:
env.log.info("DefaultTicketPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *ticket* system,
trac.ini must be manually edited to add DefaultTicketPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultTicketPolicy already enabled.")
if print_upgrade_msg:
printout(_("""\
For more details see: http://trac.edgewall.org/wiki/TracUpgrade
"""))
|
<commit_before><commit_msg>1.3.2dev: Add upgrade script missing from r15749
Refs #12719.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15765 af82e41b-90c4-0310-8c96-b1721e28e2e2<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.upgrades import backup_config_file
from trac.util.text import printout
from trac.util.translation import _
_old_default = ['ReadonlyWikiPolicy', 'DefaultPermissionPolicy',
'LegacyAttachmentPolicy']
_new_default = ['DefaultWikiPolicy', 'DefaultTicketPolicy',
'DefaultPermissionPolicy', 'LegacyAttachmentPolicy']
def do_upgrade(env, version, cursor):
"""Automatically append DefaultWikiPolicy and DefaultTicketPolicy
if permission_policies is the default value. Otherwise, echo a message
about the need to manually add DefaultWikiPolicy and DefaultTicketPolicy
to the list of permission_policies.
"""
policies = env.config.getlist('trac', 'permission_policies')
if policies == _old_default:
backup_config_file(env, '.db43.bak')
env.config.set('trac', 'permission_policies', ', '.join(_new_default))
env.config.save()
env.log.info("Enabled DefaultWikiPolicy and DefaultTicketPolicy.")
else:
print_upgrade_msg = False
if 'DefaultWikiPolicy' not in policies:
env.log.info("DefaultWikiPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *wiki* system,
trac.ini must be manually edited to add DefaultWikiPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultWikiPolicy already enabled.")
if 'DefaultTicketPolicy' not in policies:
env.log.info("DefaultTicketPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *ticket* system,
trac.ini must be manually edited to add DefaultTicketPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultTicketPolicy already enabled.")
if print_upgrade_msg:
printout(_("""\
For more details see: http://trac.edgewall.org/wiki/TracUpgrade
"""))
|
1.3.2dev: Add upgrade script missing from r15749
Refs #12719.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15765 af82e41b-90c4-0310-8c96-b1721e28e2e2# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.upgrades import backup_config_file
from trac.util.text import printout
from trac.util.translation import _
_old_default = ['ReadonlyWikiPolicy', 'DefaultPermissionPolicy',
'LegacyAttachmentPolicy']
_new_default = ['DefaultWikiPolicy', 'DefaultTicketPolicy',
'DefaultPermissionPolicy', 'LegacyAttachmentPolicy']
def do_upgrade(env, version, cursor):
"""Automatically append DefaultWikiPolicy and DefaultTicketPolicy
if permission_policies is the default value. Otherwise, echo a message
about the need to manually add DefaultWikiPolicy and DefaultTicketPolicy
to the list of permission_policies.
"""
policies = env.config.getlist('trac', 'permission_policies')
if policies == _old_default:
backup_config_file(env, '.db43.bak')
env.config.set('trac', 'permission_policies', ', '.join(_new_default))
env.config.save()
env.log.info("Enabled DefaultWikiPolicy and DefaultTicketPolicy.")
else:
print_upgrade_msg = False
if 'DefaultWikiPolicy' not in policies:
env.log.info("DefaultWikiPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *wiki* system,
trac.ini must be manually edited to add DefaultWikiPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultWikiPolicy already enabled.")
if 'DefaultTicketPolicy' not in policies:
env.log.info("DefaultTicketPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *ticket* system,
trac.ini must be manually edited to add DefaultTicketPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultTicketPolicy already enabled.")
if print_upgrade_msg:
printout(_("""\
For more details see: http://trac.edgewall.org/wiki/TracUpgrade
"""))
|
<commit_before><commit_msg>1.3.2dev: Add upgrade script missing from r15749
Refs #12719.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@15765 af82e41b-90c4-0310-8c96-b1721e28e2e2<commit_after># -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.upgrades import backup_config_file
from trac.util.text import printout
from trac.util.translation import _
_old_default = ['ReadonlyWikiPolicy', 'DefaultPermissionPolicy',
'LegacyAttachmentPolicy']
_new_default = ['DefaultWikiPolicy', 'DefaultTicketPolicy',
'DefaultPermissionPolicy', 'LegacyAttachmentPolicy']
def do_upgrade(env, version, cursor):
"""Automatically append DefaultWikiPolicy and DefaultTicketPolicy
if permission_policies is the default value. Otherwise, echo a message
about the need to manually add DefaultWikiPolicy and DefaultTicketPolicy
to the list of permission_policies.
"""
policies = env.config.getlist('trac', 'permission_policies')
if policies == _old_default:
backup_config_file(env, '.db43.bak')
env.config.set('trac', 'permission_policies', ', '.join(_new_default))
env.config.save()
env.log.info("Enabled DefaultWikiPolicy and DefaultTicketPolicy.")
else:
print_upgrade_msg = False
if 'DefaultWikiPolicy' not in policies:
env.log.info("DefaultWikiPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *wiki* system,
trac.ini must be manually edited to add DefaultWikiPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultWikiPolicy already enabled.")
if 'DefaultTicketPolicy' not in policies:
env.log.info("DefaultTicketPolicy must be manually enabled.")
# TRANSLATOR: Wrap message to 80 columns
printout(_("""\
Notice: To enable the default permission policy for the *ticket* system,
trac.ini must be manually edited to add DefaultTicketPolicy to the list
of permission_policies in the [trac] section.
"""))
print_upgrade_msg = True
else:
env.log.info("DefaultTicketPolicy already enabled.")
if print_upgrade_msg:
printout(_("""\
For more details see: http://trac.edgewall.org/wiki/TracUpgrade
"""))
|
|
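Editor's note: the upgrade script above follows a pattern worth isolating -- rewrite a config option automatically only when it still equals the old default, and otherwise report what the operator must add by hand. A stripped-down, hypothetical version of that decision logic (not Trac's actual API):

OLD_DEFAULT = ['ReadonlyWikiPolicy', 'DefaultPermissionPolicy',
               'LegacyAttachmentPolicy']
NEW_DEFAULT = ['DefaultWikiPolicy', 'DefaultTicketPolicy',
               'DefaultPermissionPolicy', 'LegacyAttachmentPolicy']

def upgrade_policies(policies):
    """Return (new_policies, policies_to_add_manually)."""
    if policies == OLD_DEFAULT:
        # Untouched default: safe to rewrite in place.
        return NEW_DEFAULT, []
    # Customized list: never reorder it; just report what is missing.
    missing = [p for p in ('DefaultWikiPolicy', 'DefaultTicketPolicy')
               if p not in policies]
    return policies, missing

assert upgrade_policies(OLD_DEFAULT) == (NEW_DEFAULT, [])
assert upgrade_policies(['MyPolicy'])[1] == ['DefaultWikiPolicy',
                                             'DefaultTicketPolicy']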
d5bc55e4e643247d959c0e6035f184473c600346
|
pydump.py
|
pydump.py
|
import ConfigParser
import os
import time
import getpass
def get_dump():
print "Enter user:"
user = raw_input()
print "Password will not be visible:"
password = getpass.getpass()
print "Enter host:"
host = raw_input()
print "Enter database name:"
database = raw_input()
filestamp = time.strftime('%Y-%m-%d-%I:%M')
os.popen("mysqldump -u %s -p%s -h %s -e --opt -c %s | gzip -c > %s.gz" % (user,password,host,database,database+"_"+filestamp))
print "\n-- please find the dump file in "+database+"_"+filestamp+".gz --"
if __name__=="__main__":
get_dump()
|
Add script for taking mysqldump
|
Add script for taking mysqldump
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add script for taking mysqldump
|
import ConfigParser
import os
import time
import getpass
def get_dump():
print "Enter user:"
user = raw_input()
print "Password will not be visible:"
password = getpass.getpass()
print "Enter host:"
host = raw_input()
print "Enter database name:"
database = raw_input()
filestamp = time.strftime('%Y-%m-%d-%I:%M')
os.popen("mysqldump -u %s -p%s -h %s -e --opt -c %s | gzip -c > %s.gz" % (user,password,host,database,database+"_"+filestamp))
print "\n-- please find the dump file in "+database+"_"+filestamp+".gz --"
if __name__=="__main__":
get_dump()
|
<commit_before><commit_msg>Add script for taking mysqldump<commit_after>
|
import ConfigParser
import os
import time
import getpass
def get_dump():
print "Enter user:"
user = raw_input()
print "Password will not be visible:"
password = getpass.getpass()
print "Enter host:"
host = raw_input()
print "Enter database name:"
database = raw_input()
filestamp = time.strftime('%Y-%m-%d-%I:%M')
os.popen("mysqldump -u %s -p%s -h %s -e --opt -c %s | gzip -c > %s.gz" % (user,password,host,database,database+"_"+filestamp))
print "\n-- please find the dump file in "+database+"_"+filestamp+".gz --"
if __name__=="__main__":
get_dump()
|
Add script for taking mysqldumpimport ConfigParser
import os
import time
import getpass
def get_dump():
print "Enter user:"
user = raw_input()
print "Password will not be visible:"
password = getpass.getpass()
print "Enter host:"
host = raw_input()
print "Enter database name:"
database = raw_input()
filestamp = time.strftime('%Y-%m-%d-%I:%M')
os.popen("mysqldump -u %s -p%s -h %s -e --opt -c %s | gzip -c > %s.gz" % (user,password,host,database,database+"_"+filestamp))
print "\n-- please find the dump file in "+database+"_"+filestamp+".gz --"
if __name__=="__main__":
get_dump()
|
<commit_before><commit_msg>Add script for taking mysqldump<commit_after>import ConfigParser
import os
import time
import getpass
def get_dump():
print "Enter user:"
user = raw_input()
print "Password will not be visible:"
password = getpass.getpass()
print "Enter host:"
host = raw_input()
print "Enter database name:"
database = raw_input()
filestamp = time.strftime('%Y-%m-%d-%I:%M')
os.popen("mysqldump -u %s -p%s -h %s -e --opt -c %s | gzip -c > %s.gz" % (user,password,host,database,database+"_"+filestamp))
print "\n-- please find the dump file in "+database+"_"+filestamp+".gz --"
if __name__=="__main__":
get_dump()
|
|
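Editor's note: the script above interpolates the password into the mysqldump command line, where other local users can read it via the process list, and builds the pipeline as a shell string. A hypothetical, somewhat safer Python 3 sketch passes the password through the MYSQL_PWD environment variable and avoids the shell entirely:

import gzip
import os
import subprocess
import time
import getpass

def get_dump(user, host, database):
    password = getpass.getpass('Password (not echoed): ')
    outfile = '%s_%s.gz' % (database, time.strftime('%Y-%m-%d-%H:%M'))
    env = dict(os.environ, MYSQL_PWD=password)  # keeps the password off argv
    result = subprocess.run(
        ['mysqldump', '-u', user, '-h', host, '-e', '--opt', '-c', database],
        stdout=subprocess.PIPE, env=env, check=True)
    # Note: this buffers the whole dump in memory; stream it for large DBs.
    with gzip.open(outfile, 'wb') as out:
        out.write(result.stdout)
    return outfile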
4510eccddc5fbb7fef16a4702112545346d171f3
|
heat/utils.py
|
heat/utils.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from heat.common import exception
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for CLI actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*arguments, **kwargs):
try:
ret = func(*arguments, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotAuthorized:
LOG.error("Not authorized to make this request. Check " +
"your credentials (OS_USERNAME, OS_PASSWORD, " +
"OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
return FAILURE
except exception.ClientConfigurationError:
raise
except exception.KeystoneError, e:
LOG.error("Keystone did not finish the authentication and "
"returned the following message:\n\n%s"
% e.message)
return FAILURE
except Exception, e:
options = arguments[0]
if options.debug:
raise
LOG.error("Failed to %s. Got error:" % action)
pieces = unicode(e).split('\n')
for piece in pieces:
LOG.error(piece)
return FAILURE
return wrapper
return wrap
|
Add back catch_error which is used for CLI errors
|
Add back catch_error which is used for CLI errors
Change-Id: Ib8b975d22950d5ba6aa9d5f150403a64356a8aa3
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
|
Python
|
apache-2.0
|
srznew/heat,dims/heat,noironetworks/heat,JioCloud/heat,rh-s/heat,pshchelo/heat,Triv90/Heat,dims/heat,miguelgrinberg/heat,pratikmallya/heat,dragorosson/heat,NeCTAR-RC/heat,rickerc/heat_audit,steveb/heat,cwolferh/heat-scratch,JioCloud/heat,ntt-sic/heat,varunarya10/heat,cryptickp/heat,redhat-openstack/heat,openstack/heat,varunarya10/heat,redhat-openstack/heat,gonzolino/heat,maestro-hybrid-cloud/heat,steveb/heat,miguelgrinberg/heat,rdo-management/heat,rickerc/heat_audit,jasondunsmore/heat,dragorosson/heat,Triv90/Heat,gonzolino/heat,citrix-openstack-build/heat,cryptickp/heat,citrix-openstack-build/heat,takeshineshiro/heat,rdo-management/heat,takeshineshiro/heat,pshchelo/heat,srznew/heat,openstack/heat,NeCTAR-RC/heat,rh-s/heat,jasondunsmore/heat,Triv90/Heat,ntt-sic/heat,maestro-hybrid-cloud/heat,cwolferh/heat-scratch,pratikmallya/heat,noironetworks/heat
|
Add back catch_error which is used for CLI errors
Change-Id: Ib8b975d22950d5ba6aa9d5f150403a64356a8aa3
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from heat.common import exception
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for CLI actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*arguments, **kwargs):
try:
ret = func(*arguments, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotAuthorized:
LOG.error("Not authorized to make this request. Check " +
"your credentials (OS_USERNAME, OS_PASSWORD, " +
"OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
return FAILURE
except exception.ClientConfigurationError:
raise
except exception.KeystoneError, e:
LOG.error("Keystone did not finish the authentication and "
"returned the following message:\n\n%s"
% e.message)
return FAILURE
except Exception, e:
options = arguments[0]
if options.debug:
raise
LOG.error("Failed to %s. Got error:" % action)
pieces = unicode(e).split('\n')
for piece in pieces:
LOG.error(piece)
return FAILURE
return wrapper
return wrap
|
<commit_before><commit_msg>Add back catch_error which is used for CLI errors
Change-Id: Ib8b975d22950d5ba6aa9d5f150403a64356a8aa3
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com><commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from heat.common import exception
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for CLI actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*arguments, **kwargs):
try:
ret = func(*arguments, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotAuthorized:
LOG.error("Not authorized to make this request. Check " +
"your credentials (OS_USERNAME, OS_PASSWORD, " +
"OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
return FAILURE
except exception.ClientConfigurationError:
raise
except exception.KeystoneError, e:
LOG.error("Keystone did not finish the authentication and "
"returned the following message:\n\n%s"
% e.message)
return FAILURE
except Exception, e:
options = arguments[0]
if options.debug:
raise
LOG.error("Failed to %s. Got error:" % action)
pieces = unicode(e).split('\n')
for piece in pieces:
LOG.error(piece)
return FAILURE
return wrapper
return wrap
|
Add back catch_error which is used for CLI errors
Change-Id: Ib8b975d22950d5ba6aa9d5f150403a64356a8aa3
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from heat.common import exception
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for CLI actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*arguments, **kwargs):
try:
ret = func(*arguments, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotAuthorized:
LOG.error("Not authorized to make this request. Check " +
"your credentials (OS_USERNAME, OS_PASSWORD, " +
"OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
return FAILURE
except exception.ClientConfigurationError:
raise
except exception.KeystoneError, e:
LOG.error("Keystone did not finish the authentication and "
"returned the following message:\n\n%s"
% e.message)
return FAILURE
except Exception, e:
options = arguments[0]
if options.debug:
raise
LOG.error("Failed to %s. Got error:" % action)
pieces = unicode(e).split('\n')
for piece in pieces:
LOG.error(piece)
return FAILURE
return wrapper
return wrap
|
<commit_before><commit_msg>Add back catch_error which is used for CLI errors
Change-Id: Ib8b975d22950d5ba6aa9d5f150403a64356a8aa3
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com><commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from heat.common import exception
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for CLI actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*arguments, **kwargs):
try:
ret = func(*arguments, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotAuthorized:
LOG.error("Not authorized to make this request. Check " +
"your credentials (OS_USERNAME, OS_PASSWORD, " +
"OS_TENANT_NAME, OS_AUTH_URL and OS_AUTH_STRATEGY).")
return FAILURE
except exception.ClientConfigurationError:
raise
except exception.KeystoneError, e:
LOG.error("Keystone did not finish the authentication and "
"returned the following message:\n\n%s"
% e.message)
return FAILURE
except Exception, e:
options = arguments[0]
if options.debug:
raise
LOG.error("Failed to %s. Got error:" % action)
pieces = unicode(e).split('\n')
for piece in pieces:
LOG.error(piece)
return FAILURE
return wrapper
return wrap
|
|
6ac27ccc4a23b9af6555b624c16bc270aaf849ed
|
ntm/test/test_layers.py
|
ntm/test/test_layers.py
|
import pytest
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
from lasagne.layers import get_output, get_all_param_values, set_all_param_values
from ntm.layers import NTMLayer
from ntm.heads import WriteHead, ReadHead
from ntm.controllers import DenseController
from ntm.memory import Memory
def model(input_var, batch_size=1):
l_input = InputLayer((batch_size, None, 8), input_var=input_var)
_, seqlen, _ = l_input.input_var.shape
# Neural Turing Machine Layer
memory = Memory((128, 20), name='memory')
controller = DenseController(l_input, memory_shape=(128, 20),
num_units=100, num_reads=1, name='controller')
heads = [
WriteHead(controller, num_shifts=3, memory_shape=(128, 20), name='write'),
ReadHead(controller, num_shifts=3, memory_shape=(128, 20), name='read')
]
l_ntm = NTMLayer(l_input, memory=memory, controller=controller, heads=heads)
# Output Layer
l_output_reshape = ReshapeLayer(l_ntm, (-1, 100))
l_output_dense = DenseLayer(l_output_reshape, num_units=8, name='dense')
l_output = ReshapeLayer(l_output_dense, (batch_size, seqlen, 8))
return l_output
def test_batch_size():
input_var01, input_var16 = T.tensor3s('input01', 'input16')
l_output01 = model(input_var01, batch_size=1)
l_output16 = model(input_var16, batch_size=16)
# Share the parameters for both models
params01 = get_all_param_values(l_output01)
set_all_param_values(l_output16, params01)
posterior_fn01 = theano.function([input_var01], get_output(l_output01))
posterior_fn16 = theano.function([input_var16], get_output(l_output16))
example_input = np.random.rand(16, 30, 8)
example_output16 = posterior_fn16(example_input)
example_output01 = np.zeros_like(example_output16)
for i in range(16):
example_output01[i] = posterior_fn01(example_input[i][np.newaxis, :, :])
assert example_output16.shape == (16, 30, 8)
assert np.allclose(example_output16, example_output01, atol=1e-3)
|
Add test for size of batches
|
Add test for size of batches
|
Python
|
mit
|
snipsco/ntm-lasagne
|
Add test for size of batches
|
import pytest
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
from lasagne.layers import get_output, get_all_param_values, set_all_param_values
from ntm.layers import NTMLayer
from ntm.heads import WriteHead, ReadHead
from ntm.controllers import DenseController
from ntm.memory import Memory
def model(input_var, batch_size=1):
l_input = InputLayer((batch_size, None, 8), input_var=input_var)
_, seqlen, _ = l_input.input_var.shape
# Neural Turing Machine Layer
memory = Memory((128, 20), name='memory')
controller = DenseController(l_input, memory_shape=(128, 20),
num_units=100, num_reads=1, name='controller')
heads = [
WriteHead(controller, num_shifts=3, memory_shape=(128, 20), name='write'),
ReadHead(controller, num_shifts=3, memory_shape=(128, 20), name='read')
]
l_ntm = NTMLayer(l_input, memory=memory, controller=controller, heads=heads)
# Output Layer
l_output_reshape = ReshapeLayer(l_ntm, (-1, 100))
l_output_dense = DenseLayer(l_output_reshape, num_units=8, name='dense')
l_output = ReshapeLayer(l_output_dense, (batch_size, seqlen, 8))
return l_output
def test_batch_size():
input_var01, input_var16 = T.tensor3s('input01', 'input16')
l_output01 = model(input_var01, batch_size=1)
l_output16 = model(input_var16, batch_size=16)
# Share the parameters for both models
params01 = get_all_param_values(l_output01)
set_all_param_values(l_output16, params01)
posterior_fn01 = theano.function([input_var01], get_output(l_output01))
posterior_fn16 = theano.function([input_var16], get_output(l_output16))
example_input = np.random.rand(16, 30, 8)
example_output16 = posterior_fn16(example_input)
example_output01 = np.zeros_like(example_output16)
for i in range(16):
example_output01[i] = posterior_fn01(example_input[i][np.newaxis, :, :])
assert example_output16.shape == (16, 30, 8)
assert np.allclose(example_output16, example_output01, atol=1e-3)
|
<commit_before><commit_msg>Add test for size of batches<commit_after>
|
import pytest
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
from lasagne.layers import get_output, get_all_param_values, set_all_param_values
from ntm.layers import NTMLayer
from ntm.heads import WriteHead, ReadHead
from ntm.controllers import DenseController
from ntm.memory import Memory
def model(input_var, batch_size=1):
l_input = InputLayer((batch_size, None, 8), input_var=input_var)
_, seqlen, _ = l_input.input_var.shape
# Neural Turing Machine Layer
memory = Memory((128, 20), name='memory')
controller = DenseController(l_input, memory_shape=(128, 20),
num_units=100, num_reads=1, name='controller')
heads = [
WriteHead(controller, num_shifts=3, memory_shape=(128, 20), name='write'),
ReadHead(controller, num_shifts=3, memory_shape=(128, 20), name='read')
]
l_ntm = NTMLayer(l_input, memory=memory, controller=controller, heads=heads)
# Output Layer
l_output_reshape = ReshapeLayer(l_ntm, (-1, 100))
l_output_dense = DenseLayer(l_output_reshape, num_units=8, name='dense')
l_output = ReshapeLayer(l_output_dense, (batch_size, seqlen, 8))
return l_output
def test_batch_size():
input_var01, input_var16 = T.tensor3s('input01', 'input16')
l_output01 = model(input_var01, batch_size=1)
l_output16 = model(input_var16, batch_size=16)
# Share the parameters for both models
params01 = get_all_param_values(l_output01)
set_all_param_values(l_output16, params01)
posterior_fn01 = theano.function([input_var01], get_output(l_output01))
posterior_fn16 = theano.function([input_var16], get_output(l_output16))
example_input = np.random.rand(16, 30, 8)
example_output16 = posterior_fn16(example_input)
example_output01 = np.zeros_like(example_output16)
for i in range(16):
example_output01[i] = posterior_fn01(example_input[i][np.newaxis, :, :])
assert example_output16.shape == (16, 30, 8)
assert np.allclose(example_output16, example_output01, atol=1e-3)
|
Add test for size of batches
import pytest
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
from lasagne.layers import get_output, get_all_param_values, set_all_param_values
from ntm.layers import NTMLayer
from ntm.heads import WriteHead, ReadHead
from ntm.controllers import DenseController
from ntm.memory import Memory
def model(input_var, batch_size=1):
l_input = InputLayer((batch_size, None, 8), input_var=input_var)
_, seqlen, _ = l_input.input_var.shape
# Neural Turing Machine Layer
memory = Memory((128, 20), name='memory')
controller = DenseController(l_input, memory_shape=(128, 20),
num_units=100, num_reads=1, name='controller')
heads = [
WriteHead(controller, num_shifts=3, memory_shape=(128, 20), name='write'),
ReadHead(controller, num_shifts=3, memory_shape=(128, 20), name='read')
]
l_ntm = NTMLayer(l_input, memory=memory, controller=controller, heads=heads)
# Output Layer
l_output_reshape = ReshapeLayer(l_ntm, (-1, 100))
l_output_dense = DenseLayer(l_output_reshape, num_units=8, name='dense')
l_output = ReshapeLayer(l_output_dense, (batch_size, seqlen, 8))
return l_output
def test_batch_size():
input_var01, input_var16 = T.tensor3s('input01', 'input16')
l_output01 = model(input_var01, batch_size=1)
l_output16 = model(input_var16, batch_size=16)
# Share the parameters for both models
params01 = get_all_param_values(l_output01)
set_all_param_values(l_output16, params01)
posterior_fn01 = theano.function([input_var01], get_output(l_output01))
posterior_fn16 = theano.function([input_var16], get_output(l_output16))
example_input = np.random.rand(16, 30, 8)
example_output16 = posterior_fn16(example_input)
example_output01 = np.zeros_like(example_output16)
for i in range(16):
example_output01[i] = posterior_fn01(example_input[i][np.newaxis, :, :])
assert example_output16.shape == (16, 30, 8)
assert np.allclose(example_output16, example_output01, atol=1e-3)
|
<commit_before><commit_msg>Add test for size of batches<commit_after>import pytest
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import InputLayer, ReshapeLayer, DenseLayer
from lasagne.layers import get_output, get_all_param_values, set_all_param_values
from ntm.layers import NTMLayer
from ntm.heads import WriteHead, ReadHead
from ntm.controllers import DenseController
from ntm.memory import Memory
def model(input_var, batch_size=1):
l_input = InputLayer((batch_size, None, 8), input_var=input_var)
_, seqlen, _ = l_input.input_var.shape
# Neural Turing Machine Layer
memory = Memory((128, 20), name='memory')
controller = DenseController(l_input, memory_shape=(128, 20),
num_units=100, num_reads=1, name='controller')
heads = [
WriteHead(controller, num_shifts=3, memory_shape=(128, 20), name='write'),
ReadHead(controller, num_shifts=3, memory_shape=(128, 20), name='read')
]
l_ntm = NTMLayer(l_input, memory=memory, controller=controller, heads=heads)
# Output Layer
l_output_reshape = ReshapeLayer(l_ntm, (-1, 100))
l_output_dense = DenseLayer(l_output_reshape, num_units=8, name='dense')
l_output = ReshapeLayer(l_output_dense, (batch_size, seqlen, 8))
return l_output
def test_batch_size():
input_var01, input_var16 = T.tensor3s('input01', 'input16')
l_output01 = model(input_var01, batch_size=1)
l_output16 = model(input_var16, batch_size=16)
# Share the parameters for both models
params01 = get_all_param_values(l_output01)
set_all_param_values(l_output16, params01)
posterior_fn01 = theano.function([input_var01], get_output(l_output01))
posterior_fn16 = theano.function([input_var16], get_output(l_output16))
example_input = np.random.rand(16, 30, 8)
example_output16 = posterior_fn16(example_input)
example_output01 = np.zeros_like(example_output16)
for i in range(16):
example_output01[i] = posterior_fn01(example_input[i][np.newaxis, :, :])
assert example_output16.shape == (16, 30, 8)
assert np.allclose(example_output16, example_output01, atol=1e-3)
|
|
97e5a157a56caf7a71ec7b51ecbbfef840bf24c7
|
Program1/Program1-NoLoops.py
|
Program1/Program1-NoLoops.py
|
#
# Explain what this program does
#
numberOne = int(input("Number? "))
action = input("Action (+, -, / or *)? ")
numberTwo = int(input("Number? "))
if action == "+":
print()
print(numberOne + numberTwo)
print()
elif action == "-":
print()
print(numberOne - numberTwo)
print()
elif action == "/":
print()
print(numberOne / numberTwo)
print()
elif action == "*":
print()
print(numberOne * numberTwo)
print()
else:
print()
print("Invalid action! Supported actions are +, -, / and *!")
print()
|
Add no loop version of Program1
|
Add no loop version of Program1
|
Python
|
mit
|
Mrcomputer1/SimplePythonPrograms
|
Add no loop version of Program1
|
#
# Explain what this program does
#
numberOne = int(input("Number? "))
action = input("Action (+, -, / or *)? ")
numberTwo = int(input("Number? "))
if action == "+":
print()
print(numberOne + numberTwo)
print()
elif action == "-":
print()
print(numberOne - numberTwo)
print()
elif action == "/":
print()
print(numberOne / numberTwo)
print()
elif action == "*":
print()
print(numberOne * numberTwo)
print()
else:
print()
print("Invalid action! Supported actions are +, -, / and *!")
print()
|
<commit_before><commit_msg>Add no loop version of Program1<commit_after>
|
#
# Explain what this program does
#
numberOne = int(input("Number? "))
action = input("Action (+, -, / or *)? ")
numberTwo = int(input("Number? "))
if action == "+":
print()
print(numberOne + numberTwo)
print()
elif action == "-":
print()
print(numberOne - numberTwo)
print()
elif action == "/":
print()
print(numberOne / numberTwo)
print()
elif action == "*":
print()
print(numberOne * numberTwo)
print()
else:
print()
print("Invalid action! Supported actions are +, -, / and *!")
print()
|
Add no loop version of Program1
#
# Explain what this program does
#
numberOne = int(input("Number? "))
action = input("Action (+, -, / or *)? ")
numberTwo = int(input("Number? "))
if action == "+":
print()
print(numberOne + numberTwo)
print()
elif action == "-":
print()
print(numberOne - numberTwo)
print()
elif action == "/":
print()
print(numberOne / numberTwo)
print()
elif action == "*":
print()
print(numberOne * numberTwo)
print()
else:
print()
print("Invalid action! Supported actions are +, -, / and *!")
print()
|
<commit_before><commit_msg>Add no loop version of Program1<commit_after>#
# Explain what this program does
#
numberOne = int(input("Number? "))
action = input("Action (+, -, / or *)? ")
numberTwo = int(input("Number? "))
if action == "+":
print()
print(numberOne + numberTwo)
print()
elif action == "-":
print()
print(numberOne - numberTwo)
print()
elif action == "/":
print()
print(numberOne / numberTwo)
print()
elif action == "*":
print()
print(numberOne * numberTwo)
print()
else:
print()
print("Invalid action! Supported actions are +, -, / and *!")
print()
|
|
e47264d3bd45034e22923f793b9466114af6a32c
|
gmmp/management/commands/fix_countries.py
|
gmmp/management/commands/fix_countries.py
|
from django.core.management.base import BaseCommand
from django.db.models import F
from forms.models import sheet_models
class Command(BaseCommand):
def handle(self, *args, **options):
for name, model in sheet_models.iteritems():
country_errors_sheets = model.objects.exclude(monitor__country__in=F('country'))
for sheet in country_errors_sheets:
sheet.country = sheet.monitor.country
sheet.save()
self.stdout.write("%s %s" % (name, sheet.id))
|
Add management command to fix sheet countries
|
Add management command to fix sheet countries
|
Python
|
apache-2.0
|
Code4SA/gmmp,Code4SA/gmmp,Code4SA/gmmp
|
Add management command to fix sheet countries
|
from django.core.management.base import BaseCommand
from django.db.models import F
from forms.models import sheet_models
class Command(BaseCommand):
def handle(self, *args, **options):
for name, model in sheet_models.iteritems():
country_errors_sheets = model.objects.exclude(monitor__country__in=F('country'))
for sheet in country_errors_sheets:
sheet.country = sheet.monitor.country
sheet.save()
self.stdout.write("%s %s" % (name, sheet.id))
|
<commit_before><commit_msg>Add management command to fix sheet countries<commit_after>
|
from django.core.management.base import BaseCommand
from django.db.models import F
from forms.models import sheet_models
class Command(BaseCommand):
def handle(self, *args, **options):
for name, model in sheet_models.iteritems():
country_errors_sheets = model.objects.exclude(monitor__country__in=F('country'))
for sheet in country_errors_sheets:
sheet.country = sheet.monitor.country
sheet.save()
self.stdout.write("%s %s" % (name, sheet.id))
|
Add management command to fix sheet countries
from django.core.management.base import BaseCommand
from django.db.models import F
from forms.models import sheet_models
class Command(BaseCommand):
def handle(self, *args, **options):
for name, model in sheet_models.iteritems():
country_errors_sheets = model.objects.exclude(monitor__country__in=F('country'))
for sheet in country_errors_sheets:
sheet.country = sheet.monitor.country
sheet.save()
self.stdout.write("%s %s" % (name, sheet.id))
|
<commit_before><commit_msg>Add management command to fix sheet countries<commit_after>from django.core.management.base import BaseCommand
from django.db.models import F
from forms.models import sheet_models
class Command(BaseCommand):
def handle(self, *args, **options):
for name, model in sheet_models.iteritems():
country_errors_sheets = model.objects.exclude(monitor__country__in=F('country'))
for sheet in country_errors_sheets:
sheet.country = sheet.monitor.country
sheet.save()
self.stdout.write("%s %s" % (name, sheet.id))
|