| column | type | stats |
|---|---|---|
| commit | stringlengths | 40 to 40 |
| old_file | stringlengths | 4 to 118 |
| new_file | stringlengths | 4 to 118 |
| old_contents | stringlengths | 0 to 2.94k |
| new_contents | stringlengths | 1 to 4.43k |
| subject | stringlengths | 15 to 444 |
| message | stringlengths | 16 to 3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5 to 43.2k |
| prompt | stringlengths | 17 to 4.58k |
| response | stringlengths | 1 to 4.43k |
| prompt_tagged | stringlengths | 58 to 4.62k |
| response_tagged | stringlengths | 1 to 4.43k |
| text | stringlengths | 132 to 7.29k |
| text_tagged | stringlengths | 173 to 7.33k |
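The rows below follow this schema. As a quick orientation, here is a minimal sketch of iterating over such rows, assuming the dataset has been exported as JSON Lines; the path `commits.jsonl` is illustrative and not defined by this card.

```python
import json

# Minimal reader sketch: each row is one commit record with the columns above.
# "commits.jsonl" is a hypothetical export path, not something this card defines.
with open("commits.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # e.g. inspect the commit hash, the touched file, and the subject line
        print(row["commit"], row["new_file"], row["subject"])
```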
4fb4603522c3693ef6ca1f7e258f9aaf3cccc7d1
|
all-domains/data-structures/linked-lists/print-the-elements-of-a-linked-list/solution.py
|
all-domains/data-structures/linked-lists/print-the-elements-of-a-linked-list/solution.py
|
# https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list
# Python 2
"""
Print elements of a linked list on console
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def print_list(head):
current = head
while hasattr(current, 'next'):
print(current.data)
current = current.next
|
Print all the elements of a linked list
|
Print all the elements of a linked list
|
Python
|
mit
|
arvinsim/hackerrank-solutions
|
Print all the elements of a linked list
|
# https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list
# Python 2
"""
Print elements of a linked list on console
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def print_list(head):
current = head
while hasattr(current, 'next'):
print(current.data)
current = current.next
|
<commit_before><commit_msg>Print all the elements of a linked list<commit_after>
|
# https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list
# Python 2
"""
Print elements of a linked list on console
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def print_list(head):
current = head
while hasattr(current, 'next'):
print(current.data)
current = current.next
|
Print all the elements of a linked list# https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list
# Python 2
"""
Print elements of a linked list on console
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def print_list(head):
current = head
while hasattr(current, 'next'):
print(current.data)
current = current.next
|
<commit_before><commit_msg>Print all the elements of a linked list<commit_after># https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list
# Python 2
"""
Print elements of a linked list on console
head input could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def print_list(head):
current = head
while hasattr(current, 'next'):
print(current.data)
current = current.next
|
|
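A quick usage sketch for the `print_list` snippet above, assuming the `Node` class from its docstring; nothing here is part of the original solution. Note the design choice: looping on `hasattr(current, 'next')` rather than `current is not None` also terminates cleanly on an empty list, since `None` has no `next` attribute.

```python
# Hypothetical sanity check for print_list; Node is taken from the docstring.
class Node(object):
    def __init__(self, data=None, next_node=None):
        self.data = data
        self.next = next_node

head = Node(1, Node(2, Node(3)))
print_list(head)   # prints 1, 2, 3 on separate lines
print_list(None)   # empty list: prints nothing
```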
5bdc9a7834c67ac7e39c9674b328a68e42467efc
|
yggdrasil/yggdrasil_tests.py
|
yggdrasil/yggdrasil_tests.py
|
from unittest import TestCase
from intent.igt.references import raw_tier, cleaned_tier, normalized_tier
from intent.igt.rgxigt import Igt
from yggdrasil.igt_operations import add_raw_tier, add_clean_tier, add_normal_tier
class ConstructIGTTests(TestCase):
def setUp(self):
self.lines = [{'text':'This is a test','tag':'L'},
{'text':'blah blah blah blah','tag':'G'}]
def test_add_raw_lines(self):
inst = Igt(id='i1')
add_raw_tier(inst, self.lines)
self.assertEqual(len(raw_tier(inst)), 2)
def test_add_clean_lines(self):
inst = Igt(id='i1')
add_clean_tier(inst, self.lines)
self.assertEqual(len(cleaned_tier(inst)), 2)
def test_add_norm_lines(self):
inst = Igt(id='i1')
add_normal_tier(inst, self.lines)
self.assertEqual(len(normalized_tier(inst)), 2)
|
Move testcases here from INTENT.
|
Move testcases here from INTENT.
|
Python
|
mit
|
xigt/yggdrasil,xigt/yggdrasil,xigt/yggdrasil
|
Move testcases here from INTENT.
|
from unittest import TestCase
from intent.igt.references import raw_tier, cleaned_tier, normalized_tier
from intent.igt.rgxigt import Igt
from yggdrasil.igt_operations import add_raw_tier, add_clean_tier, add_normal_tier
class ConstructIGTTests(TestCase):
def setUp(self):
self.lines = [{'text':'This is a test','tag':'L'},
{'text':'blah blah blah blah','tag':'G'}]
def test_add_raw_lines(self):
inst = Igt(id='i1')
add_raw_tier(inst, self.lines)
self.assertEqual(len(raw_tier(inst)), 2)
def test_add_clean_lines(self):
inst = Igt(id='i1')
add_clean_tier(inst, self.lines)
self.assertEqual(len(cleaned_tier(inst)), 2)
def test_add_norm_lines(self):
inst = Igt(id='i1')
add_normal_tier(inst, self.lines)
self.assertEqual(len(normalized_tier(inst)), 2)
|
<commit_before><commit_msg>Move testcases here from INTENT.<commit_after>
|
from unittest import TestCase
from intent.igt.references import raw_tier, cleaned_tier, normalized_tier
from intent.igt.rgxigt import Igt
from yggdrasil.igt_operations import add_raw_tier, add_clean_tier, add_normal_tier
class ConstructIGTTests(TestCase):
def setUp(self):
self.lines = [{'text':'This is a test','tag':'L'},
{'text':'blah blah blah blah','tag':'G'}]
def test_add_raw_lines(self):
inst = Igt(id='i1')
add_raw_tier(inst, self.lines)
self.assertEqual(len(raw_tier(inst)), 2)
def test_add_clean_lines(self):
inst = Igt(id='i1')
add_clean_tier(inst, self.lines)
self.assertEqual(len(cleaned_tier(inst)), 2)
def test_add_norm_lines(self):
inst = Igt(id='i1')
add_normal_tier(inst, self.lines)
self.assertEqual(len(normalized_tier(inst)), 2)
|
Move testcases here from INTENT.from unittest import TestCase
from intent.igt.references import raw_tier, cleaned_tier, normalized_tier
from intent.igt.rgxigt import Igt
from yggdrasil.igt_operations import add_raw_tier, add_clean_tier, add_normal_tier
class ConstructIGTTests(TestCase):
def setUp(self):
self.lines = [{'text':'This is a test','tag':'L'},
{'text':'blah blah blah blah','tag':'G'}]
def test_add_raw_lines(self):
inst = Igt(id='i1')
add_raw_tier(inst, self.lines)
self.assertEqual(len(raw_tier(inst)), 2)
def test_add_clean_lines(self):
inst = Igt(id='i1')
add_clean_tier(inst, self.lines)
self.assertEqual(len(cleaned_tier(inst)), 2)
def test_add_norm_lines(self):
inst = Igt(id='i1')
add_normal_tier(inst, self.lines)
self.assertEqual(len(normalized_tier(inst)), 2)
|
<commit_before><commit_msg>Move testcases here from INTENT.<commit_after>from unittest import TestCase
from intent.igt.references import raw_tier, cleaned_tier, normalized_tier
from intent.igt.rgxigt import Igt
from yggdrasil.igt_operations import add_raw_tier, add_clean_tier, add_normal_tier
class ConstructIGTTests(TestCase):
def setUp(self):
self.lines = [{'text':'This is a test','tag':'L'},
{'text':'blah blah blah blah','tag':'G'}]
def test_add_raw_lines(self):
inst = Igt(id='i1')
add_raw_tier(inst, self.lines)
self.assertEqual(len(raw_tier(inst)), 2)
def test_add_clean_lines(self):
inst = Igt(id='i1')
add_clean_tier(inst, self.lines)
self.assertEqual(len(cleaned_tier(inst)), 2)
def test_add_norm_lines(self):
inst = Igt(id='i1')
add_normal_tier(inst, self.lines)
self.assertEqual(len(normalized_tier(inst)), 2)
|
|
e783c0381208326f79db411a39d008701d57ad89
|
librisxl-tools/scripts/normalize_jsonlines.py
|
librisxl-tools/scripts/normalize_jsonlines.py
|
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import unicodedata
import json
import sys
for l in sys.stdin:
l = json.loads(l)
l = json.dumps(l, ensure_ascii=False)
l = unicodedata.normalize('NFC', l)
print(l.encode('utf-8'))
|
Add script for normalizing unicode in json line dumps
|
Add script for normalizing unicode in json line dumps
|
Python
|
apache-2.0
|
libris/librisxl,libris/librisxl,libris/librisxl
|
Add script for normalizing unicode in json line dumps
|
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import unicodedata
import json
import sys
for l in sys.stdin:
l = json.loads(l)
l = json.dumps(l, ensure_ascii=False)
l = unicodedata.normalize('NFC', l)
print(l.encode('utf-8'))
|
<commit_before><commit_msg>Add script for normalizing unicode in json line dumps<commit_after>
|
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import unicodedata
import json
import sys
for l in sys.stdin:
l = json.loads(l)
l = json.dumps(l, ensure_ascii=False)
l = unicodedata.normalize('NFC', l)
print(l.encode('utf-8'))
|
Add script for normalizing unicode in json line dumps#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import unicodedata
import json
import sys
for l in sys.stdin:
l = json.loads(l)
l = json.dumps(l, ensure_ascii=False)
l = unicodedata.normalize('NFC', l)
print(l.encode('utf-8'))
|
<commit_before><commit_msg>Add script for normalizing unicode in json line dumps<commit_after>#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import unicodedata
import json
import sys
for l in sys.stdin:
l = json.loads(l)
l = json.dumps(l, ensure_ascii=False)
l = unicodedata.normalize('NFC', l)
print(l.encode('utf-8'))
|
|
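The script above targets Python 2, where `print(l.encode('utf-8'))` writes raw UTF-8 bytes to stdout. A rough Python 3 port of the same idea follows; this is a sketch under that assumption, not part of the repository.

```python
#!/usr/bin/env python3
import json
import sys
import unicodedata

# Same pipeline as above, but on Python 3, where stdout handles encoding itself.
for line in sys.stdin:
    obj = json.loads(line)
    text = json.dumps(obj, ensure_ascii=False)
    print(unicodedata.normalize('NFC', text))
```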
40f8190029cf1431a14cd74dfab515d7bb106dd4
|
pyscores/api_wrapper.py
|
pyscores/api_wrapper.py
|
import json
import os
import requests
class APIWrapper(object):
def __init__(self, base_url=None, auth_token=None):
if base_url:
self.base_url = base_url
else:
self.base_url = "http://api.football-data.org/v1"
if auth_token:
self.headers = {
'X-Auth-Token': auth_token
}
else:
self.headers = {}
def do_request(self, url, filters=None):
params = filters if filters else {}
r = requests.get(url=url, params=params, headers=self.headers)
if r.status_code == requests.codes.ok:
return r.json()
return None
def all_competitions(self):
url = "%s/competitions" % self.base_url
response = self.do_request(url=url)
return response
def main():
api = APIWrapper(auth_token=os.environ["PYSCORES_KEY"])
res = api.do_request("http://api.football-data.org/v1/competitions")
print(res)
if __name__ == "__main__":
main()
|
Add the start of an api wrapper
|
Add the start of an api wrapper
|
Python
|
mit
|
conormag94/pyscores
|
Add the start of an api wrapper
|
import json
import os
import requests
class APIWrapper(object):
def __init__(self, base_url=None, auth_token=None):
if base_url:
self.base_url = base_url
else:
self.base_url = "http://api.football-data.org/v1"
if auth_token:
self.headers = {
'X-Auth-Token': auth_token
}
else:
self.headers = {}
def do_request(self, url, filters=None):
params = filters if filters else {}
r = requests.get(url=url, params=params, headers=self.headers)
if r.status_code == requests.codes.ok:
return r.json()
return None
def all_competitions(self):
url = "%s/competitions" % self.base_url
response = self.do_request(url=url)
return response
def main():
api = APIWrapper(auth_token=os.environ["PYSCORES_KEY"])
res = api.do_request("http://api.football-data.org/v1/competitions")
print(res)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the start of an api wrapper<commit_after>
|
import json
import os
import requests
class APIWrapper(object):
def __init__(self, base_url=None, auth_token=None):
if base_url:
self.base_url = base_url
else:
self.base_url = "http://api.football-data.org/v1"
if auth_token:
self.headers = {
'X-Auth-Token': auth_token
}
else:
self.headers = {}
def do_request(self, url, filters=None):
params = filters if filters else {}
r = requests.get(url=url, params=params, headers=self.headers)
if r.status_code == requests.codes.ok:
return r.json()
return None
def all_competitions(self):
url = "%s/competitions" % self.base_url
response = self.do_request(url=url)
return response
def main():
api = APIWrapper(auth_token=os.environ["PYSCORES_KEY"])
res = api.do_request("http://api.football-data.org/v1/competitions")
print(res)
if __name__ == "__main__":
main()
|
Add the start of an api wrapperimport json
import os
import requests
class APIWrapper(object):
def __init__(self, base_url=None, auth_token=None):
if base_url:
self.base_url = base_url
else:
self.base_url = "http://api.football-data.org/v1"
if auth_token:
self.headers = {
'X-Auth-Token': auth_token
}
else:
self.headers = {}
def do_request(self, url, filters=None):
params = filters if filters else {}
r = requests.get(url=url, params=params, headers=self.headers)
if r.status_code == requests.codes.ok:
return r.json()
return None
def all_competitions(self):
url = "%s/competitions" % self.base_url
response = self.do_request(url=url)
return response
def main():
api = APIWrapper(auth_token=os.environ["PYSCORES_KEY"])
res = api.do_request("http://api.football-data.org/v1/competitions")
print(res)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the start of an api wrapper<commit_after>import json
import os
import requests
class APIWrapper(object):
def __init__(self, base_url=None, auth_token=None):
if base_url:
self.base_url = base_url
else:
self.base_url = "http://api.football-data.org/v1"
if auth_token:
self.headers = {
'X-Auth-Token': auth_token
}
else:
self.headers = {}
def do_request(self, url, filters=None):
params = filters if filters else {}
r = requests.get(url=url, params=params, headers=self.headers)
if r.status_code == requests.codes.ok:
return r.json()
return None
def all_competitions(self):
url = "%s/competitions" % self.base_url
response = self.do_request(url=url)
return response
def main():
api = APIWrapper(auth_token=os.environ["PYSCORES_KEY"])
res = api.do_request("http://api.football-data.org/v1/competitions")
print(res)
if __name__ == "__main__":
main()
|
|
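A brief usage sketch for the `APIWrapper` above. The `filters` argument is passed straight through as query parameters; the `season` filter shown here is illustrative and not confirmed by the source.

```python
import os

# Hypothetical usage; PYSCORES_KEY and the "season" filter are assumptions.
api = APIWrapper(auth_token=os.environ.get("PYSCORES_KEY"))
competitions = api.all_competitions()
filtered = api.do_request(api.base_url + "/competitions",
                          filters={"season": "2016"})
print(competitions, filtered)
```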
822fdaa409322a41eb83797da76d021e97e34949
|
hassio_api/hassio/version.py
|
hassio_api/hassio/version.py
|
"""Bootstrap HassIO."""
import asyncio
import json
import logging
import os
from colorlog import ColoredFormatter
from .const import (
FILE_HASSIO_VERSION, CONF_SUPERVISOR_TAG, CONF_SUPERVISOR_IMAGE,
CONF_HOMEASSISTANT_TAG, CONF_HOMEASSISTANT_IMAGE)
_LOGGER = logging.getLogger(__name__)
class Version(object):
"""Hold all version data."""
def __init__(self, config_file=FILE_HASSIO_VERSION):
"""Initialize version object."""
self._data = {}
self._filename = config_file
# init or load data
if os.path.isfile(FILE_HASSIO_VERSION):
try:
                with open(self._filename, 'r') as cfile:
self._data = json.loads(cfile.read())
except OSError:
                _LOGGER.warning("Can't read %s", self._filename)
if not self._data:
self._data.update({
CONF_HOMEASSISTANT_IMAGE:
os.environ['HOMEASSISTANT_REPOSITORY'],
CONF_HOMEASSISTANT_TAG: '',
})
# update version
        self._data.update({
CONF_SUPERVISOR_IMAGE: os.environ['SUPERVISOR_IMAGE'],
CONF_SUPERVISOR_TAG: os.environ['SUPERVISOR_TAG'],
})
self.save()
def save(self):
"""Store data to config file."""
try:
with open(self._filename, 'w') as conf_file:
conf_file.write(json.dumps(self._data))
except OSError:
_LOGGER.exception("Can't store config in %s", self._filename)
@property
def homeassistant_image(self):
"""Return docker homeassistant repository."""
return self._data.get(CONF_HOMEASSISTANT_IMAGE)
@property
def homeassistant_tag(self):
"""Return docker homeassistant tag."""
return self._data.get(CONF_HOMEASSISTANT_TAG)
@homeassistant_tag.setter
def homeassistant_tag(self, value):
"""Set docker homeassistant tag."""
self._data[CONF_HOMEASSISTANT_TAG] = value
        self.save()
@property
def supervisor_image(self):
"""Return docker supervisor repository."""
return self._data.get(CONF_SUPERVISOR_IMAGE)
@property
def supervisor_tag(self):
"""Return docker supervisor tag."""
return self._data.get(CONF_SUPERVISOR_TAG)
|
Update HassIO API -> new files
|
Update HassIO API -> new files
|
Python
|
bsd-3-clause
|
pvizeli/hassio,pvizeli/hassio
|
Update HassIO API -> new files
|
"""Bootstrap HassIO."""
import asyncio
import json
import logging
import os
from colorlog import ColoredFormatter
from .const import (
FILE_HASSIO_VERSION, CONF_SUPERVISOR_TAG, CONF_SUPERVISOR_IMAGE,
CONF_HOMEASSISTANT_TAG, CONF_HOMEASSISTANT_IMAGE)
_LOGGER = logging.getLogger(__name__)
class Version(object):
"""Hold all version data."""
def __init__(self, config_file=FILE_HASSIO_VERSION):
"""Initialize version object."""
self._data = {}
self._filename = config_file
# init or load data
if os.path.isfile(FILE_HASSIO_VERSION):
try:
                with open(self._filename, 'r') as cfile:
self._data = json.loads(cfile.read())
except OSError:
                _LOGGER.warning("Can't read %s", self._filename)
if not self._data:
self._data.update({
CONF_HOMEASSISTANT_IMAGE:
os.environ['HOMEASSISTANT_REPOSITORY'],
CONF_HOMEASSISTANT_TAG: '',
})
# update version
        self._data.update({
CONF_SUPERVISOR_IMAGE: os.environ['SUPERVISOR_IMAGE'],
CONF_SUPERVISOR_TAG: os.environ['SUPERVISOR_TAG'],
})
self.save()
def save(self):
"""Store data to config file."""
try:
with open(self._filename, 'w') as conf_file:
conf_file.write(json.dumps(self._data))
except OSError:
_LOGGER.exception("Can't store config in %s", self._filename)
@property
def homeassistant_image(self):
"""Return docker homeassistant repository."""
return self._data.get(CONF_HOMEASSISTANT_IMAGE)
@property
def homeassistant_tag(self):
"""Return docker homeassistant tag."""
return self._data.get(CONF_HOMEASSISTANT_TAG)
@homeassistant_tag.setter
def homeassistant_tag(self, value):
"""Set docker homeassistant tag."""
self._data[CONF_HOMEASSISTANT_TAG] = value
        self.save()
@property
def supervisor_image(self):
"""Return docker supervisor repository."""
return self._data.get(CONF_SUPERVISOR_IMAGE)
@property
def supervisor_tag(self):
"""Return docker supervisor tag."""
return self._data.get(CONF_SUPERVISOR_TAG)
|
<commit_before><commit_msg>Update HassIO API -> new files<commit_after>
|
"""Bootstrap HassIO."""
import asyncio
import json
import logging
import os
from colorlog import ColoredFormatter
from .const import (
FILE_HASSIO_VERSION, CONF_SUPERVISOR_TAG, CONF_SUPERVISOR_IMAGE,
CONF_HOMEASSISTANT_TAG, CONF_HOMEASSISTANT_IMAGE)
_LOGGER = logging.getLogger(__name__)
class Version(object):
"""Hold all version data."""
def __init__(self, config_file=FILE_HASSIO_VERSION):
"""Initialize version object."""
self._data = {}
self._filename = config_file
# init or load data
if os.path.isfile(FILE_HASSIO_VERSION):
try:
                with open(self._filename, 'r') as cfile:
self._data = json.loads(cfile.read())
except OSError:
                _LOGGER.warning("Can't read %s", self._filename)
if not self._data:
self._data.update({
CONF_HOMEASSISTANT_IMAGE:
os.environ['HOMEASSISTANT_REPOSITORY'],
CONF_HOMEASSISTANT_TAG: '',
})
# update version
        self._data.update({
CONF_SUPERVISOR_IMAGE: os.environ['SUPERVISOR_IMAGE'],
CONF_SUPERVISOR_TAG: os.environ['SUPERVISOR_TAG'],
})
self.save()
def save(self):
"""Store data to config file."""
try:
with open(self._filename, 'w') as conf_file:
conf_file.write(json.dumps(self._data))
except OSError:
_LOGGER.exception("Can't store config in %s", self._filename)
@property
def homeassistant_image(self):
"""Return docker homeassistant repository."""
return self._data.get(CONF_HOMEASSISTANT_IMAGE)
@property
def homeassistant_tag(self):
"""Return docker homeassistant tag."""
return self._data.get(CONF_HOMEASSISTANT_TAG)
@homeassistant_tag.setter
def homeassistant_tag(self, value):
"""Set docker homeassistant tag."""
self._data[CONF_HOMEASSISTANT_TAG] = value
        self.save()
@property
def supervisor_image(self):
"""Return docker supervisor repository."""
return self._data.get(CONF_SUPERVISOR_IMAGE)
@property
def supervisor_tag(self):
"""Return docker supervisor tag."""
return self._data.get(CONF_SUPERVISOR_TAG)
|
Update HassIO API -> new files"""Bootstrap HassIO."""
import asyncio
import json
import logging
import os
from colorlog import ColoredFormatter
from .const import (
FILE_HASSIO_VERSION, CONF_SUPERVISOR_TAG, CONF_SUPERVISOR_IMAGE,
CONF_HOMEASSISTANT_TAG, CONF_HOMEASSISTANT_IMAGE)
_LOGGER = logging.getLogger(__name__)
class Version(object):
"""Hold all version data."""
def __init__(self, config_file=FILE_HASSIO_VERSION):
"""Initialize version object."""
self._data = {}
self._filename = config_file
# init or load data
if os.path.isfile(FILE_HASSIO_VERSION):
try:
                with open(self._filename, 'r') as cfile:
self._data = json.loads(cfile.read())
except OSError:
                _LOGGER.warning("Can't read %s", self._filename)
if not self._data:
self._data.update({
CONF_HOMEASSISTANT_IMAGE:
os.environ['HOMEASSISTANT_REPOSITORY'],
CONF_HOMEASSISTANT_TAG: '',
})
# update version
        self._data.update({
CONF_SUPERVISOR_IMAGE: os.environ['SUPERVISOR_IMAGE'],
CONF_SUPERVISOR_TAG: os.environ['SUPERVISOR_TAG'],
})
self.save()
def save(self):
"""Store data to config file."""
try:
with open(self._filename, 'w') as conf_file:
conf_file.write(json.dumps(self._data))
except OSError:
_LOGGER.exception("Can't store config in %s", self._filename)
@property
def homeassistant_image(self):
"""Return docker homeassistant repository."""
return self._data.get(CONF_HOMEASSISTANT_IMAGE)
@property
def homeassistant_tag(self):
"""Return docker homeassistant tag."""
return self._data.get(CONF_HOMEASSISTANT_TAG)
@homeassistant_tag.setter
def homeassistant_tag(self, value):
"""Set docker homeassistant tag."""
self._data[CONF_HOMEASSISTANT_TAG] = value
        self.save()
@property
def supervisor_image(self):
"""Return docker supervisor repository."""
return self._data.get(CONF_SUPERVISOR_IMAGE)
@property
def supervisor_tag(self):
"""Return docker supervisor tag."""
return self._data.get(CONF_SUPERVISOR_TAG)
|
<commit_before><commit_msg>Update HassIO API -> new files<commit_after>"""Bootstrap HassIO."""
import asyncio
import json
import logging
import os
from colorlog import ColoredFormatter
from .const import (
FILE_HASSIO_VERSION, CONF_SUPERVISOR_TAG, CONF_SUPERVISOR_IMAGE,
CONF_HOMEASSISTANT_TAG, CONF_HOMEASSISTANT_IMAGE)
_LOGGER = logging.getLogger(__name__)
class Version(object):
"""Hold all version data."""
def __init__(self, config_file=FILE_HASSIO_VERSION):
"""Initialize version object."""
self._data = {}
self._filename = config_file
# init or load data
if os.path.isfile(FILE_HASSIO_VERSION):
try:
                with open(self._filename, 'r') as cfile:
self._data = json.loads(cfile.read())
except OSError:
                _LOGGER.warning("Can't read %s", self._filename)
if not self._data:
self._data.update({
CONF_HOMEASSISTANT_IMAGE:
os.environ['HOMEASSISTANT_REPOSITORY'],
CONF_HOMEASSISTANT_TAG: '',
})
# update version
        self._data.update({
CONF_SUPERVISOR_IMAGE: os.environ['SUPERVISOR_IMAGE'],
CONF_SUPERVISOR_TAG: os.environ['SUPERVISOR_TAG'],
})
self.save()
def save(self):
"""Store data to config file."""
try:
with open(self._filename, 'w') as conf_file:
conf_file.write(json.dumps(self._data))
except OSError:
_LOGGER.exception("Can't store config in %s", self._filename)
@property
def homeassistant_image(self):
"""Return docker homeassistant repository."""
return self._data.get(CONF_HOMEASSISTANT_IMAGE)
@property
def homeassistant_tag(self):
"""Return docker homeassistant tag."""
return self._data.get(CONF_HOMEASSISTANT_TAG)
@homeassistant_tag.setter
def homeassistant_tag(self, value):
"""Set docker homeassistant tag."""
self._data[CONF_HOMEASSISTANT_TAG] = value
        self.save()
@property
def supervisor_image(self):
"""Return docker supervisor repository."""
return self._data.get(CONF_SUPERVISOR_IMAGE)
@property
def supervisor_tag(self):
"""Return docker supervisor tag."""
return self._data.get(CONF_SUPERVISOR_TAG)
|
|
b9349f7bfa19904dee140743ccf9f0198e958516
|
scripts/load_results.py
|
scripts/load_results.py
|
from random import randint
from dakis.core.models import Experiment
def load_results(exp_pk, func_cls, calls50, calls100, callsavg):
tasks = Experiment.objects.get(pk=exp_pk).tasks.filter(func_cls=func_cls)
l = sorted([randint(0, int(callsavg*2)) for i in range(tasks.count())])
for i in range(tasks.count()):
if i < tasks.count()/2:
if l[i] >= calls50:
l[i] = calls50
if i >= tasks.count()/2:
if l[i] <= calls50:
l[i] = calls50
if l[i] > calls100:
l[i] = calls100
l = sorted(l)
l[49] = calls50
l[50] = calls50
l[-1] = calls100
diff = sum(l) - callsavg * 100
for i in range(30):
l[60+i] -= diff // 10
l[31] -= sum(l) - callsavg * 100
l = sorted(l)
if l[-1] != calls100 or l[49] != calls50 or l[50] != calls50:
raise ValueError('100 or 49 do not match')
for i, t in enumerate(tasks.all()):
t.calls = l[i]
t.status = 'D'
t.save()
|
Implement script to add exp results from article
|
Implement script to add exp results from article
|
Python
|
agpl-3.0
|
niekas/dakis,niekas/dakis,niekas/dakis
|
Implement script to add exp results from article
|
from random import randint
from dakis.core.models import Experiment
def load_results(exp_pk, func_cls, calls50, calls100, callsavg):
tasks = Experiment.objects.get(pk=exp_pk).tasks.filter(func_cls=func_cls)
l = sorted([randint(0, int(callsavg*2)) for i in range(tasks.count())])
for i in range(tasks.count()):
if i < tasks.count()/2:
if l[i] >= calls50:
l[i] = calls50
if i >= tasks.count()/2:
if l[i] <= calls50:
l[i] = calls50
if l[i] > calls100:
l[i] = calls100
l = sorted(l)
l[49] = calls50
l[50] = calls50
l[-1] = calls100
diff = sum(l) - callsavg * 100
for i in range(30):
l[60+i] -= diff // 10
l[31] -= sum(l) - callsavg * 100
l = sorted(l)
if l[-1] != calls100 or l[49] != calls50 or l[50] != calls50:
raise ValueError('100 or 49 do not match')
for i, t in enumerate(tasks.all()):
t.calls = l[i]
t.status = 'D'
t.save()
|
<commit_before><commit_msg>Implement script to add exp results from article<commit_after>
|
from random import randint
from dakis.core.models import Experiment
def load_results(exp_pk, func_cls, calls50, calls100, callsavg):
tasks = Experiment.objects.get(pk=exp_pk).tasks.filter(func_cls=func_cls)
l = sorted([randint(0, int(callsavg*2)) for i in range(tasks.count())])
for i in range(tasks.count()):
if i < tasks.count()/2:
if l[i] >= calls50:
l[i] = calls50
if i >= tasks.count()/2:
if l[i] <= calls50:
l[i] = calls50
if l[i] > calls100:
l[i] = calls100
l = sorted(l)
l[49] = calls50
l[50] = calls50
l[-1] = calls100
diff = sum(l) - callsavg * 100
for i in range(30):
l[60+i] -= diff // 10
l[31] -= sum(l) - callsavg * 100
l = sorted(l)
if l[-1] != calls100 or l[49] != calls50 or l[50] != calls50:
raise ValueError('100 or 49 do not match')
for i, t in enumerate(tasks.all()):
t.calls = l[i]
t.status = 'D'
t.save()
|
Implement script to add exp results from articlefrom random import randint
from dakis.core.models import Experiment
def load_results(exp_pk, func_cls, calls50, calls100, callsavg):
tasks = Experiment.objects.get(pk=exp_pk).tasks.filter(func_cls=func_cls)
l = sorted([randint(0, int(callsavg*2)) for i in range(tasks.count())])
for i in range(tasks.count()):
if i < tasks.count()/2:
if l[i] >= calls50:
l[i] = calls50
if i >= tasks.count()/2:
if l[i] <= calls50:
l[i] = calls50
if l[i] > calls100:
l[i] = calls100
l = sorted(l)
l[49] = calls50
l[50] = calls50
l[-1] = calls100
diff = sum(l) - callsavg * 100
for i in range(30):
l[60+i] -= diff // 10
l[31] -= sum(l) - callsavg * 100
l = sorted(l)
if l[-1] != calls100 or l[49] != calls50 or l[50] != calls50:
raise ValueError('100 or 49 do not match')
for i, t in enumerate(tasks.all()):
t.calls = l[i]
t.status = 'D'
t.save()
|
<commit_before><commit_msg>Implement script to add exp results from article<commit_after>from random import randint
from dakis.core.models import Experiment
def load_results(exp_pk, func_cls, calls50, calls100, callsavg):
tasks = Experiment.objects.get(pk=exp_pk).tasks.filter(func_cls=func_cls)
l = sorted([randint(0, int(callsavg*2)) for i in range(tasks.count())])
for i in range(tasks.count()):
if i < tasks.count()/2:
if l[i] >= calls50:
l[i] = calls50
if i >= tasks.count()/2:
if l[i] <= calls50:
l[i] = calls50
if l[i] > calls100:
l[i] = calls100
l = sorted(l)
l[49] = calls50
l[50] = calls50
l[-1] = calls100
diff = sum(l) - callsavg * 100
for i in range(30):
l[60+i] -= diff // 10
l[31] -= sum(l) - callsavg * 100
l = sorted(l)
if l[-1] != calls100 or l[49] != calls50 or l[50] != calls50:
raise ValueError('100 or 49 do not match')
for i, t in enumerate(tasks.all()):
t.calls = l[i]
t.status = 'D'
t.save()
|
|
12f51ce0d5e6c67e15ba3d60c21523a28d29b964
|
polygraph/types/tests/test_type_map.py
|
polygraph/types/tests/test_type_map.py
|
from unittest import TestCase
from polygraph.types.decorators import field
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import ID, Boolean, Float, Int, String
from polygraph.types.schema import Schema
from polygraph.types.type_builder import List, NonNull, Union
class Person(ObjectType):
@field()
def name(self) -> NonNull(String):
pass
@field()
def age(year: Int) -> String:
pass
class Animal(ObjectType):
@field()
def can_walk(self) -> Boolean:
pass
class Query(ObjectType):
@field()
def characters(self) -> List(Union(Animal, Person)):
pass
class TypeMapTest(TestCase):
def test_type_map_builder(self):
schema = Schema(query=Query, additional_types=[ID])
type_map = schema.type_map
self.assertEqual(type_map["Animal"], Animal)
self.assertEqual(type_map["Person"], Person)
self.assertEqual(type_map["Animal|Person"], Union(Animal, Person))
self.assertEqual(type_map["Boolean"], Boolean)
self.assertEqual(type_map["ID"], ID)
self.assertNotIn(Float, type_map.values()) # Float was not defined anywhere
|
Add tests for the type map builder
|
Add tests for the type map builder
|
Python
|
mit
|
polygraph-python/polygraph
|
Add tests for the type map builder
|
from unittest import TestCase
from polygraph.types.decorators import field
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import ID, Boolean, Float, Int, String
from polygraph.types.schema import Schema
from polygraph.types.type_builder import List, NonNull, Union
class Person(ObjectType):
@field()
def name(self) -> NonNull(String):
pass
@field()
def age(year: Int) -> String:
pass
class Animal(ObjectType):
@field()
def can_walk(self) -> Boolean:
pass
class Query(ObjectType):
@field()
def characters(self) -> List(Union(Animal, Person)):
pass
class TypeMapTest(TestCase):
def test_type_map_builder(self):
schema = Schema(query=Query, additional_types=[ID])
type_map = schema.type_map
self.assertEqual(type_map["Animal"], Animal)
self.assertEqual(type_map["Person"], Person)
self.assertEqual(type_map["Animal|Person"], Union(Animal, Person))
self.assertEqual(type_map["Boolean"], Boolean)
self.assertEqual(type_map["ID"], ID)
self.assertNotIn(Float, type_map.values()) # Float was not defined anywhere
|
<commit_before><commit_msg>Add tests for the type map builder<commit_after>
|
from unittest import TestCase
from polygraph.types.decorators import field
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import ID, Boolean, Float, Int, String
from polygraph.types.schema import Schema
from polygraph.types.type_builder import List, NonNull, Union
class Person(ObjectType):
@field()
def name(self) -> NonNull(String):
pass
@field()
def age(year: Int) -> String:
pass
class Animal(ObjectType):
@field()
def can_walk(self) -> Boolean:
pass
class Query(ObjectType):
@field()
def characters(self) -> List(Union(Animal, Person)):
pass
class TypeMapTest(TestCase):
def test_type_map_builder(self):
schema = Schema(query=Query, additional_types=[ID])
type_map = schema.type_map
self.assertEqual(type_map["Animal"], Animal)
self.assertEqual(type_map["Person"], Person)
self.assertEqual(type_map["Animal|Person"], Union(Animal, Person))
self.assertEqual(type_map["Boolean"], Boolean)
self.assertEqual(type_map["ID"], ID)
self.assertNotIn(Float, type_map.values()) # Float was not defined anywhere
|
Add tests for the type map builderfrom unittest import TestCase
from polygraph.types.decorators import field
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import ID, Boolean, Float, Int, String
from polygraph.types.schema import Schema
from polygraph.types.type_builder import List, NonNull, Union
class Person(ObjectType):
@field()
def name(self) -> NonNull(String):
pass
@field()
def age(year: Int) -> String:
pass
class Animal(ObjectType):
@field()
def can_walk(self) -> Boolean:
pass
class Query(ObjectType):
@field()
def characters(self) -> List(Union(Animal, Person)):
pass
class TypeMapTest(TestCase):
def test_type_map_builder(self):
schema = Schema(query=Query, additional_types=[ID])
type_map = schema.type_map
self.assertEqual(type_map["Animal"], Animal)
self.assertEqual(type_map["Person"], Person)
self.assertEqual(type_map["Animal|Person"], Union(Animal, Person))
self.assertEqual(type_map["Boolean"], Boolean)
self.assertEqual(type_map["ID"], ID)
self.assertNotIn(Float, type_map.values()) # Float was not defined anywhere
|
<commit_before><commit_msg>Add tests for the type map builder<commit_after>from unittest import TestCase
from polygraph.types.decorators import field
from polygraph.types.object_type import ObjectType
from polygraph.types.scalar import ID, Boolean, Float, Int, String
from polygraph.types.schema import Schema
from polygraph.types.type_builder import List, NonNull, Union
class Person(ObjectType):
@field()
def name(self) -> NonNull(String):
pass
@field()
def age(year: Int) -> String:
pass
class Animal(ObjectType):
@field()
def can_walk(self) -> Boolean:
pass
class Query(ObjectType):
@field()
def characters(self) -> List(Union(Animal, Person)):
pass
class TypeMapTest(TestCase):
def test_type_map_builder(self):
schema = Schema(query=Query, additional_types=[ID])
type_map = schema.type_map
self.assertEqual(type_map["Animal"], Animal)
self.assertEqual(type_map["Person"], Person)
self.assertEqual(type_map["Animal|Person"], Union(Animal, Person))
self.assertEqual(type_map["Boolean"], Boolean)
self.assertEqual(type_map["ID"], ID)
self.assertNotIn(Float, type_map.values()) # Float was not defined anywhere
|
|
aa1c6dbede3cbee67d1b2b0a1064fd7cc5662251
|
discovery-diff/discovery-diff.py
|
discovery-diff/discovery-diff.py
|
import json
from subprocess import Popen, PIPE
import os
import sys
def hardware_data(hw_id):
'''Read the discoverd data about the given node from Swift'''
# OS_TENANT_NAME=service swift download --output - ironic-discoverd <ID>
p = Popen(('swift', 'download', '--output', '-', 'ironic-discoverd', hw_id),
stdout=PIPE, stderr=PIPE)
if p.wait() == 0:
return json.loads(p.stdout.read())
def all_equal(coll):
if len(coll) <= 1:
return True
first = coll[0]
for item in coll[1:]:
if item != first:
return False
return True
if __name__ == '__main__':
os.environ['OS_TENANT_NAME'] = 'service'
# OS_TENANT_NAME=service swift list ironic-discoverd
p = Popen(('swift', 'list', 'ironic-discoverd'), stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
print "Error running `swift list ironic-discoverd`"
print p.stderr.read()
sys.exit(1)
hardware_ids = [i.strip() for i in p.stdout.read().splitlines() if i.strip()]
hardware = [hardware_data(i) for i in hardware_ids]
hw_dicts = []
for hw in hardware:
hw_dict = {}
for item in hw:
key = '/'.join(item[:-1])
value = item[-1]
hw_dict[key] = value
hw_dicts.append(hw_dict)
all_keys = set()
for hw in hw_dicts:
all_keys.update(hw.keys())
system_id_key = 'system/product/uuid'
print "System ID by %s:" % system_id_key
for num, hw in enumerate(hw_dicts):
print '[%d]: %s' % (num, hw[system_id_key])
print
for key in all_keys:
values = [hw.get(key) for hw in hw_dicts]
if key != system_id_key and not all_equal(values):
print '%s:' % key
for num, value in enumerate(values):
print '[%d] %s' % (num, value)
print
|
Add a script for diffing the hw discovery data
|
Add a script for diffing the hw discovery data
This will let us spot-check things that should be identical but aren't.
|
Python
|
apache-2.0
|
rthallisey/clapper,larsks/clapper,larsks/clapper,coolsvap/clapper,rthallisey/clapper,coolsvap/clapper,coolsvap/clapper
|
Add a script for diffing the hw discovery data
This will let us spot-check things that should be identical but aren't.
|
import json
from subprocess import Popen, PIPE
import os
import sys
def hardware_data(hw_id):
'''Read the discoverd data about the given node from Swift'''
# OS_TENANT_NAME=service swift download --output - ironic-discoverd <ID>
p = Popen(('swift', 'download', '--output', '-', 'ironic-discoverd', hw_id),
stdout=PIPE, stderr=PIPE)
if p.wait() == 0:
return json.loads(p.stdout.read())
def all_equal(coll):
if len(coll) <= 1:
return True
first = coll[0]
for item in coll[1:]:
if item != first:
return False
return True
if __name__ == '__main__':
os.environ['OS_TENANT_NAME'] = 'service'
# OS_TENANT_NAME=service swift list ironic-discoverd
p = Popen(('swift', 'list', 'ironic-discoverd'), stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
print "Error running `swift list ironic-discoverd`"
print p.stderr.read()
sys.exit(1)
hardware_ids = [i.strip() for i in p.stdout.read().splitlines() if i.strip()]
hardware = [hardware_data(i) for i in hardware_ids]
hw_dicts = []
for hw in hardware:
hw_dict = {}
for item in hw:
key = '/'.join(item[:-1])
value = item[-1]
hw_dict[key] = value
hw_dicts.append(hw_dict)
all_keys = set()
for hw in hw_dicts:
all_keys.update(hw.keys())
system_id_key = 'system/product/uuid'
print "System ID by %s:" % system_id_key
for num, hw in enumerate(hw_dicts):
print '[%d]: %s' % (num, hw[system_id_key])
print
for key in all_keys:
values = [hw.get(key) for hw in hw_dicts]
if key != system_id_key and not all_equal(values):
print '%s:' % key
for num, value in enumerate(values):
print '[%d] %s' % (num, value)
print
|
<commit_before><commit_msg>Add a script for diffing the hw discovery data
This will let us spot-check things that should be identical but aren't.<commit_after>
|
import json
from subprocess import Popen, PIPE
import os
import sys
def hardware_data(hw_id):
'''Read the discoverd data about the given node from Swift'''
# OS_TENANT_NAME=service swift download --output - ironic-discoverd <ID>
p = Popen(('swift', 'download', '--output', '-', 'ironic-discoverd', hw_id),
stdout=PIPE, stderr=PIPE)
if p.wait() == 0:
return json.loads(p.stdout.read())
def all_equal(coll):
if len(coll) <= 1:
return True
first = coll[0]
for item in coll[1:]:
if item != first:
return False
return True
if __name__ == '__main__':
os.environ['OS_TENANT_NAME'] = 'service'
# OS_TENANT_NAME=service swift list ironic-discoverd
p = Popen(('swift', 'list', 'ironic-discoverd'), stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
print "Error running `swift list ironic-discoverd`"
print p.stderr.read()
sys.exit(1)
hardware_ids = [i.strip() for i in p.stdout.read().splitlines() if i.strip()]
hardware = [hardware_data(i) for i in hardware_ids]
hw_dicts = []
for hw in hardware:
hw_dict = {}
for item in hw:
key = '/'.join(item[:-1])
value = item[-1]
hw_dict[key] = value
hw_dicts.append(hw_dict)
all_keys = set()
for hw in hw_dicts:
all_keys.update(hw.keys())
system_id_key = 'system/product/uuid'
print "System ID by %s:" % system_id_key
for num, hw in enumerate(hw_dicts):
print '[%d]: %s' % (num, hw[system_id_key])
print
for key in all_keys:
values = [hw.get(key) for hw in hw_dicts]
if key != system_id_key and not all_equal(values):
print '%s:' % key
for num, value in enumerate(values):
print '[%d] %s' % (num, value)
print
|
Add a script for diffing the hw discovery data
This will let us spot-check things that should be identical but aren't.import json
from subprocess import Popen, PIPE
import os
import sys
def hardware_data(hw_id):
'''Read the discoverd data about the given node from Swift'''
# OS_TENANT_NAME=service swift download --output - ironic-discoverd <ID>
p = Popen(('swift', 'download', '--output', '-', 'ironic-discoverd', hw_id),
stdout=PIPE, stderr=PIPE)
if p.wait() == 0:
return json.loads(p.stdout.read())
def all_equal(coll):
if len(coll) <= 1:
return True
first = coll[0]
for item in coll[1:]:
if item != first:
return False
return True
if __name__ == '__main__':
os.environ['OS_TENANT_NAME'] = 'service'
# OS_TENANT_NAME=service swift list ironic-discoverd
p = Popen(('swift', 'list', 'ironic-discoverd'), stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
print "Error running `swift list ironic-discoverd`"
print p.stderr.read()
sys.exit(1)
hardware_ids = [i.strip() for i in p.stdout.read().splitlines() if i.strip()]
hardware = [hardware_data(i) for i in hardware_ids]
hw_dicts = []
for hw in hardware:
hw_dict = {}
for item in hw:
key = '/'.join(item[:-1])
value = item[-1]
hw_dict[key] = value
hw_dicts.append(hw_dict)
all_keys = set()
for hw in hw_dicts:
all_keys.update(hw.keys())
system_id_key = 'system/product/uuid'
print "System ID by %s:" % system_id_key
for num, hw in enumerate(hw_dicts):
print '[%d]: %s' % (num, hw[system_id_key])
print
for key in all_keys:
values = [hw.get(key) for hw in hw_dicts]
if key != system_id_key and not all_equal(values):
print '%s:' % key
for num, value in enumerate(values):
print '[%d] %s' % (num, value)
print
|
<commit_before><commit_msg>Add a script for diffing the hw discovery data
This will let us spot-check things that should be identical but aren't.<commit_after>import json
from subprocess import Popen, PIPE
import os
import sys
def hardware_data(hw_id):
'''Read the discoverd data about the given node from Swift'''
# OS_TENANT_NAME=service swift download --output - ironic-discoverd <ID>
p = Popen(('swift', 'download', '--output', '-', 'ironic-discoverd', hw_id),
stdout=PIPE, stderr=PIPE)
if p.wait() == 0:
return json.loads(p.stdout.read())
def all_equal(coll):
if len(coll) <= 1:
return True
first = coll[0]
for item in coll[1:]:
if item != first:
return False
return True
if __name__ == '__main__':
os.environ['OS_TENANT_NAME'] = 'service'
# OS_TENANT_NAME=service swift list ironic-discoverd
p = Popen(('swift', 'list', 'ironic-discoverd'), stdout=PIPE, stderr=PIPE)
if p.wait() != 0:
print "Error running `swift list ironic-discoverd`"
print p.stderr.read()
sys.exit(1)
hardware_ids = [i.strip() for i in p.stdout.read().splitlines() if i.strip()]
hardware = [hardware_data(i) for i in hardware_ids]
hw_dicts = []
for hw in hardware:
hw_dict = {}
for item in hw:
key = '/'.join(item[:-1])
value = item[-1]
hw_dict[key] = value
hw_dicts.append(hw_dict)
all_keys = set()
for hw in hw_dicts:
all_keys.update(hw.keys())
system_id_key = 'system/product/uuid'
print "System ID by %s:" % system_id_key
for num, hw in enumerate(hw_dicts):
print '[%d]: %s' % (num, hw[system_id_key])
print
for key in all_keys:
values = [hw.get(key) for hw in hw_dicts]
if key != system_id_key and not all_equal(values):
print '%s:' % key
for num, value in enumerate(values):
print '[%d] %s' % (num, value)
print
|
|
c18519b4f5f7cbac24e053df422d83733d6963f6
|
examples/test_pdf_asserts.py
|
examples/test_pdf_asserts.py
|
from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_assert_pdf_text(self):
# Assert PDF contains the expected text on Page 1
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Programming Is a Creative Activity", page=1)
# Assert PDF contains the expected text on any of the pages
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Extracting Text from PDFs")
|
Add a test for asserting text in a PDF file
|
Add a test for asserting text in a PDF file
|
Python
|
mit
|
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase
|
Add a test for asserting text in a PDF file
|
from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_assert_pdf_text(self):
# Assert PDF contains the expected text on Page 1
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Programming Is a Creative Activity", page=1)
# Assert PDF contains the expected text on any of the pages
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Extracting Text from PDFs")
|
<commit_before><commit_msg>Add a test for asserting text in a PDF file<commit_after>
|
from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_assert_pdf_text(self):
# Assert PDF contains the expected text on Page 1
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Programming Is a Creative Activity", page=1)
# Assert PDF contains the expected text on any of the pages
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Extracting Text from PDFs")
|
Add a test for asserting text in a PDF filefrom seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_assert_pdf_text(self):
# Assert PDF contains the expected text on Page 1
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Programming Is a Creative Activity", page=1)
# Assert PDF contains the expected text on any of the pages
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Extracting Text from PDFs")
|
<commit_before><commit_msg>Add a test for asserting text in a PDF file<commit_after>from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_assert_pdf_text(self):
# Assert PDF contains the expected text on Page 1
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Programming Is a Creative Activity", page=1)
# Assert PDF contains the expected text on any of the pages
self.assert_pdf_text(
"https://nostarch.com/download/Automate_the_Boring_Stuff_dTOC.pdf",
"Extracting Text from PDFs")
|
|
1f6c7b33a150096dad4c856516b6dd23dd1571ae
|
read-coverage.py
|
read-coverage.py
|
"""Translate coverage.py data files to JSON."""
import argparse
import json
import pathlib
import sys
from coverage import coverage as Coverage
def parse_args(argv=None):
"""Parse command-line argument."""
parser = argparse.ArgumentParser()
parser.add_argument(
"coverage",
nargs=1,
type=pathlib.Path,
help="The path to a coverage data file."
)
parser.add_argument(
"file",
nargs=1,
type=pathlib.Path,
help="The path to a file to return coverage information for."
)
return parser.parse_args(argv)
def main(argv=None):
"""Application entry-point.
This will read the given coverage data file and prints a JSON object with
the following fields to stdout:
``covered``
An array of line numbers that are covered.
``excluded``
An array of line numbers that have been excluded from being subject
to coverage checks.
``missing``
An array of line numbers that have not been covered.
"""
args = parse_args(argv)
coverage = Coverage(data_file=str(args.coverage[0]))
_, covered, excluded, missing, _ = coverage.analysis2(str(args.file[0]))
print(json.dumps({
"covered": covered,
"excluded": excluded,
"missing": missing,
}))
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add Python script to read coverage data files
|
Add Python script to read coverage data files
|
Python
|
mit
|
Holiverh/atom-coverage-python
|
Add Python script to read coverage data files
|
"""Translate coverage.py data files to JSON."""
import argparse
import json
import pathlib
import sys
from coverage import coverage as Coverage
def parse_args(argv=None):
"""Parse command-line argument."""
parser = argparse.ArgumentParser()
parser.add_argument(
"coverage",
nargs=1,
type=pathlib.Path,
help="The path to a coverage data file."
)
parser.add_argument(
"file",
nargs=1,
type=pathlib.Path,
help="The path to a file to return coverage information for."
)
return parser.parse_args(argv)
def main(argv=None):
"""Application entry-point.
This will read the given coverage data file and prints a JSON object with
the following fields to stdout:
``covered``
An array of line numbers that are covered.
``excluded``
An array of line numbers that have been excluded from being subject
to coverage checks.
``missing``
An array of line numbers that have not been covered.
"""
args = parse_args(argv)
coverage = Coverage(data_file=str(args.coverage[0]))
_, covered, excluded, missing, _ = coverage.analysis2(str(args.file[0]))
print(json.dumps({
"covered": covered,
"excluded": excluded,
"missing": missing,
}))
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add Python script to read coverage data files<commit_after>
|
"""Translate coverage.py data files to JSON."""
import argparse
import json
import pathlib
import sys
from coverage import coverage as Coverage
def parse_args(argv=None):
"""Parse command-line argument."""
parser = argparse.ArgumentParser()
parser.add_argument(
"coverage",
nargs=1,
type=pathlib.Path,
help="The path to a coverage data file."
)
parser.add_argument(
"file",
nargs=1,
type=pathlib.Path,
help="The path to a file to return coverage information for."
)
return parser.parse_args(argv)
def main(argv=None):
"""Application entry-point.
This will read the given coverage data file and prints a JSON object with
the following fields to stdout:
``covered``
An array of line numbers that are covered.
``excluded``
An array of line numbers that have been excluded from being subject
to coverage checks.
``missing``
An array of line numbers that have not been covered.
"""
args = parse_args(argv)
coverage = Coverage(data_file=str(args.coverage[0]))
_, covered, excluded, missing, _ = coverage.analysis2(str(args.file[0]))
print(json.dumps({
"covered": covered,
"excluded": excluded,
"missing": missing,
}))
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add Python script to read coverage data files"""Translate coverage.py data files to JSON."""
import argparse
import json
import pathlib
import sys
from coverage import coverage as Coverage
def parse_args(argv=None):
"""Parse command-line argument."""
parser = argparse.ArgumentParser()
parser.add_argument(
"coverage",
nargs=1,
type=pathlib.Path,
help="The path to a coverage data file."
)
parser.add_argument(
"file",
nargs=1,
type=pathlib.Path,
help="The path to a file to return coverage information for."
)
return parser.parse_args(argv)
def main(argv=None):
"""Application entry-point.
This will read the given coverage data file and prints a JSON object with
the following fields to stdout:
``covered``
An array of line numbers that are covered.
``excluded``
An array of line numbers that have been excluded from being subject
to coverage checks.
``missing``
An array of line numbers that have not been covered.
"""
args = parse_args(argv)
coverage = Coverage(data_file=str(args.coverage[0]))
_, covered, excluded, missing, _ = coverage.analysis2(str(args.file[0]))
print(json.dumps({
"covered": covered,
"excluded": excluded,
"missing": missing,
}))
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add Python script to read coverage data files<commit_after>"""Translate coverage.py data files to JSON."""
import argparse
import json
import pathlib
import sys
from coverage import coverage as Coverage
def parse_args(argv=None):
"""Parse command-line argument."""
parser = argparse.ArgumentParser()
parser.add_argument(
"coverage",
nargs=1,
type=pathlib.Path,
help="The path to a coverage data file."
)
parser.add_argument(
"file",
nargs=1,
type=pathlib.Path,
help="The path to a file to return coverage information for."
)
return parser.parse_args(argv)
def main(argv=None):
"""Application entry-point.
This will read the given coverage data file and prints a JSON object with
the following fields to stdout:
``covered``
An array of line numbers that are covered.
``excluded``
An array of line numbers that have been excluded from being subject
to coverage checks.
``missing``
An array of line numbers that have not been covered.
"""
args = parse_args(argv)
coverage = Coverage(data_file=str(args.coverage[0]))
_, covered, excluded, missing, _ = coverage.analysis2(str(args.file[0]))
print(json.dumps({
"covered": covered,
"excluded": excluded,
"missing": missing,
}))
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
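Since `main()` in the script above accepts an `argv` list, it can be exercised directly without going through the command line; the paths below are placeholders, not real files.

```python
# Hypothetical direct call; ".coverage" and "src/mymodule.py" are placeholders.
exit_code = main([".coverage", "src/mymodule.py"])
# stdout: {"covered": [...], "excluded": [...], "missing": [...]}
```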
597938ad9fd4117fd5bd43f492983a0364cdf276
|
tests/cupy_tests/sparse_tests/test_construct.py
|
tests/cupy_tests/sparse_tests/test_construct.py
|
import unittest
import numpy
from cupy import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
'm': [3],
'n': [None, 3],
}))
@testing.with_requires('scipy')
class TestEye(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.eye(self.m, n=self.n, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestIdentity(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.identity(3, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
|
Add test for eye and identity
|
Add test for eye and identity
|
Python
|
mit
|
cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy
|
Add test for eye and identity
|
import unittest
import numpy
from cupy import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
'm': [3],
'n': [None, 3],
}))
@testing.with_requires('scipy')
class TestEye(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.eye(self.m, n=self.n, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestIdentity(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.identity(3, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
|
<commit_before><commit_msg>Add test for eye and identity<commit_after>
|
import unittest
import numpy
from cupy import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
'm': [3],
'n': [None, 3],
}))
@testing.with_requires('scipy')
class TestEye(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.eye(self.m, n=self.n, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestIdentity(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.identity(3, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
|
Add test for eye and identityimport unittest
import numpy
from cupy import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
'm': [3],
'n': [None, 3],
}))
@testing.with_requires('scipy')
class TestEye(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.eye(self.m, n=self.n, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestIdentity(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.identity(3, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
|
<commit_before><commit_msg>Add test for eye and identity<commit_after>import unittest
import numpy
from cupy import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
'm': [3],
'n': [None, 3],
}))
@testing.with_requires('scipy')
class TestEye(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.eye(self.m, n=self.n, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'format': ['csr', 'csc', 'coo'],
}))
@testing.with_requires('scipy')
class TestIdentity(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_eye(self, xp, sp):
x = sp.identity(3, dtype=self.dtype, format=self.format)
self.assertIsInstance(x, sp.spmatrix)
self.assertEqual(x.format, self.format)
return x.toarray()
|
|
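A quick usage sketch of the two constructors the tests above exercise; the `cupyx.scipy.sparse` import path is an assumption (releases from the era of this commit exposed it as `cupy.sparse`), so adjust to your CuPy version:
# Minimal sketch: GPU sparse eye/identity (import path assumed; see note above).
import cupyx.scipy.sparse as sparse
e = sparse.eye(3, n=4, dtype='float32', format='csr')  # 3x4 CSR with ones on the diagonal
i = sparse.identity(3, dtype='float64', format='coo')  # 3x3 COO identity
print(e.toarray())
print(i.format)  # -> 'coo'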
038bd9745cda294491589bbca60b1b3431ce39cc
|
llvmlite/tests/test_setup.py
|
llvmlite/tests/test_setup.py
|
"""
Tests for setup.py behavior
"""
import distutils.core
try:
import setuptools
except ImportError:
setuptools = None
import sys
import unittest
from collections import namedtuple
from importlib.util import spec_from_file_location, module_from_spec
from pathlib import Path
from . import TestCase
setup_path = Path(__file__).parent.parent.parent / "setup.py"
class TestSetup(TestCase):
@unittest.skipUnless(setup_path.is_file(), 'Need setup.py from source tree')
def test_guard_py_ver(self):
"""
Ensure setup.py's _guard_py_ver aborts setup for an unsupported version
"""
# NOTE: Adjust this when max_python_version in setup.py changes.
unsupported_version = (3, 11, 0)
unsupported_version_info = namedtuple(
"version_info",
(
"major", "minor", "micro", "releaselevel",
"serial", "n_fields", "n_sequence_fields", "n_unnamed_fields"
)
)(*unsupported_version, 'final', 0, 5, 5, 0)
sys_version_info = sys.version_info
# We run setup.py code! Since _guard_py_ver should fail, setup() should
# never be invoked. But let's be extra sure it isn't and replace it.
def failing_setup(*args, **kwargs):
raise RuntimeError("This should not be reached!")
distutils_core_setup = distutils.core.setup
if setuptools:
setuptools_setup = setuptools.setup
spec = spec_from_file_location("__main__", str(setup_path))
setup_module = module_from_spec(spec)
try:
sys.version_info = unsupported_version_info
distutils.core.setup = failing_setup
if setuptools:
setuptools.setup = failing_setup
msg = ("Cannot install on Python version {}; only versions "
">=[0-9]+\\.[0-9]+,<[0-9]+\\.[0-9]+ are supported"
).format(".".join(map(str, unsupported_version_info[:3])))
with self.assertRaisesRegex(RuntimeError, msg):
spec.loader.exec_module(setup_module)
finally:
# Restore anything we replaced.
sys.version_info = sys_version_info
distutils.core.setup = distutils_core_setup
if setuptools:
setuptools.setup = setuptools_setup
if __name__ == '__main__':
unittest.main()
|
Add test for setup.py's _guard_py_ver
|
Add test for setup.py's _guard_py_ver
|
Python
|
bsd-2-clause
|
numba/llvmlite,numba/llvmlite,numba/llvmlite,numba/llvmlite
|
Add test for setup.py's _guard_py_ver
|
"""
Tests for setup.py behavior
"""
import distutils.core
try:
import setuptools
except ImportError:
setuptools = None
import sys
import unittest
from collections import namedtuple
from importlib.util import spec_from_file_location, module_from_spec
from pathlib import Path
from . import TestCase
setup_path = Path(__file__).parent.parent.parent / "setup.py"
class TestSetup(TestCase):
@unittest.skipUnless(setup_path.is_file(), 'Need setup.py from source tree')
def test_guard_py_ver(self):
"""
Ensure setup.py's _guard_py_ver aborts setup for an unsupported version
"""
# NOTE: Adjust this when max_python_version in setup.py changes.
unsupported_version = (3, 11, 0)
unsupported_version_info = namedtuple(
"version_info",
(
"major", "minor", "micro", "releaselevel",
"serial", "n_fields", "n_sequence_fields", "n_unnamed_fields"
)
)(*unsupported_version, 'final', 0, 5, 5, 0)
sys_version_info = sys.version_info
# We run setup.py code! Since _guard_py_ver should fail, setup() should
# never be invoked. But let's be extra sure it isn't and replace it.
def failing_setup(*args, **kwargs):
raise RuntimeError("This should not be reached!")
distutils_core_setup = distutils.core.setup
if setuptools:
setuptools_setup = setuptools.setup
spec = spec_from_file_location("__main__", str(setup_path))
setup_module = module_from_spec(spec)
try:
sys.version_info = unsupported_version_info
distutils.core.setup = failing_setup
if setuptools:
setuptools.setup = failing_setup
msg = ("Cannot install on Python version {}; only versions "
">=[0-9]+\\.[0-9]+,<[0-9]+\\.[0-9]+ are supported"
).format(".".join(map(str, unsupported_version_info[:3])))
with self.assertRaisesRegex(RuntimeError, msg):
spec.loader.exec_module(setup_module)
finally:
# Restore anything we replaced.
sys.version_info = sys_version_info
distutils.core.setup = distutils_core_setup
if setuptools:
setuptools.setup = setuptools_setup
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for setup.py's _guard_py_ver<commit_after>
|
"""
Tests for setup.py behavior
"""
import distutils.core
try:
import setuptools
except ImportError:
setuptools = None
import sys
import unittest
from collections import namedtuple
from importlib.util import spec_from_file_location, module_from_spec
from pathlib import Path
from . import TestCase
setup_path = Path(__file__).parent.parent.parent / "setup.py"
class TestSetup(TestCase):
@unittest.skipUnless(setup_path.is_file(), 'Need setup.py from source tree')
def test_guard_py_ver(self):
"""
Ensure setup.py's _guard_py_ver aborts setup for an unsupported version
"""
# NOTE: Adjust this when max_python_version in setup.py changes.
unsupported_version = (3, 11, 0)
unsupported_version_info = namedtuple(
"version_info",
(
"major", "minor", "micro", "releaselevel",
"serial", "n_fields", "n_sequence_fields", "n_unnamed_fields"
)
)(*unsupported_version, 'final', 0, 5, 5, 0)
sys_version_info = sys.version_info
# We run setup.py code! Since _guard_py_ver should fail, setup() should
# never be invoked. But let's be extra sure it isn't and replace it.
def failing_setup(*args, **kwargs):
raise RuntimeError("This should not be reached!")
distutils_core_setup = distutils.core.setup
if setuptools:
setuptools_setup = setuptools.setup
spec = spec_from_file_location("__main__", str(setup_path))
setup_module = module_from_spec(spec)
try:
sys.version_info = unsupported_version_info
distutils.core.setup = failing_setup
if setuptools:
setuptools.setup = failing_setup
msg = ("Cannot install on Python version {}; only versions "
">=[0-9]+\\.[0-9]+,<[0-9]+\\.[0-9]+ are supported"
).format(".".join(map(str, unsupported_version_info[:3])))
with self.assertRaisesRegex(RuntimeError, msg):
spec.loader.exec_module(setup_module)
finally:
# Restore anything we replaced.
sys.version_info = sys_version_info
distutils.core.setup = distutils_core_setup
if setuptools:
setuptools.setup = setuptools_setup
if __name__ == '__main__':
unittest.main()
|
Add test for setup.py's _guard_py_ver"""
Tests for setup.py behavior
"""
import distutils.core
try:
import setuptools
except ImportError:
setuptools = None
import sys
import unittest
from collections import namedtuple
from importlib.util import spec_from_file_location, module_from_spec
from pathlib import Path
from . import TestCase
setup_path = Path(__file__).parent.parent.parent / "setup.py"
class TestSetup(TestCase):
@unittest.skipUnless(setup_path.is_file(), 'Need setup.py from source tree')
def test_guard_py_ver(self):
"""
Ensure setup.py's _guard_py_ver aborts setup for an unsupported version
"""
# NOTE: Adjust this when max_python_version in setup.py changes.
unsupported_version = (3, 11, 0)
unsupported_version_info = namedtuple(
"version_info",
(
"major", "minor", "micro", "releaselevel",
"serial", "n_fields", "n_sequence_fields", "n_unnamed_fields"
)
)(*unsupported_version, 'final', 0, 5, 5, 0)
sys_version_info = sys.version_info
# We run setup.py code! Since _guard_py_ver should fail, setup() should
# never be invoked. But let's be extra sure it isn't and replace it.
def failing_setup(*args, **kwargs):
raise RuntimeError("This should not be reached!")
distutils_core_setup = distutils.core.setup
if setuptools:
setuptools_setup = setuptools.setup
spec = spec_from_file_location("__main__", str(setup_path))
setup_module = module_from_spec(spec)
try:
sys.version_info = unsupported_version_info
distutils.core.setup = failing_setup
if setuptools:
setuptools.setup = failing_setup
msg = ("Cannot install on Python version {}; only versions "
">=[0-9]+\\.[0-9]+,<[0-9]+\\.[0-9]+ are supported"
).format(".".join(map(str, unsupported_version_info[:3])))
with self.assertRaisesRegex(RuntimeError, msg):
spec.loader.exec_module(setup_module)
finally:
# Restore anything we replaced.
sys.version_info = sys_version_info
distutils.core.setup = distutils_core_setup
if setuptools:
setuptools.setup = setuptools_setup
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for setup.py's _guard_py_ver<commit_after>"""
Tests for setup.py behavior
"""
import distutils.core
try:
import setuptools
except ImportError:
setuptools = None
import sys
import unittest
from collections import namedtuple
from importlib.util import spec_from_file_location, module_from_spec
from pathlib import Path
from . import TestCase
setup_path = Path(__file__).parent.parent.parent / "setup.py"
class TestSetup(TestCase):
@unittest.skipUnless(setup_path.is_file(), 'Need setup.py from source tree')
def test_guard_py_ver(self):
"""
Ensure setup.py's _guard_py_ver aborts setup for an unsupported version
"""
# NOTE: Adjust this when max_python_version in setup.py changes.
unsupported_version = (3, 11, 0)
unsupported_version_info = namedtuple(
"version_info",
(
"major", "minor", "micro", "releaselevel",
"serial", "n_fields", "n_sequence_fields", "n_unnamed_fields"
)
)(*unsupported_version, 'final', 0, 5, 5, 0)
sys_version_info = sys.version_info
# We run setup.py code! Since _guard_py_ver should fail, setup() should
# never be invoked. But let's be extra sure it isn't and replace it.
def failing_setup(*args, **kwargs):
raise RuntimeError("This should not be reached!")
distutils_core_setup = distutils.core.setup
if setuptools:
setuptools_setup = setuptools.setup
spec = spec_from_file_location("__main__", str(setup_path))
setup_module = module_from_spec(spec)
try:
sys.version_info = unsupported_version_info
distutils.core.setup = failing_setup
if setuptools:
setuptools.setup = failing_setup
msg = ("Cannot install on Python version {}; only versions "
">=[0-9]+\\.[0-9]+,<[0-9]+\\.[0-9]+ are supported"
).format(".".join(map(str, unsupported_version_info[:3])))
with self.assertRaisesRegex(RuntimeError, msg):
spec.loader.exec_module(setup_module)
finally:
# Restore anything we replaced.
sys.version_info = sys_version_info
distutils.core.setup = distutils_core_setup
if setuptools:
setuptools.setup = setuptools_setup
if __name__ == '__main__':
unittest.main()
|
|
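The `_guard_py_ver` function under test is not part of this record; below is a minimal sketch of the pattern the test implies, with version bounds chosen purely for illustration:
import sys
MIN_PY = (3, 6)   # assumed lower bound, for illustration only
MAX_PY = (3, 11)  # assumed upper bound; the test above pins 3.11.0 as unsupported
def _guard_py_ver():
    # Raise before setup() runs, with a message matching the test's regex:
    # "Cannot install on Python version X.Y.Z; only versions >=A.B,<C.D are supported"
    version = sys.version_info[:3]
    if not (MIN_PY <= version < MAX_PY):
        raise RuntimeError(
            "Cannot install on Python version {}; only versions "
            ">={},<{} are supported".format(
                ".".join(map(str, version)),
                ".".join(map(str, MIN_PY)),
                ".".join(map(str, MAX_PY))))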
a4aaa91209d8ab3e819bbf22e45f22e62af99426
|
pymatgen/symmetry/tests/test_spacegroup.py
|
pymatgen/symmetry/tests/test_spacegroup.py
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.
|
Add a unittest for spacegroup. Still very basic.
Former-commit-id: 1a63abe5e2a85b220d8b4fd5fa4cd25a1c739e49 [formerly 5f925f837f4ae3ba136f0d6e271848b06467ea8b]
Former-commit-id: 3112d8f3738a35c4572c9384f8860a6de69a9b6a
|
Python
|
mit
|
mbkumar/pymatgen,davidwaroquiers/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,blondegeek/pymatgen,gpetretto/pymatgen,tschaume/pymatgen,xhqu1981/pymatgen,ndardenne/pymatgen,dongsenfo/pymatgen,ndardenne/pymatgen,Bismarrck/pymatgen,davidwaroquiers/pymatgen,matk86/pymatgen,montoyjh/pymatgen,nisse3000/pymatgen,johnson1228/pymatgen,matk86/pymatgen,czhengsci/pymatgen,mbkumar/pymatgen,Bismarrck/pymatgen,tschaume/pymatgen,aykol/pymatgen,davidwaroquiers/pymatgen,czhengsci/pymatgen,ndardenne/pymatgen,gpetretto/pymatgen,gVallverdu/pymatgen,blondegeek/pymatgen,blondegeek/pymatgen,gmatteo/pymatgen,tschaume/pymatgen,aykol/pymatgen,vorwerkc/pymatgen,xhqu1981/pymatgen,vorwerkc/pymatgen,gmatteo/pymatgen,vorwerkc/pymatgen,tschaume/pymatgen,gVallverdu/pymatgen,matk86/pymatgen,dongsenfo/pymatgen,Bismarrck/pymatgen,fraricci/pymatgen,tallakahath/pymatgen,fraricci/pymatgen,richardtran415/pymatgen,aykol/pymatgen,fraricci/pymatgen,montoyjh/pymatgen,gpetretto/pymatgen,tallakahath/pymatgen,czhengsci/pymatgen,tallakahath/pymatgen,vorwerkc/pymatgen,gpetretto/pymatgen,nisse3000/pymatgen,gVallverdu/pymatgen,montoyjh/pymatgen,richardtran415/pymatgen,nisse3000/pymatgen,mbkumar/pymatgen,montoyjh/pymatgen,setten/pymatgen,nisse3000/pymatgen,johnson1228/pymatgen,matk86/pymatgen,dongsenfo/pymatgen,tschaume/pymatgen,xhqu1981/pymatgen,gVallverdu/pymatgen,setten/pymatgen,davidwaroquiers/pymatgen,mbkumar/pymatgen,setten/pymatgen,setten/pymatgen,blondegeek/pymatgen,dongsenfo/pymatgen,fraricci/pymatgen,richardtran415/pymatgen,johnson1228/pymatgen,richardtran415/pymatgen,czhengsci/pymatgen,johnson1228/pymatgen
|
Add a unittest for spacegroup. Still very basic.
Former-commit-id: 1a63abe5e2a85b220d8b4fd5fa4cd25a1c739e49 [formerly 5f925f837f4ae3ba136f0d6e271848b06467ea8b]
Former-commit-id: 3112d8f3738a35c4572c9384f8860a6de69a9b6a
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.
Former-commit-id: 1a63abe5e2a85b220d8b4fd5fa4cd25a1c739e49 [formerly 5f925f837f4ae3ba136f0d6e271848b06467ea8b]
Former-commit-id: 3112d8f3738a35c4572c9384f8860a6de69a9b6a<commit_after>
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.
Former-commit-id: 1a63abe5e2a85b220d8b4fd5fa4cd25a1c739e49 [formerly 5f925f837f4ae3ba136f0d6e271848b06467ea8b]
Former-commit-id: 3112d8f3738a35c4572c9384f8860a6de69a9b6a#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.
Former-commit-id: 1a63abe5e2a85b220d8b4fd5fa4cd25a1c739e49 [formerly 5f925f837f4ae3ba136f0d6e271848b06467ea8b]
Former-commit-id: 3112d8f3738a35c4572c9384f8860a6de69a9b6a<commit_after>#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
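Outside the test harness, the equivalence check above boils down to a few lines; this sketch reuses the 2012-era imports from the record (modern pymatgen moved this functionality to pymatgen.symmetry.analyzer):
# Sketch using the same era-appropriate API the test imports.
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
struct = Poscar.from_file('POSCAR').struct
sg = SymmetryFinder(struct, 0.001).get_spacegroup()
# True when the two site groups map onto each other under a spacegroup operation.
print(sg.are_symmetrically_equivalent([struct[0], struct[1]],
                                      [struct[2], struct[3]], 1e-3))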
4f6fde8329b0873f3568ce7153dc64017f5bc0cb
|
boto/beanstalk/__init__.py
|
boto/beanstalk/__init__.py
|
# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from layer1 import Layer1
from boto.regioninfo import RegionInfo
RegionData = {
'us-east-1': 'elasticbeanstalk.us-east-1.amazonaws.com',
'us-west-1': 'elasticbeanstalk.us-west-1.amazonaws.com',
'us-west-2': 'elasticbeanstalk.us-west-2.amazonaws.com',
'sa-east-1': 'elasticbeanstalk.sa-east-1.amazonaws.com',
'eu-west-1': 'elasticbeanstalk.eu-west-1.amazonaws.com',
'ap-northeast-1': 'elasticbeanstalk.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'elasticbeanstalk.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'elasticbeanstalk.ap-southeast-2.amazonaws.com',
}
def regions():
"""
Get all available regions for the Elastic Beanstalk service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=Layer1)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.beanstalk.Layer1`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.beanstalk.Layer1` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
|
Add connect_to_region to beanstalk module.
|
Add connect_to_region to beanstalk module.
|
Python
|
mit
|
jamesls/boto,appneta/boto,rjschwei/boto,SaranyaKarthikeyan/boto,drbild/boto,lra/boto,weebygames/boto,revmischa/boto,s0enke/boto,pfhayes/boto,drbild/boto,khagler/boto,dimdung/boto,Timus1712/boto,trademob/boto,alex/boto,nishigori/boto,jindongh/boto,janslow/boto,garnaat/boto,alfredodeza/boto,jameslegg/boto,disruptek/boto,dablak/boto,bleib1dj/boto,israelbenatar/boto,kouk/boto,nikhilraog/boto,dablak/boto,darjus-amzn/boto,shaunbrady/boto,elainexmas/boto,TiVoMaker/boto,lochiiconnectivity/boto,shipci/boto,bryx-inc/boto,weka-io/boto,rjschwei/boto,lochiiconnectivity/boto,j-carl/boto,ryansb/boto,alex/boto,serviceagility/boto,stevenbrichards/boto,appneta/boto,FATruden/boto,jamesls/boto,yangchaogit/boto,ddzialak/boto,cyclecomputing/boto,campenberger/boto,tpodowd/boto,vishnugonela/boto,nexusz99/boto,Asana/boto,awatts/boto,varunarya10/boto,jotes/boto,vijaylbais/boto,andresriancho/boto,abridgett/boto,andresriancho/boto,ric03uec/boto,Pretio/boto,ramitsurana/boto,acourtney2015/boto,tpodowd/boto,rosmo/boto,jameslegg/boto,disruptek/boto,podhmo/boto,ekalosak/boto,ocadotechnology/boto,felix-d/boto,kouk/boto,zachmullen/boto,rayluo/boto,clouddocx/boto,zzzirk/boto
|
Add connect_to_region to beanstalk module.
|
# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from layer1 import Layer1
from boto.regioninfo import RegionInfo
RegionData = {
'us-east-1': 'elasticbeanstalk.us-east-1.amazonaws.com',
'us-west-1': 'elasticbeanstalk.us-west-1.amazonaws.com',
'us-west-2': 'elasticbeanstalk.us-west-2.amazonaws.com',
'sa-east-1': 'elasticbeanstalk.sa-east-1.amazonaws.com',
'eu-west-1': 'elasticbeanstalk.eu-west-1.amazonaws.com',
'ap-northeast-1': 'elasticbeanstalk.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'elasticbeanstalk.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'elasticbeanstalk.ap-southeast-2.amazonaws.com',
}
def regions():
"""
Get all available regions for the Elastic Beanstalk service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=Layer1)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.beanstalk.Layer1`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.beanstalk.Layer1` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
|
<commit_before><commit_msg>Add connect_to_region to beanstalk module.<commit_after>
|
# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from layer1 import Layer1
from boto.regioninfo import RegionInfo
RegionData = {
'us-east-1': 'elasticbeanstalk.us-east-1.amazonaws.com',
'us-west-1': 'elasticbeanstalk.us-west-1.amazonaws.com',
'us-west-2': 'elasticbeanstalk.us-west-2.amazonaws.com',
'sa-east-1': 'elasticbeanstalk.sa-east-1.amazonaws.com',
'eu-west-1': 'elasticbeanstalk.eu-west-1.amazonaws.com',
'ap-northeast-1': 'elasticbeanstalk.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'elasticbeanstalk.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'elasticbeanstalk.ap-southeast-2.amazonaws.com',
}
def regions():
"""
Get all available regions for the Elastic Beanstalk service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=Layer1)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.beanstalk.Layer1`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.beanstalk.Layer1` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
|
Add connect_to_region to beanstalk module.# Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from layer1 import Layer1
from boto.regioninfo import RegionInfo
RegionData = {
'us-east-1': 'elasticbeanstalk.us-east-1.amazonaws.com',
'us-west-1': 'elasticbeanstalk.us-west-1.amazonaws.com',
'us-west-2': 'elasticbeanstalk.us-west-2.amazonaws.com',
'sa-east-1': 'elasticbeanstalk.sa-east-1.amazonaws.com',
'eu-west-1': 'elasticbeanstalk.eu-west-1.amazonaws.com',
'ap-northeast-1': 'elasticbeanstalk.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'elasticbeanstalk.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'elasticbeanstalk.ap-southeast-2.amazonaws.com',
}
def regions():
"""
Get all available regions for the Elastic Beanstalk service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=Layer1)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.beanstalk.Layer1`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.beanstalk.Layer1` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
|
<commit_before><commit_msg>Add connect_to_region to beanstalk module.<commit_after># Copyright (c) 2013 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from layer1 import Layer1
from boto.regioninfo import RegionInfo
RegionData = {
'us-east-1': 'elasticbeanstalk.us-east-1.amazonaws.com',
'us-west-1': 'elasticbeanstalk.us-west-1.amazonaws.com',
'us-west-2': 'elasticbeanstalk.us-west-2.amazonaws.com',
'sa-east-1': 'elasticbeanstalk.sa-east-1.amazonaws.com',
'eu-west-1': 'elasticbeanstalk.eu-west-1.amazonaws.com',
'ap-northeast-1': 'elasticbeanstalk.ap-northeast-1.amazonaws.com',
'ap-southeast-1': 'elasticbeanstalk.ap-southeast-1.amazonaws.com',
'ap-southeast-2': 'elasticbeanstalk.ap-southeast-2.amazonaws.com',
}
def regions():
"""
Get all available regions for the Elastic Beanstalk service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
regions = []
for region_name in RegionData:
region = RegionInfo(name=region_name,
endpoint=RegionData[region_name],
connection_cls=Layer1)
regions.append(region)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.beanstalk.Layer1`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.beanstalk.Layer1` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
|
|
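Typical call site for the helper above; credentials are resolved through boto's usual config/environment mechanisms, and the region name must match one of the `RegionData` keys:
# Sketch: open a Layer1 connection to Elastic Beanstalk in a given region.
import boto.beanstalk
conn = boto.beanstalk.connect_to_region('us-west-2')
if conn is None:
    raise ValueError('no such Elastic Beanstalk region')
# conn is a Layer1 instance bound to elasticbeanstalk.us-west-2.amazonaws.com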
915b3f3ca7008faa22bbe9ef32cd652d371dbe4a
|
bongo/apps/bongo/tests/cache_tests.py
|
bongo/apps/bongo/tests/cache_tests.py
|
from django.test import TestCase
from django.core.cache import cache
class CacheTestCase(TestCase):
def test_cache_task(self):
""" Test memcached or LocMemCache, just so we know it's working """
cached = cache.get("cache_key")
if not cached:
cache.set("cache_key", "cache_value", 30)
self.assertEqual(cache.get("cache_key"), "cache_value")
|
Add a test to verify the cache is working
Add a test to verify the cache is working
|
Add a test to verify the cache is working
|
Python
|
mit
|
BowdoinOrient/bongo,BowdoinOrient/bongo,BowdoinOrient/bongo,BowdoinOrient/bongo
|
Add a test to verify the cache is working
|
from django.test import TestCase
from django.core.cache import cache
class CacheTestCase(TestCase):
def test_cache_task(self):
""" Test memcached or LocMemCache, just so we know it's working """
cached = cache.get("cache_key")
if not cached:
cache.set("cache_key", "cache_value", 30)
self.assertEqual(cache.get("cache_key"), "cache_value")
|
<commit_before><commit_msg>Add a test to verify the cache is working<commit_after>
|
from django.test import TestCase
from django.core.cache import cache
class CacheTestCase(TestCase):
def test_cache_task(self):
""" Test memcached or LocMemCache, just so we know it's working """
cached = cache.get("cache_key")
if not cached:
cache.set("cache_key", "cache_value", 30)
self.assertEqual(cache.get("cache_key"), "cache_value")
|
Add a test to verify the cache is workingfrom django.test import TestCase
from django.core.cache import cache
class CacheTestCase(TestCase):
def test_cache_task(self):
""" Test memcached or LocMemCache, just so we know it's working """
cached = cache.get("cache_key")
if not cached:
cache.set("cache_key", "cache_value", 30)
self.assertEqual(cache.get("cache_key"), "cache_value")
|
<commit_before><commit_msg>Add a test to verify the cache is working<commit_after>from django.test import TestCase
from django.core.cache import cache
class CacheTestCase(TestCase):
def test_cache_task(self):
""" Test memcached or LocMemCache, just so we know it's working """
cached = cache.get("cache_key")
if not cached:
cache.set("cache_key", "cache_value", 30)
self.assertEqual(cache.get("cache_key"), "cache_value")
|
|
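The test's read-through pattern generalizes to a small helper; a sketch follows. Note it checks `is None` rather than truthiness, so falsy cached values (0, '', an empty list) are not needlessly recomputed, which the `if not cached` form in the test would do. Django 1.9+ also ships cache.get_or_set for exactly this.
# Sketch: reusable get-or-compute helper for Django's cache framework.
from django.core.cache import cache
def get_or_compute(key, compute, timeout=30):
    value = cache.get(key)
    if value is None:  # treat only a true miss as a miss
        value = compute()
        cache.set(key, value, timeout)
    return value
greeting = get_or_compute("cache_key", lambda: "cache_value")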
8e19937489f4481c860487a8f06440cfe5c7bea2
|
switchy/apps/routers.py
|
switchy/apps/routers.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Routing apps
"""
from collections import Counter
from ..marks import event_callback
from ..utils import get_logger
class Bridger(object):
'''Bridge sessions within a call an arbitrary number of times.
'''
def prepost(self):
self.log = get_logger(self.__class__.__name__)
self.call2entries = Counter()
self.count2bridgeargs = { # leg count to codec spec
1: 'proxy' # default is to proxy the call using the request uri
}
@event_callback("CHANNEL_PARK")
def on_park(self, sess):
'''Bridge per session for a given call using the argument spec
provided in `count2bridgeargs`. If the value for a given count is
`proxy` then simply proxy the session to the initial request uri
destination.
'''
call = sess.call
self.call2entries[call] += 1
args = self.count2bridgeargs.get(self.call2entries[call])
if args == 'proxy': # proxy to dest using request uri
sess.bridge()
elif args: # a dict of kwargs to pass to the bridge cmd
sess.bridge(**args)
@event_callback('CHANNEL_BRIDGE')
def on_bridge(self, sess):
self.log.debug("Bridged session '{}'".format(sess.uuid))
|
Add a generic bridging app - `Bridger`
|
Add a generic bridging app - `Bridger`
Allows for defining bridge args per profile entry count.
By default 'proxy' routing is implemented as described in the docs.
|
Python
|
mpl-2.0
|
wwezhuimeng/switch,sangoma/switchy
|
Add a generic bridging app - `Bridger`
Allows for defining bridge args per profile entry count.
By default 'proxy' routing is implemented as described in the docs.
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Routing apps
"""
from collections import Counter
from ..marks import event_callback
from ..utils import get_logger
class Bridger(object):
'''Bridge sessions within a call an arbitrary number of times.
'''
def prepost(self):
self.log = get_logger(self.__class__.__name__)
self.call2entries = Counter()
self.count2bridgeargs = { # leg count to codec spec
1: 'proxy' # default is to proxy the call using the request uri
}
@event_callback("CHANNEL_PARK")
def on_park(self, sess):
'''Bridge per session for a given call using the argument spec
provided in `count2bridgeargs`. If the value for a given count is
`proxy` then simply proxy the session to the initial request uri
destination.
'''
call = sess.call
self.call2entries[call] += 1
args = self.count2bridgeargs.get(self.call2entries[call])
if args == 'proxy': # proxy to dest using request uri
sess.bridge()
elif args: # a dict of kwargs to pass to the bridge cmd
sess.bridge(**args)
@event_callback('CHANNEL_BRIDGE')
def on_bridge(self, sess):
self.log.debug("Bridged session '{}'".format(sess.uuid))
|
<commit_before><commit_msg>Add a generic bridging app - `Bridger`
Allows for defining bridge args per profile entry count.
By default 'proxy' routing is implemented as described in the docs.<commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Routing apps
"""
from collections import Counter
from ..marks import event_callback
from ..utils import get_logger
class Bridger(object):
'''Bridge sessions within a call an arbitrary number of times.
'''
def prepost(self):
self.log = get_logger(self.__class__.__name__)
self.call2entries = Counter()
self.count2bridgeargs = { # leg count to codec spec
1: 'proxy' # default is to proxy the call using the request uri
}
@event_callback("CHANNEL_PARK")
def on_park(self, sess):
'''Bridge per session for a given call using the argument spec
provided in `count2bridgeargs`. If the value for a given count is
`proxy` then simply proxy the session to the initial request uri
destination.
'''
call = sess.call
self.call2entries[call] += 1
args = self.count2bridgeargs.get(self.call2entries[call])
if args == 'proxy': # proxy to dest using request uri
sess.bridge()
elif args: # a dict of kwargs to pass to the bridge cmd
sess.bridge(**args)
@event_callback('CHANNEL_BRIDGE')
def on_bridge(self, sess):
self.log.debug("Bridged session '{}'".format(sess.uuid))
|
Add a generic bridging app - `Bridger`
Allows for defining bridge args per profile entry count.
By default 'proxy' routing is implemented as described in the docs.# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Routing apps
"""
from collections import Counter
from ..marks import event_callback
from ..utils import get_logger
class Bridger(object):
'''Bridge sessions within a call an arbitrary number of times.
'''
def prepost(self):
self.log = get_logger(self.__class__.__name__)
self.call2entries = Counter()
self.count2bridgeargs = { # leg count to codec spec
1: 'proxy' # default is to proxy the call using the request uri
}
@event_callback("CHANNEL_PARK")
def on_park(self, sess):
'''Bridge per session for a given call using the argument spec
provided in `count2bridgeargs`. If the value for a given count is
`proxy` then simply proxy the session to the initial request uri
destination.
'''
call = sess.call
self.call2entries[call] += 1
args = self.count2bridgeargs.get(self.call2entries[call])
if args == 'proxy': # proxy to dest using request uri
sess.bridge()
elif args: # a dict of kwargs to pass to the bridge cmd
sess.bridge(**args)
@event_callback('CHANNEL_BRIDGE')
def on_bridge(self, sess):
self.log.debug("Bridged session '{}'".format(sess.uuid))
|
<commit_before><commit_msg>Add a generic bridging app - `Bridger`
Allows for defining bridge args per profile entry count.
By default 'proxy' routing is implemented as described in the docs.<commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Routing apps
"""
from collections import Counter
from ..marks import event_callback
from ..utils import get_logger
class Bridger(object):
'''Bridge sessions within a call an arbitrary number of times.
'''
def prepost(self):
self.log = get_logger(self.__class__.__name__)
self.call2entries = Counter()
self.count2bridgeargs = { # leg count to codec spec
1: 'proxy' # default is to proxy the call using the request uri
}
@event_callback("CHANNEL_PARK")
def on_park(self, sess):
'''Bridge per session for a given call using the argument spec
provided in `count2bridgeargs`. If the value for a given count is
`proxy` then simply proxy the session to the initial request uri
destination.
'''
call = sess.call
self.call2entries[call] += 1
args = self.count2bridgeargs.get(self.call2entries[call])
if args == 'proxy': # proxy to dest using request uri
sess.bridge()
elif args: # a dict of kwargs to pass to the bridge cmd
sess.bridge(**args)
@event_callback('CHANNEL_BRIDGE')
def on_bridge(self, sess):
self.log.debug("Bridged session '{}'".format(sess.uuid))
|
|
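Per the commit message, routing is customized by populating `count2bridgeargs`; here is a minimal subclass sketch, where the `dest_url` bridge keyword is an assumption about `sess.bridge()`'s signature:
# Sketch: re-route the second leg of every call to a fixed SIP destination.
from switchy.apps.routers import Bridger
class TwoHopBridger(Bridger):
    def prepost(self):
        super(TwoHopBridger, self).prepost()
        # entry 1 keeps the default request-uri proxying from the base class;
        # entry 2 is bridged with explicit kwargs ('dest_url' assumed).
        self.count2bridgeargs[2] = {'dest_url': 'sip:echo@192.168.0.10'}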
9b38539c1828af1c45ac736fccda907cded65c24
|
examples/vds_simple.py
|
examples/vds_simple.py
|
'''A simple example of building a virtual dataset.
This makes four 'source' HDF5 files, each with a 1D dataset of 100 numbers.
Then it makes a single 4x100 virtual dataset in a separate file, exposing
the four sources as one dataset.
'''
import h5py
from h5py._hl.vds import vmlist_to_kwawrgs
import numpy as np
# Create source files (1.h5 to 4.h5)
for n in range(1, 5):
with h5py.File('{}.h5'.format(n), 'w') as f:
d = f.create_dataset('data', (100,), 'i4')
d[:] = np.arange(100) + n
# Assemble virtual dataset
target = h5py.VirtualTarget("VDS.h5", "data", shape=(4, 100))
vm_list = []
for n in range(1, 5):
filename = "{}.h5".format(n)
vsource = h5py.VirtualSource(filename, 'data', shape=(100,))
vm = h5py.VirtualMap(vsource[0:100], target[n - 1, :], dtype='i4')
vm_list.append(vm)
# Add virtual dataset to output file
with h5py.File("VDS.h5", 'w', libver='latest') as f:
f.create_virtual_dataset(**vmlist_to_kwawrgs(vm_list, fillvalue=0))
print("Virtual dataset:")
print(f['data'][:, :10])
|
Add simple example of using VDS
|
Add simple example of using VDS
|
Python
|
bsd-3-clause
|
h5py/h5py,h5py/h5py,h5py/h5py
|
Add simple example of using VDS
|
'''A simple example of building a virtual dataset.
This makes four 'source' HDF5 files, each with a 1D dataset of 100 numbers.
Then it makes a single 4x100 virtual dataset in a separate file, exposing
the four sources as one dataset.
'''
import h5py
from h5py._hl.vds import vmlist_to_kwawrgs
import numpy as np
# Create source files (1.h5 to 4.h5)
for n in range(1, 5):
with h5py.File('{}.h5'.format(n), 'w') as f:
d = f.create_dataset('data', (100,), 'i4')
d[:] = np.arange(100) + n
# Assemble virtual dataset
target = h5py.VirtualTarget("VDS.h5", "data", shape=(4, 100))
vm_list = []
for n in range(1, 5):
filename = "{}.h5".format(n)
vsource = h5py.VirtualSource(filename, 'data', shape=(100,))
vm = h5py.VirtualMap(vsource[0:100], target[n - 1, :], dtype='i4')
vm_list.append(vm)
# Add virtual dataset to output file
with h5py.File("VDS.h5", 'w', libver='latest') as f:
f.create_virtual_dataset(**vmlist_to_kwawrgs(vm_list, fillvalue=0))
print("Virtual dataset:")
print(f['data'][:, :10])
|
<commit_before><commit_msg>Add simple example of using VDS<commit_after>
|
'''A simple example of building a virtual dataset.
This makes four 'source' HDF5 files, each with a 1D dataset of 100 numbers.
Then it makes a single 4x100 virtual dataset in a separate file, exposing
the four sources as one dataset.
'''
import h5py
from h5py._hl.vds import vmlist_to_kwawrgs
import numpy as np
# Create source files (1.h5 to 4.h5)
for n in range(1, 5):
with h5py.File('{}.h5'.format(n), 'w') as f:
d = f.create_dataset('data', (100,), 'i4')
d[:] = np.arange(100) + n
# Assemble virtual dataset
target = h5py.VirtualTarget("VDS.h5", "data", shape=(4, 100))
vm_list = []
for n in range(1, 5):
filename = "{}.h5".format(n)
vsource = h5py.VirtualSource(filename, 'data', shape=(100,))
vm = h5py.VirtualMap(vsource[0:100], target[n - 1, :], dtype='i4')
vm_list.append(vm)
# Add virtual dataset to output file
with h5py.File("VDS.h5", 'w', libver='latest') as f:
f.create_virtual_dataset(**vmlist_to_kwawrgs(vm_list, fillvalue=0))
print("Virtual dataset:")
print(f['data'][:, :10])
|
Add simple example of using VDS'''A simple example of building a virtual dataset.
This makes four 'source' HDF5 files, each with a 1D dataset of 100 numbers.
Then it makes a single 4x100 virtual dataset in a separate file, exposing
the four sources as one dataset.
'''
import h5py
from h5py._hl.vds import vmlist_to_kwawrgs
import numpy as np
# Create source files (1.h5 to 4.h5)
for n in range(1, 5):
with h5py.File('{}.h5'.format(n), 'w') as f:
d = f.create_dataset('data', (100,), 'i4')
d[:] = np.arange(100) + n
# Assemble virtual dataset
target = h5py.VirtualTarget("VDS.h5", "data", shape=(4, 100))
vm_list = []
for n in range(1, 5):
filename = "{}.h5".format(n)
vsource = h5py.VirtualSource(filename, 'data', shape=(100,))
vm = h5py.VirtualMap(vsource[0:100], target[n - 1, :], dtype='i4')
vm_list.append(vm)
# Add virtual dataset to output file
with h5py.File("VDS.h5", 'w', libver='latest') as f:
f.create_virtual_dataset(**vmlist_to_kwawrgs(vm_list, fillvalue=0))
print("Virtual dataset:")
print(f['data'][:, :10])
|
<commit_before><commit_msg>Add simple example of using VDS<commit_after>'''A simple example of building a virtual dataset.
This makes four 'source' HDF5 files, each with a 1D dataset of 100 numbers.
Then it makes a single 4x100 virtual dataset in a separate file, exposing
the four sources as one dataset.
'''
import h5py
from h5py._hl.vds import vmlist_to_kwawrgs
import numpy as np
# Create source files (1.h5 to 4.h5)
for n in range(1, 5):
with h5py.File('{}.h5'.format(n), 'w') as f:
d = f.create_dataset('data', (100,), 'i4')
d[:] = np.arange(100) + n
# Assemble virtual dataset
target = h5py.VirtualTarget("VDS.h5", "data", shape=(4, 100))
vm_list = []
for n in range(1, 5):
filename = "{}.h5".format(n)
vsource = h5py.VirtualSource(filename, 'data', shape=(100,))
vm = h5py.VirtualMap(vsource[0:100], target[n - 1, :], dtype='i4')
vm_list.append(vm)
# Add virtual dataset to output file
with h5py.File("VDS.h5", 'w', libver='latest') as f:
f.create_virtual_dataset(**vmlist_to_kwawrgs(vm_list, fillvalue=0))
print("Virtual dataset:")
print(f['data'][:, :10])
|
|
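The `fillvalue=0` passed above is what readers see for any region whose source is unavailable; this follow-on sketch (file paths as created by the example) demonstrates that by temporarily hiding one source:
# Sketch: rows backed by a missing source file read back as the fillvalue.
import os
import h5py
os.rename('3.h5', '3.h5.bak')          # hide the third source
with h5py.File('VDS.h5', 'r') as f:
    print(f['data'][2, :10])           # all zeros: the fillvalue
os.rename('3.h5.bak', '3.h5')          # restore it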
9e8bce87c10e14a1961cab1a6a0275d8008b590e
|
djangae/contrib/auth/backends.py
|
djangae/contrib/auth/backends.py
|
from django.contrib.auth.models import User, UserPermissionStorage
class AppEngineUserAPI(object):
"""
A custom Django authentication backend, which lets us authenticate against the Google
users API
"""
supports_anonymous_user = True
def authenticate(self, **credentials):
"""
Handles authentication of a user from the given credentials.
Credentials must be a combination of 'request' and 'google_user'.
If any other combination of credentials are given then we raise a TypeError, see authenticate() in django.contrib.auth.__init__.py.
"""
if len(credentials) != 2:
raise TypeError()
request = credentials.get('request', None)
google_user = credentials.get('google_user', None)
if request and google_user:
username = google_user.user_id()
email = google_user.email().lower()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User.objects.create_user(username, email)
return user
else:
raise TypeError() # Django expects to be able to pass in whatever credentials it has, and for you to raise a TypeError if they mean nothing to you
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def _get_ups_attr(self, attr, user_obj, obj=None):
""" Collects either `all_permissions` or `group_permissions` from all matching
UserPermissionStorage objects (the specific UPS for the given row/obj
and the generic global UPS of the user).
"""
perms = []
for ups in UserPermissionStorage.get_for(user_obj, obj=obj):
perms.extend(getattr(ups, attr))
return perms
def get_group_permissions(self, user_obj, obj=None, user_perm_obj=None):
""" Returns a set of permission strings that this user has through his/her groups. """
return self._get_ups_attr('group_permissions', user_obj, obj=obj)
def get_all_permissions(self, user_obj, obj=None):
#FIXME: the caching attr should take into account the obj param!
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set(self._get_ups_attr('all_permissions', user_obj, obj=obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
Note that in Engage we use this to check permissions on a section of the CMS,
e.g. 'content', 'agents', rather than an actual django app.
"""
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
|
Add an appengine users API auth backend
|
Add an appengine users API auth backend
|
Python
|
bsd-3-clause
|
armirusco/djangae,jscissr/djangae,stucox/djangae,stucox/djangae,leekchan/djangae,jscissr/djangae,pablorecio/djangae,asendecka/djangae,kirberich/djangae,martinogden/djangae,pablorecio/djangae,potatolondon/djangae,jscissr/djangae,chargrizzle/djangae,pablorecio/djangae,chargrizzle/djangae,trik/djangae,leekchan/djangae,grzes/djangae,chargrizzle/djangae,wangjun/djangae,leekchan/djangae,trik/djangae,SiPiggles/djangae,martinogden/djangae,martinogden/djangae,armirusco/djangae,trik/djangae,asendecka/djangae,SiPiggles/djangae,armirusco/djangae,asendecka/djangae,wangjun/djangae,b-cannon/my_djae,nealedj/djangae,grzes/djangae,nealedj/djangae,potatolondon/djangae,grzes/djangae,wangjun/djangae,kirberich/djangae,nealedj/djangae,SiPiggles/djangae,stucox/djangae,kirberich/djangae
|
Add an appengine users API auth backend
|
from django.contrib.auth.models import User, UserPermissionStorage
class AppEngineUserAPI(object):
"""
A custom Django authentication backend, which lets us authenticate against the Google
users API
"""
supports_anonymous_user = True
def authenticate(self, **credentials):
"""
Handles authentication of a user from the given credentials.
Credentials must be a combination of 'request' and 'google_user'.
If any other combination of credentials are given then we raise a TypeError, see authenticate() in django.contrib.auth.__init__.py.
"""
if len(credentials) != 2:
raise TypeError()
request = credentials.get('request', None)
google_user = credentials.get('google_user', None)
if request and google_user:
username = google_user.user_id()
email = google_user.email().lower()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User.objects.create_user(username, email)
return user
else:
raise TypeError() # Django expects to be able to pass in whatever credentials it has, and for you to raise a TypeError if they mean nothing to you
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def _get_ups_attr(self, attr, user_obj, obj=None):
""" Collects either `all_permissions` or `group_permissions` from all matching
UserPermissionStorage objects (the specific UPS for the given row/obj
and the generic global UPS of the user).
"""
perms = []
for ups in UserPermissionStorage.get_for(user_obj, obj=obj):
perms.extend(getattr(ups, attr))
return perms
def get_group_permissions(self, user_obj, obj=None, user_perm_obj=None):
""" Returns a set of permission strings that this user has through his/her groups. """
return self._get_ups_attr('group_permissions', user_obj, obj=obj)
def get_all_permissions(self, user_obj, obj=None):
#FIXME: the caching attr should take into account the obj param!
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set(self._get_ups_attr('all_permissions', user_obj, obj=obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
Note that in Engage we use this to check permissions on a section of the CMS,
e.g. 'content', 'agents', rather than an actual django app.
"""
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
|
<commit_before><commit_msg>Add an appengine users API auth backend<commit_after>
|
from django.contrib.auth.models import User, UserPermissionStorage
class AppEngineUserAPI(object):
"""
A custom Django authentication backend, which lets us authenticate against the Google
users API
"""
supports_anonymous_user = True
def authenticate(self, **credentials):
"""
Handles authentication of a user from the given credentials.
Credentials must be a combination of 'request' and 'google_user'.
If any other combination of credentials are given then we raise a TypeError, see authenticate() in django.contrib.auth.__init__.py.
"""
if len(credentials) != 2:
raise TypeError()
request = credentials.get('request', None)
google_user = credentials.get('google_user', None)
if request and google_user:
username = google_user.user_id()
email = google_user.email().lower()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User.objects.create_user(username, email)
return user
else:
raise TypeError() # Django expects to be able to pass in whatever credentials it has, and for you to raise a TypeError if they mean nothing to you
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def _get_ups_attr(self, attr, user_obj, obj=None):
""" Collects either `all_permissions` or `group_permissions` from all matching
UserPermissionStorage objects (the specific UPS for the given row/obj
and the generic global UPS of the user).
"""
perms = []
for ups in UserPermissionStorage.get_for(user_obj, obj=obj):
perms.extend(getattr(ups, attr))
return perms
def get_group_permissions(self, user_obj, obj=None, user_perm_obj=None):
""" Returns a set of permission strings that this user has through his/her groups. """
return self._get_ups_attr('group_permissions', user_obj, obj=obj)
def get_all_permissions(self, user_obj, obj=None):
#FIXME: the caching attr should take into account the obj param!
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set(self._get_ups_attr('all_permissions', user_obj, obj=obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
Note that in Engage we use this to check permissions on a section of the CMS,
e.g. 'content', 'agents', rather than an actual django app.
"""
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
|
Add an appengine users API auth backendfrom django.contrib.auth.models import User, UserPermissionStorage
class AppEngineUserAPI(object):
"""
A custom Django authentication backend, which lets us authenticate against the Google
users API
"""
supports_anonymous_user = True
def authenticate(self, **credentials):
"""
Handles authentication of a user from the given credentials.
Credentials must be a combination of 'request' and 'google_user'.
        If any other combination of credentials is given then we raise a TypeError; see authenticate() in django.contrib.auth.__init__.py.
"""
if len(credentials) != 2:
raise TypeError()
request = credentials.get('request', None)
google_user = credentials.get('google_user', None)
if request and google_user:
username = google_user.user_id()
email = google_user.email().lower()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User.objects.create_user(username, email)
return user
else:
raise TypeError() # Django expects to be able to pass in whatever credentials it has, and for you to raise a TypeError if they mean nothing to you
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def _get_ups_attr(self, attr, user_obj, obj=None):
""" Collects either `all_permissions` or `group_permissions` from all matching
UserPermissionStorage objects (the specific UPS for the given row/obj
and the generic global UPS of the user).
"""
perms = []
for ups in UserPermissionStorage.get_for(user_obj, obj=obj):
perms.extend(getattr(ups, attr))
return perms
def get_group_permissions(self, user_obj, obj=None, user_perm_obj=None):
""" Returns a set of permission strings that this user has through his/her groups. """
return self._get_ups_attr('group_permissions', user_obj, obj=obj)
def get_all_permissions(self, user_obj, obj=None):
#FIXME: the caching attr should take into account the obj param!
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set(self._get_ups_attr('all_permissions', user_obj, obj=obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
Note that in Engage we use this to check permissions on a section of the CMS,
e.g. 'content', 'agents', rather than an actual django app.
"""
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
|
<commit_before><commit_msg>Add an appengine users API auth backend<commit_after>from django.contrib.auth.models import User, UserPermissionStorage
class AppEngineUserAPI(object):
"""
A custom Django authentication backend, which lets us authenticate against the Google
users API
"""
supports_anonymous_user = True
def authenticate(self, **credentials):
"""
Handles authentication of a user from the given credentials.
Credentials must be a combination of 'request' and 'google_user'.
        If any other combination of credentials is given then we raise a TypeError; see authenticate() in django.contrib.auth.__init__.py.
"""
if len(credentials) != 2:
raise TypeError()
request = credentials.get('request', None)
google_user = credentials.get('google_user', None)
if request and google_user:
username = google_user.user_id()
email = google_user.email().lower()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
user = User.objects.create_user(username, email)
return user
else:
raise TypeError() # Django expects to be able to pass in whatever credentials it has, and for you to raise a TypeError if they mean nothing to you
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def _get_ups_attr(self, attr, user_obj, obj=None):
""" Collects either `all_permissions` or `group_permissions` from all matching
UserPermissionStorage objects (the specific UPS for the given row/obj
and the generic global UPS of the user).
"""
perms = []
for ups in UserPermissionStorage.get_for(user_obj, obj=obj):
perms.extend(getattr(ups, attr))
return perms
def get_group_permissions(self, user_obj, obj=None, user_perm_obj=None):
""" Returns a set of permission strings that this user has through his/her groups. """
return self._get_ups_attr('group_permissions', user_obj, obj=obj)
def get_all_permissions(self, user_obj, obj=None):
#FIXME: the caching attr should take into account the obj param!
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set(self._get_ups_attr('all_permissions', user_obj, obj=obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
Note that in Engage we use this to check permissions on a section of the CMS,
e.g. 'content', 'agents', rather than an actual django app.
"""
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
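
For illustration, a minimal sketch of how a backend like this would be wired up and invoked; the module path djangae.contrib.auth_backends and the view are assumptions, and google.appengine.api.users is only importable inside the App Engine runtime:

# settings.py -- register the backend (module path is hypothetical)
AUTHENTICATION_BACKENDS = (
    'djangae.contrib.auth_backends.AppEngineUserAPI',
)

# views.py -- hand the users API result to authenticate()
from django.contrib.auth import authenticate, login
from google.appengine.api import users

def login_view(request):
    google_user = users.get_current_user()  # None when not signed in
    # authenticate() returns None if the backend raised TypeError
    user = authenticate(request=request, google_user=google_user)
    if user is not None:
        login(request, user)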
|
|
58c55d65cc32f673a378a65772e7ae447907994e
|
test/MSVC/pch-basics.py
|
test/MSVC/pch-basics.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify PCH works to build a simple exe and a simple dll.
"""
import time
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.skip_if_not_msvc()
test.write('Main.cpp', """\
#include "Precompiled.h"
int main()
{
return testf();
}
""")
test.write('Precompiled.cpp', """\
#include "Precompiled.h"
""")
test.write('Precompiled.h', """\
#pragma once
static int testf()
{
return 0;
}
""")
test.write('SConstruct', """\
env = Environment()
env['PCHSTOP'] = 'Precompiled.h'
env['PCH'] = env.PCH('Precompiled.cpp')[0]
env.SharedLibrary('pch_dll', 'Main.cpp')
env.Program('pch_exe', 'Main.cpp')
""")
test.run(arguments='.', stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Add a test on basic PCH behavior: build a simple executable and a simple shared lib
|
Add a test on basic PCH behavior: build a simple executable and a simple shared lib
|
Python
|
mit
|
timj/scons,timj/scons,timj/scons,andrewyoung1991/scons,andrewyoung1991/scons,timj/scons,timj/scons,timj/scons,timj/scons,andrewyoung1991/scons,andrewyoung1991/scons,timj/scons,andrewyoung1991/scons,andrewyoung1991/scons,timj/scons,andrewyoung1991/scons,andrewyoung1991/scons,andrewyoung1991/scons
|
Add a test on basic PCH behavior: build a simple executable and a simple shared lib
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify PCH works to build a simple exe and a simple dll.
"""
import time
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.skip_if_not_msvc()
test.write('Main.cpp', """\
#include "Precompiled.h"
int main()
{
return testf();
}
""")
test.write('Precompiled.cpp', """\
#include "Precompiled.h"
""")
test.write('Precompiled.h', """\
#pragma once
static int testf()
{
return 0;
}
""")
test.write('SConstruct', """\
env = Environment()
env['PCHSTOP'] = 'Precompiled.h'
env['PCH'] = env.PCH('Precompiled.cpp')[0]
env.SharedLibrary('pch_dll', 'Main.cpp')
env.Program('pch_exe', 'Main.cpp')
""")
test.run(arguments='.', stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
<commit_before><commit_msg>Add a test on basic PCH behavior: build a simple executable and a simple shared lib<commit_after>
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify PCH works to build a simple exe and a simple dll.
"""
import time
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.skip_if_not_msvc()
test.write('Main.cpp', """\
#include "Precompiled.h"
int main()
{
return testf();
}
""")
test.write('Precompiled.cpp', """\
#include "Precompiled.h"
""")
test.write('Precompiled.h', """\
#pragma once
static int testf()
{
return 0;
}
""")
test.write('SConstruct', """\
env = Environment()
env['PCHSTOP'] = 'Precompiled.h'
env['PCH'] = env.PCH('Precompiled.cpp')[0]
env.SharedLibrary('pch_dll', 'Main.cpp')
env.Program('pch_exe', 'Main.cpp')
""")
test.run(arguments='.', stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Add a test on basic PCH behavior: build a simple executable and a simple shared lib#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify PCH works to build a simple exe and a simple dll.
"""
import time
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.skip_if_not_msvc()
test.write('Main.cpp', """\
#include "Precompiled.h"
int main()
{
return testf();
}
""")
test.write('Precompiled.cpp', """\
#include "Precompiled.h"
""")
test.write('Precompiled.h', """\
#pragma once
static int testf()
{
return 0;
}
""")
test.write('SConstruct', """\
env = Environment()
env['PCHSTOP'] = 'Precompiled.h'
env['PCH'] = env.PCH('Precompiled.cpp')[0]
env.SharedLibrary('pch_dll', 'Main.cpp')
env.Program('pch_exe', 'Main.cpp')
""")
test.run(arguments='.', stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
<commit_before><commit_msg>Add a test on basic PCH behavior: build a simple executable and a simple shared lib<commit_after>#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify PCH works to build a simple exe and a simple dll.
"""
import time
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.skip_if_not_msvc()
test.write('Main.cpp', """\
#include "Precompiled.h"
int main()
{
return testf();
}
""")
test.write('Precompiled.cpp', """\
#include "Precompiled.h"
""")
test.write('Precompiled.h', """\
#pragma once
static int testf()
{
return 0;
}
""")
test.write('SConstruct', """\
env = Environment()
env['PCHSTOP'] = 'Precompiled.h'
env['PCH'] = env.PCH('Precompiled.cpp')[0]
env.SharedLibrary('pch_dll', 'Main.cpp')
env.Program('pch_exe', 'Main.cpp')
""")
test.run(arguments='.', stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
27d975bf84122ec62f96ddae4777e177d562bf7e
|
thinc/extra/load_nlp.py
|
thinc/extra/load_nlp.py
|
try:
    import spacy
except ImportError:  # spacy may be missing; get_spacy() checks for None
    spacy = None
SPACY_MODELS = {}
def get_spacy(lang, parser=False, tagger=False, entity=False):
global SPACY_MODELS
if spacy is None:
raise ImportError("Could not import spacy. Is it installed?")
if lang not in SPACY_MODELS:
SPACY_MODELS[lang] = spacy.load(
lang, parser=parser, tagger=tagger, entity=entity)
return SPACY_MODELS[lang]
|
Add loader for spaCy, with singleton
|
Add loader for spaCy, with singleton
|
Python
|
mit
|
spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc
|
Add loader for spaCy, with singleton
|
try:
    import spacy
except ImportError:  # spacy may be missing; get_spacy() checks for None
    spacy = None
SPACY_MODELS = {}
def get_spacy(lang, parser=False, tagger=False, entity=False):
global SPACY_MODELS
if spacy is None:
raise ImportError("Could not import spacy. Is it installed?")
if lang not in SPACY_MODELS:
SPACY_MODELS[lang] = spacy.load(
lang, parser=parser, tagger=tagger, entity=entity)
return SPACY_MODELS[lang]
|
<commit_before><commit_msg>Add loader for spaCy, with singleton<commit_after>
|
try:
    import spacy
except ImportError:  # spacy may be missing; get_spacy() checks for None
    spacy = None
SPACY_MODELS = {}
def get_spacy(lang, parser=False, tagger=False, entity=False):
global SPACY_MODELS
if spacy is None:
raise ImportError("Could not import spacy. Is it installed?")
if lang not in SPACY_MODELS:
SPACY_MODELS[lang] = spacy.load(
lang, parser=parser, tagger=tagger, entity=entity)
return SPACY_MODELS[lang]
|
Add loader for spaCy, with singletontry:
    import spacy
except ImportError:  # spacy may be missing; get_spacy() checks for None
    spacy = None
SPACY_MODELS = {}
def get_spacy(lang, parser=False, tagger=False, entity=False):
global SPACY_MODELS
if spacy is None:
raise ImportError("Could not import spacy. Is it installed?")
if lang not in SPACY_MODELS:
SPACY_MODELS[lang] = spacy.load(
lang, parser=parser, tagger=tagger, entity=entity)
return SPACY_MODELS[lang]
|
<commit_before><commit_msg>Add loader for spaCy, with singleton<commit_after>try:
    import spacy
except ImportError:  # spacy may be missing; get_spacy() checks for None
    spacy = None
SPACY_MODELS = {}
def get_spacy(lang, parser=False, tagger=False, entity=False):
global SPACY_MODELS
if spacy is None:
raise ImportError("Could not import spacy. Is it installed?")
if lang not in SPACY_MODELS:
SPACY_MODELS[lang] = spacy.load(
lang, parser=parser, tagger=tagger, entity=entity)
return SPACY_MODELS[lang]
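
The module-level SPACY_MODELS dict is what makes this a singleton: repeated calls with the same lang share one loaded pipeline. A minimal sketch of the expected behaviour, assuming the 'en' model is installed (spaCy v1-era API, matching the parser/tagger/entity keywords above):

from thinc.extra.load_nlp import get_spacy

nlp_a = get_spacy('en')  # first call loads the model from disk
nlp_b = get_spacy('en')  # second call returns the cached instance
assert nlp_a is nlp_b    # both names point at the same object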
|
|
31a42391445846cf2c8c4ace5319df92df8e5e96
|
plugins/vars/default_vars.py
|
plugins/vars/default_vars.py
|
# -*- coding: utf-8 -*-
# (c) 2014, Craig Tracey <craigtracey@gmail.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import collections
import yaml
from ansible.constants import DEFAULTS, get_config, load_config_file
from ansible.inventory.vars_plugins.group_vars import VarsModule \
as GroupVarsModule
def deep_update_dict(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = deep_update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
class VarsModule(GroupVarsModule):
    def __init__(self, inventory):
super(VarsModule, self).__init__(inventory)
def _get_defaults(self):
p = load_config_file()
defaults_file = get_config(p, DEFAULTS, 'var_defaults_file',
'ANSIBLE_VAR_DEFAULTS_FILE', None)
if defaults_file:
return yaml.load(open(defaults_file))
def run(self, host):
default_vars = self._get_defaults()
group_vars = super(VarsModule, self).run(host)
if default_vars:
return deep_update_dict(default_vars, group_vars)
return group_vars
|
Add a default variable plugin
|
Add a default variable plugin
As it stands right now, there are a number of scenarios where we would
like to be more DRY. We have multiple environments that we support and
copying and pasting variables from one environment to the next is a
recipe for disaster. Therefore provide an optional group_vars wrapper
plugin.
|
Python
|
mit
|
nirajdp76/ursula,narengan/ursula,pbannister/ursula,nirajdp76/ursula,pgraziano/ursula,davidcusatis/ursula,edtubillara/ursula,j2sol/ursula,MaheshIBM/ursula,masteinhauser/ursula,knandya/ursula,blueboxjesse/ursula,twaldrop/ursula,sivakom/ursula,nirajdp76/ursula,blueboxgroup/ursula,greghaynes/ursula,rongzhus/ursula,blueboxjesse/ursula,twaldrop/ursula,masteinhauser/ursula,jwaibel/ursula,j2sol/ursula,pgraziano/ursula,rongzhus/ursula,edtubillara/ursula,masteinhauser/ursula,pgraziano/ursula,EricCrosson/ursula,msambol/ursula,allomov/ursula,persistent-ursula/ursula,nirajdp76/ursula,ddaskal/ursula,narengan/ursula,greghaynes/ursula,wupeiran/ursula,jwaibel/ursula,ryshah/ursula,pbannister/ursula,msambol/ursula,blueboxgroup/ursula,twaldrop/ursula,j2sol/ursula,panxia6679/ursula,jwaibel/ursula,lihkin213/ursula,wupeiran/ursula,zrs233/ursula,davidcusatis/ursula,mjbrewer/ursula,channus/ursula,MaheshIBM/ursula,ryshah/ursula,fancyhe/ursula,narengan/ursula,pgraziano/ursula,aldevigi/ursula,kennjason/ursula,ddaskal/ursula,andrewrothstein/ursula,dlundquist/ursula,mjbrewer/ursula,andrewrothstein/ursula,wupeiran/ursula,blueboxgroup/ursula,fancyhe/ursula,zrs233/ursula,ddaskal/ursula,edtubillara/ursula,lihkin213/ursula,knandya/ursula,zrs233/ursula,blueboxjesse/ursula,panxia6679/ursula,wupeiran/ursula,fancyhe/ursula,davidcusatis/ursula,masteinhauser/ursula,channus/ursula,EricCrosson/ursula,paulczar/ursula,msambol/ursula,paulczar/ursula,aldevigi/ursula,dlundquist/ursula,channus/ursula,blueboxgroup/ursula,fancyhe/ursula,allomov/ursula,sivakom/ursula,narengan/ursula,lihkin213/ursula,allomov/ursula,panxia6679/ursula,blueboxjesse/ursula,paulczar/ursula,knandya/ursula,knandya/ursula,dlundquist/ursula,persistent-ursula/ursula,rongzhus/ursula,twaldrop/ursula,ryshah/ursula,MaheshIBM/ursula,j2sol/ursula,channus/ursula,edtubillara/ursula,zrs233/ursula,pbannister/ursula,EricCrosson/ursula,ryshah/ursula,lihkin213/ursula,kennjason/ursula,persistent-ursula/ursula,rongzhus/ursula,ddaskal/ursula,persistent-ursula/ursula,mjbrewer/ursula,aldevigi/ursula,andrewrothstein/ursula,panxia6679/ursula,sivakom/ursula,kennjason/ursula,greghaynes/ursula
|
Add a default variable plugin
As it stands right now, there are a number of scenarios where we would
like to be more DRY. We have multiple environments that we support and
copying and pasting variables from one environment to the next is a
recipe for disaster. Therefore provide an optional group_vars wrapper
plugin.
|
# -*- coding: utf-8 -*-
# (c) 2014, Craig Tracey <craigtracey@gmail.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import collections
import yaml
from ansible.constants import DEFAULTS, get_config, load_config_file
from ansible.inventory.vars_plugins.group_vars import VarsModule \
as GroupVarsModule
def deep_update_dict(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = deep_update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
class VarsModule(GroupVarsModule):
    def __init__(self, inventory):
super(VarsModule, self).__init__(inventory)
def _get_defaults(self):
p = load_config_file()
defaults_file = get_config(p, DEFAULTS, 'var_defaults_file',
'ANSIBLE_VAR_DEFAULTS_FILE', None)
if defaults_file:
return yaml.load(open(defaults_file))
def run(self, host):
default_vars = self._get_defaults()
group_vars = super(VarsModule, self).run(host)
if default_vars:
return deep_update_dict(default_vars, group_vars)
return group_vars
|
<commit_before><commit_msg>Add a default variable plugin
As it stands right now, there are a number of scenarios where we would
like to be more DRY. We have multiple environments that we support and
copying and pasting variables from one environment to the next is a
recipe for disaster. Therefore provide an optional group_vars wrapper
plugin.<commit_after>
|
# -*- coding: utf-8 -*-
# (c) 2014, Craig Tracey <craigtracey@gmail.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import collections
import yaml
from ansible.constants import DEFAULTS, get_config, load_config_file
from ansible.inventory.vars_plugins.group_vars import VarsModule \
as GroupVarsModule
def deep_update_dict(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = deep_update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
class VarsModule(GroupVarsModule):
    def __init__(self, inventory):
super(VarsModule, self).__init__(inventory)
def _get_defaults(self):
p = load_config_file()
defaults_file = get_config(p, DEFAULTS, 'var_defaults_file',
'ANSIBLE_VAR_DEFAULTS_FILE', None)
if defaults_file:
return yaml.load(open(defaults_file))
def run(self, host):
default_vars = self._get_defaults()
group_vars = super(VarsModule, self).run(host)
if default_vars:
return deep_update_dict(default_vars, group_vars)
return group_vars
|
Add a default variable plugin
As it stands right now, there are a number of scenarios where we would
like to be more DRY. We have multiple environments that we support and
copying and pasting variables from one environment to the next is a
recipe for disaster. Therefore provide an optional group_vars wrapper
plugin.# -*- coding: utf-8 -*-
# (c) 2014, Craig Tracey <craigtracey@gmail.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import collections
import yaml
from ansible.constants import DEFAULTS, get_config, load_config_file
from ansible.inventory.vars_plugins.group_vars import VarsModule \
as GroupVarsModule
def deep_update_dict(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = deep_update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
class VarsModule(GroupVarsModule):
    def __init__(self, inventory):
super(VarsModule, self).__init__(inventory)
def _get_defaults(self):
p = load_config_file()
defaults_file = get_config(p, DEFAULTS, 'var_defaults_file',
'ANSIBLE_VAR_DEFAULTS_FILE', None)
if defaults_file:
return yaml.load(open(defaults_file))
def run(self, host):
default_vars = self._get_defaults()
group_vars = super(VarsModule, self).run(host)
if default_vars:
return deep_update_dict(default_vars, group_vars)
return group_vars
|
<commit_before><commit_msg>Add a default variable plugin
As it stands right now, there are a number of scenarios where we would
like to be more DRY. We have multiple environments that we support and
copying and pasting variables from one environment to the next is a
recipe for disaster. Therefore provide an optional group_vars wrapper
plugin.<commit_after># -*- coding: utf-8 -*-
# (c) 2014, Craig Tracey <craigtracey@gmail.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import collections
import yaml
from ansible.constants import DEFAULTS, get_config, load_config_file
from ansible.inventory.vars_plugins.group_vars import VarsModule \
as GroupVarsModule
def deep_update_dict(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = deep_update_dict(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
class VarsModule(GroupVarsModule):
    def __init__(self, inventory):
super(VarsModule, self).__init__(inventory)
def _get_defaults(self):
p = load_config_file()
defaults_file = get_config(p, DEFAULTS, 'var_defaults_file',
'ANSIBLE_VAR_DEFAULTS_FILE', None)
if defaults_file:
return yaml.load(open(defaults_file))
def run(self, host):
default_vars = self._get_defaults()
group_vars = super(VarsModule, self).run(host)
if default_vars:
return deep_update_dict(default_vars, group_vars)
return group_vars
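
The behaviour the commit message relies on is that deep_update_dict recurses into nested mappings instead of replacing them wholesale, and that it mutates its first argument in place. A small illustration under Python 2 (matching the iteritems() call above); the variable names are made up:

defaults = {'nova': {'workers': 4, 'debug': False}, 'region': 'a'}
env_specific = {'nova': {'debug': True}}

merged = deep_update_dict(defaults, env_specific)
# nested keys are merged rather than clobbered, so merged (and defaults) is
# {'nova': {'workers': 4, 'debug': True}, 'region': 'a'}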
|
|
79dc92f7089e26f7751fdd8ef53543922750e41d
|
jarn/mkrelease/tests/test_scp.py
|
jarn/mkrelease/tests/test_scp.py
|
import unittest
from jarn.mkrelease.scp import SCP
class HasHostTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar'), True)
def test_slash(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar/baz'), True)
def test_no_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar/baz'), False)
def test_slash_before_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar:baz'), False)
class JoinTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.join('foo:', 'bar'), 'foo:bar')
def test_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/', 'bar'), 'foo:/bar')
def test_inserted_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/bar', 'baz'), 'foo:/bar/baz')
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
Add tests for SCP operations.
|
Add tests for SCP operations.
|
Python
|
bsd-2-clause
|
Jarn/jarn.mkrelease
|
Add tests for SCP operations.
|
import unittest
from jarn.mkrelease.scp import SCP
class HasHostTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar'), True)
def test_slash(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar/baz'), True)
def test_no_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar/baz'), False)
def test_slash_before_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar:baz'), False)
class JoinTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.join('foo:', 'bar'), 'foo:bar')
def test_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/', 'bar'), 'foo:/bar')
def test_inserted_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/bar', 'baz'), 'foo:/bar/baz')
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
<commit_before><commit_msg>Add tests for SCP operations.<commit_after>
|
import unittest
from jarn.mkrelease.scp import SCP
class HasHostTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar'), True)
def test_slash(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar/baz'), True)
def test_no_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar/baz'), False)
def test_slash_before_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar:baz'), False)
class JoinTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.join('foo:', 'bar'), 'foo:bar')
def test_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/', 'bar'), 'foo:/bar')
def test_inserted_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/bar', 'baz'), 'foo:/bar/baz')
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
Add tests for SCP operations.import unittest
from jarn.mkrelease.scp import SCP
class HasHostTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar'), True)
def test_slash(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar/baz'), True)
def test_no_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar/baz'), False)
def test_slash_before_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar:baz'), False)
class JoinTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.join('foo:', 'bar'), 'foo:bar')
def test_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/', 'bar'), 'foo:/bar')
def test_inserted_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/bar', 'baz'), 'foo:/bar/baz')
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
<commit_before><commit_msg>Add tests for SCP operations.<commit_after>import unittest
from jarn.mkrelease.scp import SCP
class HasHostTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar'), True)
def test_slash(self):
scp = SCP()
self.assertEqual(scp.has_host('foo:bar/baz'), True)
def test_no_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar/baz'), False)
def test_slash_before_colon(self):
scp = SCP()
self.assertEqual(scp.has_host('foo/bar:baz'), False)
class JoinTests(unittest.TestCase):
def test_simple(self):
scp = SCP()
self.assertEqual(scp.join('foo:', 'bar'), 'foo:bar')
def test_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/', 'bar'), 'foo:/bar')
def test_inserted_slash(self):
scp = SCP()
self.assertEqual(scp.join('foo:/bar', 'baz'), 'foo:/bar/baz')
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
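
Read together, the tests pin down a simple rule: a location has a host part only when a colon appears before the first slash, and join() inserts a slash unless the base already ends with ':' or '/'. A sketch of an SCP class that would satisfy exactly these tests, for illustration only -- the real implementation in jarn.mkrelease may differ:

class SCP(object):
    def has_host(self, location):
        # a colon marks a host only if no slash comes before it
        colon, slash = location.find(':'), location.find('/')
        return colon >= 0 and (slash < 0 or colon < slash)

    def join(self, base, path):
        if base.endswith(':') or base.endswith('/'):
            return base + path
        return base + '/' + path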
|
|
da1a7ceb0dafdeb14b366139e0ec13fe95c55c00
|
scripts/generate_manifest.py
|
scripts/generate_manifest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import yaml
def merge_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise ValueError("Error merging variables: '{}' and '{}'".format(
type(a).__name__, type(b).__name__
))
result = a.copy()
for key, val in b.items():
if isinstance(result.get(key), dict):
result[key] = merge_dicts(a[key], b[key])
else:
result[key] = val
return result
def load_manifest(manifest_file):
with open(manifest_file) as f:
manifest = yaml.load(f)
if 'inherit' in manifest:
inherit_file = os.path.join(os.path.dirname(manifest_file), manifest.pop('inherit'))
manifest = merge_dicts(load_manifest(inherit_file), manifest)
return manifest
def load_variables(vars_files):
variables = {}
for vars_file in vars_files:
with open(vars_file) as f:
variables = merge_dicts(variables, json.load(f))
return {
k.upper(): json.dumps(v) if isinstance(v, (dict, list)) else v
for k, v in variables.items()
}
def paas_manifest(manifest_file, *vars_files):
"""Generate a PaaS manifest file from a Jinja2 template"""
manifest = load_manifest(manifest_file)
variables = load_variables(vars_files)
manifest['env'].update(variables)
return yaml.dump(manifest, default_flow_style=False, allow_unicode=True)
if __name__ == "__main__":
print(paas_manifest(*sys.argv[1:]))
|
Add a script to generate PaaS manifest with environment variables
|
Add a script to generate PaaS manifest with environment variables
`./scripts/generate_manifest.py` takes a path to a PaaS manifest file
and a list of variable files and prints a single CloudFoundry manifest.
The generated manifest replaces all `inherit` keys by loading the data
from parent manifests. This allows us to pipe the script output directly
to CF CLI, without saving it to disk, which minimises the risk of it
being accidentally included in the deployment artefact. The combined
manifest might differ from the results produced by CF CLI itself, so
the original manifest shouldn't normally be used on its own.
After combining the manifests the script will load and parse all listed
variable files and add them to the manifest's `env` by merging the files
together in the order they were listed (so in case of any key conflicts
the latest file overwrites previous values), upper-casing keys and
processing any list or dictionary values with `json.dumps`, so that they
can be set as environment variables.
This gives us a full list of environment variables that were previously
parsed from the CloudFoundry user services data.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add a script to generate PaaS manifest with environment variables
`./scripts/generate_manifest.py` takes a path to a PaaS manifest file
and a list of variable files and prints a single CloudFoundry manifest.
The generated manifest replaces all `inherit` keys by loading the data
from parent manifests. This allows us to pipe the script output directly
to CF CLI, without saving it to disk, which minimises the risk of it
being accidentally included in the deployment artefact. The combined
manifest might differ from the results produced by CF CLI itself, so
the original manifest shouldn't normally be used on its own.
After combining the manifests the script will load and parse all listed
variable files and add them to the manifest's `env` by merging the files
together in the order they were listed (so in case of any key conflicts
the latest file overwrites previous values), upper-casing keys and
processing any list or dictionary values with `json.dumps`, so that they
can be set as environment variables.
This gives us a full list of environment variables that were previously
parsed from the CloudFoundry user services data.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import yaml
def merge_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise ValueError("Error merging variables: '{}' and '{}'".format(
type(a).__name__, type(b).__name__
))
result = a.copy()
for key, val in b.items():
if isinstance(result.get(key), dict):
result[key] = merge_dicts(a[key], b[key])
else:
result[key] = val
return result
def load_manifest(manifest_file):
with open(manifest_file) as f:
manifest = yaml.load(f)
if 'inherit' in manifest:
inherit_file = os.path.join(os.path.dirname(manifest_file), manifest.pop('inherit'))
manifest = merge_dicts(load_manifest(inherit_file), manifest)
return manifest
def load_variables(vars_files):
variables = {}
for vars_file in vars_files:
with open(vars_file) as f:
variables = merge_dicts(variables, json.load(f))
return {
k.upper(): json.dumps(v) if isinstance(v, (dict, list)) else v
for k, v in variables.items()
}
def paas_manifest(manifest_file, *vars_files):
"""Generate a PaaS manifest file from a Jinja2 template"""
manifest = load_manifest(manifest_file)
variables = load_variables(vars_files)
manifest['env'].update(variables)
return yaml.dump(manifest, default_flow_style=False, allow_unicode=True)
if __name__ == "__main__":
print(paas_manifest(*sys.argv[1:]))
|
<commit_before><commit_msg>Add a script to generate PaaS manifest with environment variables
`./scripts/generate_manifest.py` takes a path to a PaaS manifest file
and a list of variable files and prints a single CloudFoundry manifest.
The generated manifest replaces all `inherit` keys by loading the data
from parent manifests. This allows us to pipe the script output directly
to CF CLI, without saving it to disk, which minimises the risk of it
being accidentally included in the deployment artefact. The combined
manifest might differ from the results produced by CF CLI itself, so
the original manifest shouldn't normally be used on its own.
After combining the manifests the script will load and parse all listed
variable files and add them to the manifest's `env` by merging the files
together in the order they were listed (so in case of any key conflicts
the latest file overwrites previous values), upper-casing keys and
processing any list or dictionary values with `json.dumps`, so that they
can be set as environment variables.
This gives us a full list of environment variables that were previously
parsed from the CloudFoundry user services data.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import yaml
def merge_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise ValueError("Error merging variables: '{}' and '{}'".format(
type(a).__name__, type(b).__name__
))
result = a.copy()
for key, val in b.items():
if isinstance(result.get(key), dict):
result[key] = merge_dicts(a[key], b[key])
else:
result[key] = val
return result
def load_manifest(manifest_file):
with open(manifest_file) as f:
manifest = yaml.load(f)
if 'inherit' in manifest:
inherit_file = os.path.join(os.path.dirname(manifest_file), manifest.pop('inherit'))
manifest = merge_dicts(load_manifest(inherit_file), manifest)
return manifest
def load_variables(vars_files):
variables = {}
for vars_file in vars_files:
with open(vars_file) as f:
variables = merge_dicts(variables, json.load(f))
return {
k.upper(): json.dumps(v) if isinstance(v, (dict, list)) else v
for k, v in variables.items()
}
def paas_manifest(manifest_file, *vars_files):
"""Generate a PaaS manifest file from a Jinja2 template"""
manifest = load_manifest(manifest_file)
variables = load_variables(vars_files)
manifest['env'].update(variables)
return yaml.dump(manifest, default_flow_style=False, allow_unicode=True)
if __name__ == "__main__":
print(paas_manifest(*sys.argv[1:]))
|
Add a script to generate PaaS manifest with environment variables
`./scripts/generate_manifest.py` takes a path to a PaaS manifest file
and a list of variable files and prints a single CloudFoundry manifest.
The generated manifest replaces all `inherit` keys by loading the data
from parent manifests. This allows us to pipe the script output directly
to CF CLI, without saving it to disk, which minimises the risk of it
being accidentally included in the deployment artefact. The combined
manifest might differ from the results produced by CF CLI itself, so
the original manifest shouldn't normally be used on its own.
After combining the manifests the script will load and parse all listed
variable files and add them to the manifest's `env` by merging the files
together in the order they were listed (so in case of any key conflicts
the latest file overwrites previous values), upper-casing keys and
processing any list or dictionary values with `json.dumps`, so that they
can be set as environment variables.
This gives us a full list of environment variables that were previously
parsed from the CloudFoundry user services data.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import yaml
def merge_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise ValueError("Error merging variables: '{}' and '{}'".format(
type(a).__name__, type(b).__name__
))
result = a.copy()
for key, val in b.items():
if isinstance(result.get(key), dict):
result[key] = merge_dicts(a[key], b[key])
else:
result[key] = val
return result
def load_manifest(manifest_file):
with open(manifest_file) as f:
manifest = yaml.load(f)
if 'inherit' in manifest:
inherit_file = os.path.join(os.path.dirname(manifest_file), manifest.pop('inherit'))
manifest = merge_dicts(load_manifest(inherit_file), manifest)
return manifest
def load_variables(vars_files):
variables = {}
for vars_file in vars_files:
with open(vars_file) as f:
variables = merge_dicts(variables, json.load(f))
return {
k.upper(): json.dumps(v) if isinstance(v, (dict, list)) else v
for k, v in variables.items()
}
def paas_manifest(manifest_file, *vars_files):
"""Generate a PaaS manifest file from a Jinja2 template"""
manifest = load_manifest(manifest_file)
variables = load_variables(vars_files)
manifest['env'].update(variables)
return yaml.dump(manifest, default_flow_style=False, allow_unicode=True)
if __name__ == "__main__":
print(paas_manifest(*sys.argv[1:]))
|
<commit_before><commit_msg>Add a script to generate PaaS manifest with environment variables
`./scripts/generate_manifest.py` takes a path to a PaaS manifest file
and a list of variable files and prints a single CloudFoundry manifest.
The generated manifest replaces all `inherit` keys by loading the data
from parent manifests. This allows us to pipe the script output directly
to CF CLI, without saving it to disk, which minimises the risk of it
being accidentally included in the deployment artefact. The combined
manifest might differ from the results produced by CF CLI itself, so
the original manifest shouldn't normally be used on its own.
After combining the manifests the script will load and parse all listed
variable files and add them to the manifest's `env` by merging the files
together in the order they were listed (so in case of any key conflicts
the latest file overwrites previous values), upper-casing keys and
processing any list or dictionary values with `json.dumps`, so that they
can be set as environment variables.
This gives us a full list of environment variables that were previously
parsed from the CloudFoundry user services data.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import yaml
def merge_dicts(a, b):
if not (isinstance(a, dict) and isinstance(b, dict)):
raise ValueError("Error merging variables: '{}' and '{}'".format(
type(a).__name__, type(b).__name__
))
result = a.copy()
for key, val in b.items():
if isinstance(result.get(key), dict):
result[key] = merge_dicts(a[key], b[key])
else:
result[key] = val
return result
def load_manifest(manifest_file):
with open(manifest_file) as f:
manifest = yaml.load(f)
if 'inherit' in manifest:
inherit_file = os.path.join(os.path.dirname(manifest_file), manifest.pop('inherit'))
manifest = merge_dicts(load_manifest(inherit_file), manifest)
return manifest
def load_variables(vars_files):
variables = {}
for vars_file in vars_files:
with open(vars_file) as f:
variables = merge_dicts(variables, json.load(f))
return {
k.upper(): json.dumps(v) if isinstance(v, (dict, list)) else v
for k, v in variables.items()
}
def paas_manifest(manifest_file, *vars_files):
"""Generate a PaaS manifest file from a Jinja2 template"""
manifest = load_manifest(manifest_file)
variables = load_variables(vars_files)
manifest['env'].update(variables)
return yaml.dump(manifest, default_flow_style=False, allow_unicode=True)
if __name__ == "__main__":
print(paas_manifest(*sys.argv[1:]))
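
A concrete run makes the merge order easier to follow; the file names below are hypothetical. Later variable files win on key conflicts, keys are upper-cased, and dict/list values are JSON-encoded so they survive as environment variables:

# hypothetical invocation; the combined manifest is printed so it can be
# piped straight to the CF CLI, as the commit message describes:
#   ./scripts/generate_manifest.py manifest.yml common.json preview.json

# the merge itself is plain dict recursion; later inputs win on conflicts:
merged = merge_dicts({'log_level': 'INFO', 'features': {'a': True}},
                     {'log_level': 'DEBUG'})
# -> {'log_level': 'DEBUG', 'features': {'a': True}}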
|
|
b1af7e4d1a6ee0b954406af5a65936a011db3269
|
openstack/common/fixture/mockpatch.py
|
openstack/common/fixture/mockpatch.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, **kwargs):
self.obj = obj
self.kwargs = kwargs
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
|
Add a fixture for dealing with mock patching.
|
Add a fixture for dealing with mock patching.
Quantum uses a pattern all over their test base, which can be
collapsed down into a simple re-usable fixture.
Change-Id: I5944505ce44ce8b79a685c3ea392f001307b5319
|
Python
|
apache-2.0
|
openstack/oslotest,openstack/oslotest
|
Add a fixture for dealing with mock patching.
Quantum uses a pattern all over their test base, which can be
collapsed down into a simple re-usable fixture.
Change-Id: I5944505ce44ce8b79a685c3ea392f001307b5319
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, **kwargs):
self.obj = obj
self.kwargs = kwargs
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
|
<commit_before><commit_msg>Add a fixture for dealing with mock patching.
Quantum uses a pattern all over their test base, which can be
collapsed down into a simple re-usable fixture.
Change-Id: I5944505ce44ce8b79a685c3ea392f001307b5319<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, **kwargs):
self.obj = obj
self.kwargs = kwargs
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
|
Add a fixture for dealing with mock patching.
Quantum uses a pattern all over their test base, which can be
collapsed down into a simple re-usable fixture.
Change-Id: I5944505ce44ce8b79a685c3ea392f001307b5319# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, **kwargs):
self.obj = obj
self.kwargs = kwargs
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
|
<commit_before><commit_msg>Add a fixture for dealing with mock patching.
Quantum uses a pattern all over their test base, which can be
collapsed down into a simple re-usable fixture.
Change-Id: I5944505ce44ce8b79a685c3ea392f001307b5319<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
def __init__(self, obj, attr, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
def setUp(self):
super(PatchObject, self).setUp()
_p = mock.patch.object(self.obj, self.attr, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
def __init__(self, obj, **kwargs):
self.obj = obj
self.kwargs = kwargs
def setUp(self):
super(Patch, self).setUp()
_p = mock.patch(self.obj, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
|
|
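A minimal usage sketch for the fixtures above, assuming the Patch class from this record is importable and using the fixtures library's TestWithFixtures mixin; the test name and the time.sleep target are illustrative, not part of the record:
import time
import unittest
import fixtures
class PatchUsageTest(fixtures.TestWithFixtures, unittest.TestCase):
    def test_sleep_is_patched(self):
        # useFixture runs the fixture's setUp and registers its cleanup,
        # so mock.patch is stopped automatically when the test finishes.
        fake_sleep = self.useFixture(Patch('time.sleep')).mock
        time.sleep(10)  # intercepted by the mock; returns immediately
        fake_sleep.assert_called_once_with(10)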
d70f973b8b77ad30f6c6d893adda90d2a084d7bb
|
scripts/mc_monitor_correlator.py
|
scripts/mc_monitor_correlator.py
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Gather correlator status info and log them into M&C
"""
from __future__ import absolute_import, division, print_function
import sqlalchemy.exc
import sys
import time
import traceback
from hera_mc import mc
MONITORING_INTERVAL = 60 # seconds
parser = mc.get_mc_argument_parser()
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
try:
while True:
time.sleep(MONITORING_INTERVAL)
try:
session.add_correlator_control_state_from_corrcm()
except Exception as e:
print('%s -- error adding correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
try:
session.commit()
except sqlalchemy.exc.SQLAlchemyError as e:
print('%s -- SQL error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
session.rollback()
continue
except Exception as e:
print('%s -- error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
except KeyboardInterrupt:
print("exiting on SIGINT")
sys.exit()
|
Add monitoring daemon for correlator control state
|
Add monitoring daemon for correlator control state
|
Python
|
bsd-2-clause
|
HERA-Team/hera_mc,HERA-Team/Monitor_and_Control,HERA-Team/hera_mc
|
Add monitoring daemon for correlator control state
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Gather correlator status info and log them into M&C
"""
from __future__ import absolute_import, division, print_function
import sqlalchemy.exc
import sys
import time
import traceback
from hera_mc import mc
MONITORING_INTERVAL = 60 # seconds
parser = mc.get_mc_argument_parser()
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
try:
while True:
time.sleep(MONITORING_INTERVAL)
try:
session.add_correlator_control_state_from_corrcm()
except Exception as e:
print('%s -- error adding correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
try:
session.commit()
except sqlalchemy.exc.SQLAlchemyError as e:
print('%s -- SQL error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
session.rollback()
continue
except Exception as e:
print('%s -- error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
except KeyboardInterrupt:
print("exiting on SIGINT")
sys.exit()
|
<commit_before><commit_msg>Add monitoring daemon for correlator control state<commit_after>
|
#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Gather correlator status info and log them into M&C
"""
from __future__ import absolute_import, division, print_function
import sqlalchemy.exc
import sys
import time
import traceback
from hera_mc import mc
MONITORING_INTERVAL = 60 # seconds
parser = mc.get_mc_argument_parser()
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
try:
while True:
time.sleep(MONITORING_INTERVAL)
try:
session.add_correlator_control_state_from_corrcm()
except Exception as e:
print('%s -- error adding correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
try:
session.commit()
except sqlalchemy.exc.SQLAlchemyError as e:
print('%s -- SQL error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
session.rollback()
continue
except Exception as e:
print('%s -- error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
except KeyboardInterrupt:
print("exiting on SIGINT")
sys.exit()
|
Add monitoring daemon for correlator control state#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Gather correlator status info and log them into M&C
"""
from __future__ import absolute_import, division, print_function
import sqlalchemy.exc
import sys
import time
import traceback
from hera_mc import mc
MONITORING_INTERVAL = 60 # seconds
parser = mc.get_mc_argument_parser()
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
try:
while True:
time.sleep(MONITORING_INTERVAL)
try:
session.add_correlator_control_state_from_corrcm()
except Exception as e:
print('%s -- error adding correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
try:
session.commit()
except sqlalchemy.exc.SQLAlchemyError as e:
print('%s -- SQL error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
session.rollback()
continue
except Exception as e:
print('%s -- error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
except KeyboardInterrupt:
print("exiting on SIGINT")
sys.exit()
|
<commit_before><commit_msg>Add monitoring daemon for correlator control state<commit_after>#! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Gather correlator status info and log them into M&C
"""
from __future__ import absolute_import, division, print_function
import sqlalchemy.exc
import sys
import time
import traceback
from hera_mc import mc
MONITORING_INTERVAL = 60 # seconds
parser = mc.get_mc_argument_parser()
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
try:
while True:
time.sleep(MONITORING_INTERVAL)
try:
session.add_correlator_control_state_from_corrcm()
except Exception as e:
print('%s -- error adding correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
try:
session.commit()
except sqlalchemy.exc.SQLAlchemyError as e:
print('%s -- SQL error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
session.rollback()
continue
except Exception as e:
print('%s -- error committing new correlator control state' % time.asctime(), file=sys.stderr)
traceback.print_exc(file=sys.stderr)
continue
except KeyboardInterrupt:
print("exiting on SIGINT")
sys.exit()
|
|
2d88b2ffaa3c6348fcc246185e48abc52ee8804d
|
backends.py
|
backends.py
|
from django.contrib.auth.backends import ModelBackend
from .decorators import monitor_login
class MonitoredModelBackend(ModelBackend):
@monitor_login
def authenticate(self, **credentials):
return super(MonitoredModelBackend, self).authenticate(**credentials)
|
Add monitored model auth backend
|
Add monitored model auth backend
|
Python
|
bsd-3-clause
|
mysociety/django-failedloginblocker
|
Add monitored model auth backend
|
from django.contrib.auth.backends import ModelBackend
from .decorators import monitor_login
class MonitoredModelBackend(ModelBackend):
@monitor_login
def authenticate(self, **credentials):
return super(MonitoredModelBackend, self).authenticate(**credentials)
|
<commit_before><commit_msg>Add monitored model auth backend<commit_after>
|
from django.contrib.auth.backends import ModelBackend
from .decorators import monitor_login
class MonitoredModelBackend(ModelBackend):
@monitor_login
def authenticate(self, **credentials):
return super(MonitoredModelBackend, self).authenticate(**credentials)
|
Add monitored model auth backendfrom django.contrib.auth.backends import ModelBackend
from .decorators import monitor_login
class MonitoredModelBackend(ModelBackend):
@monitor_login
def authenticate(self, **credentials):
return super(MonitoredModelBackend, self).authenticate(**credentials)
|
<commit_before><commit_msg>Add monitored model auth backend<commit_after>from django.contrib.auth.backends import ModelBackend
from .decorators import monitor_login
class MonitoredModelBackend(ModelBackend):
@monitor_login
def authenticate(self, **credentials):
return super(MonitoredModelBackend, self).authenticate(**credentials)
|
|
f665fd7c8e3100427d589c9f82c847526aa41c86
|
test_aversion.py
|
test_aversion.py
|
# Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest2
import aversion
class QuotedSplitTest(unittest2.TestCase):
def test_simple_comma(self):
result = list(aversion.quoted_split(",value1,value2 , value 3 ,", ','))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_comma(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2,3\\"",'
'application/example;q=0.5;version="3;4"', ','))
self.assertEqual(result, [
'application/example;q=1;version="2,3\\""',
'application/example;q=0.5;version="3;4"',
])
def test_simple_semicolon(self):
result = list(aversion.quoted_split(";value1;value2 ; value 3 ;", ';'))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_semicolon(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2;3\\""', ';'))
self.assertEqual(result, [
'application/example',
'q=1',
'version="2;3\\""',
])
|
Set up test file and add tests on quoted_split().
|
Set up test file and add tests on quoted_split().
|
Python
|
apache-2.0
|
klmitch/aversion
|
Set up test file and add tests on quoted_split().
|
# Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest2
import aversion
class QuotedSplitTest(unittest2.TestCase):
def test_simple_comma(self):
result = list(aversion.quoted_split(",value1,value2 , value 3 ,", ','))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_comma(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2,3\\"",'
'application/example;q=0.5;version="3;4"', ','))
self.assertEqual(result, [
'application/example;q=1;version="2,3\\""',
'application/example;q=0.5;version="3;4"',
])
def test_simple_semicolon(self):
result = list(aversion.quoted_split(";value1;value2 ; value 3 ;", ';'))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_semicolon(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2;3\\""', ';'))
self.assertEqual(result, [
'application/example',
'q=1',
'version="2;3\\""',
])
|
<commit_before><commit_msg>Set up test file and add tests on quoted_split().<commit_after>
|
# Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest2
import aversion
class QuotedSplitTest(unittest2.TestCase):
def test_simple_comma(self):
result = list(aversion.quoted_split(",value1,value2 , value 3 ,", ','))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_comma(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2,3\\"",'
'application/example;q=0.5;version="3;4"', ','))
self.assertEqual(result, [
'application/example;q=1;version="2,3\\""',
'application/example;q=0.5;version="3;4"',
])
def test_simple_semicolon(self):
result = list(aversion.quoted_split(";value1;value2 ; value 3 ;", ';'))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_semicolon(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2;3\\""', ';'))
self.assertEqual(result, [
'application/example',
'q=1',
'version="2;3\\""',
])
|
Set up test file and add tests on quoted_split().# Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest2
import aversion
class QuotedSplitTest(unittest2.TestCase):
def test_simple_comma(self):
result = list(aversion.quoted_split(",value1,value2 , value 3 ,", ','))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_comma(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2,3\\"",'
'application/example;q=0.5;version="3;4"', ','))
self.assertEqual(result, [
'application/example;q=1;version="2,3\\""',
'application/example;q=0.5;version="3;4"',
])
def test_simple_semicolon(self):
result = list(aversion.quoted_split(";value1;value2 ; value 3 ;", ';'))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_semicolon(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2;3\\""', ';'))
self.assertEqual(result, [
'application/example',
'q=1',
'version="2;3\\""',
])
|
<commit_before><commit_msg>Set up test file and add tests on quoted_split().<commit_after># Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest2
import aversion
class QuotedSplitTest(unittest2.TestCase):
def test_simple_comma(self):
result = list(aversion.quoted_split(",value1,value2 , value 3 ,", ','))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_comma(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2,3\\"",'
'application/example;q=0.5;version="3;4"', ','))
self.assertEqual(result, [
'application/example;q=1;version="2,3\\""',
'application/example;q=0.5;version="3;4"',
])
def test_simple_semicolon(self):
result = list(aversion.quoted_split(";value1;value2 ; value 3 ;", ';'))
self.assertEqual(result,
['', 'value1', 'value2 ', ' value 3 '])
def test_complex_semicolon(self):
result = list(aversion.quoted_split(
'application/example;q=1;version="2;3\\""', ';'))
self.assertEqual(result, [
'application/example',
'q=1',
'version="2;3\\""',
])
|
|
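For reference, one possible quoted_split implementation consistent with the tests above; this is an illustrative sketch, not aversion's actual code:
def quoted_split(string, sep):
    # Split on sep, ignoring separators inside double-quoted sections;
    # a backslash escapes the next character while inside quotes.
    # A trailing empty field is dropped, matching the tests above.
    result = []
    current = ''
    in_quotes = False
    escaped = False
    for ch in string:
        if escaped:
            current += ch
            escaped = False
        elif in_quotes and ch == '\\':
            current += ch
            escaped = True
        elif ch == '"':
            current += ch
            in_quotes = not in_quotes
        elif ch == sep and not in_quotes:
            result.append(current)
            current = ''
        else:
            current += ch
    if current:
        result.append(current)
    return result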
cc7ea8b4e51e1f5d8e580dea89141dcca7032c32
|
ureport/polls/migrations/0052_auto_20180327_2024.py
|
ureport/polls/migrations/0052_auto_20180327_2024.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-27 20:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0051_auto_20180316_0912'),
]
operations = [
migrations.AlterField(
model_name='pollresult',
name='backend',
field=models.CharField(default='rapidpro', max_length=16),
),
]
|
Add default value and constraints migrations
|
Add default value and constraints migrations
|
Python
|
agpl-3.0
|
Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport
|
Add default value and constraints migrations
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-27 20:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0051_auto_20180316_0912'),
]
operations = [
migrations.AlterField(
model_name='pollresult',
name='backend',
field=models.CharField(default='rapidpro', max_length=16),
),
]
|
<commit_before><commit_msg>Add default value and constraints migrations<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-27 20:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0051_auto_20180316_0912'),
]
operations = [
migrations.AlterField(
model_name='pollresult',
name='backend',
field=models.CharField(default='rapidpro', max_length=16),
),
]
|
Add default value and constraints migrations# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-27 20:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0051_auto_20180316_0912'),
]
operations = [
migrations.AlterField(
model_name='pollresult',
name='backend',
field=models.CharField(default='rapidpro', max_length=16),
),
]
|
<commit_before><commit_msg>Add default value and constraints migrations<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-27 20:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0051_auto_20180316_0912'),
]
operations = [
migrations.AlterField(
model_name='pollresult',
name='backend',
field=models.CharField(default='rapidpro', max_length=16),
),
]
|
|
c466242d652683f8d22164f7b322399100961bcc
|
anytownlib/maps.py
|
anytownlib/maps.py
|
"""Functions to retrieve images for the maps."""
def _retrieve_google_maps_image_url(coords, zoom_level):
lat, lng = coords
return (
'http://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom={2}'
'&scale=false&size=600x300&maptype=roadmap&format=png'
'&markers=size:small%7Ccolor:0xff0000%7Clabel:%7C{0},{1}'
).format(lat, lng, zoom_level)
def retrieve_continent_level_image(coords):
"""Get the Google Maps URL for a continent zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 4)
def retrieve_regional_level_image(coords):
"""Get the Google Maps URL for a regional zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 7)
|
Add Anytown Maps utils library
|
Add Anytown Maps utils library
|
Python
|
mit
|
andrewyang96/AnytownMapper,andrewyang96/AnytownMapper,andrewyang96/AnytownMapper
|
Add Anytown Maps utils library
|
"""Functions to retrieve images for the maps."""
def _retrieve_google_maps_image_url(coords, zoom_level):
lat, lng = coords
return (
'http://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom={2}'
'&scale=false&size=600x300&maptype=roadmap&format=png'
'&markers=size:small%7Ccolor:0xff0000%7Clabel:%7C{0},{1}'
).format(lat, lng, zoom_level)
def retrieve_continent_level_image(coords):
"""Get the Google Maps URL for a continent zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 4)
def retrieve_regional_level_image(coords):
"""Get the Google Maps URL for a regional zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 7)
|
<commit_before><commit_msg>Add Anytown Maps utils library<commit_after>
|
"""Functions to retrieve images for the maps."""
def _retrieve_google_maps_image_url(coords, zoom_level):
lat, lng = coords
return (
'http://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom={2}'
'&scale=false&size=600x300&maptype=roadmap&format=png'
'&markers=size:small%7Ccolor:0xff0000%7Clabel:%7C{0},{1}'
).format(lat, lng, zoom_level)
def retrieve_continent_level_image(coords):
"""Get the Google Maps URL for a continent zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 4)
def retrieve_regional_level_image(coords):
"""Get the Google Maps URL for a regional zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 7)
|
Add Anytown Maps utils library"""Functions to retrieve images for the maps."""
def _retrieve_google_maps_image_url(coords, zoom_level):
lat, lng = coords
return (
'http://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom={2}'
'&scale=false&size=600x300&maptype=roadmap&format=png'
'&markers=size:small%7Ccolor:0xff0000%7Clabel:%7C{0},{1}'
).format(lat, lng, zoom_level)
def retrieve_continent_level_image(coords):
"""Get the Google Maps URL for a continent zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 4)
def retrieve_regional_level_image(coords):
"""Get the Google Maps URL for a regional zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 7)
|
<commit_before><commit_msg>Add Anytown Maps utils library<commit_after>"""Functions to retrieve images for the maps."""
def _retrieve_google_maps_image_url(coords, zoom_level):
lat, lng = coords
return (
'http://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom={2}'
'&scale=false&size=600x300&maptype=roadmap&format=png'
'&markers=size:small%7Ccolor:0xff0000%7Clabel:%7C{0},{1}'
).format(lat, lng, zoom_level)
def retrieve_continent_level_image(coords):
"""Get the Google Maps URL for a continent zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 4)
def retrieve_regional_level_image(coords):
"""Get the Google Maps URL for a regional zoom-level map image."""
return _retrieve_google_maps_image_url(coords, 7)
|
|
b9a28ad7358b64211905957ef854026cf03764c8
|
keras_tf_atrous_bug.py
|
keras_tf_atrous_bug.py
|
import numpy as np
from keras.models import Model
from keras.layers import Input, AtrousConvolution1D
inp = Input((100, 1))
M = AtrousConvolution1D(1, 2, atrous_rate=25, border_mode='same')(inp)
M = Model(inp, M)
M.compile('sgd', 'mse')
M.train_on_batch(np.random.rand(1, 100, 1), np.random.rand(1, 100, 1))
|
Introduce Keras TF atrous bug
|
Introduce Keras TF atrous bug
|
Python
|
apache-2.0
|
israelg99/eva
|
Introduce Keras TF atrous bug
|
import numpy as np
from keras.models import Model
from keras.layers import Input, AtrousConvolution1D
inp = Input((100, 1))
M = AtrousConvolution1D(1, 2, atrous_rate=25, border_mode='same')(inp)
M = Model(inp, M)
M.compile('sgd', 'mse')
M.train_on_batch(np.random.rand(1, 100, 1), np.random.rand(1, 100, 1))
|
<commit_before><commit_msg>Introduce Keras TF atrous bug<commit_after>
|
import numpy as np
from keras.models import Model
from keras.layers import Input, AtrousConvolution1D
inp = Input((100, 1))
M = AtrousConvolution1D(1, 2, atrous_rate=25, border_mode='same')(inp)
M = Model(inp, M)
M.compile('sgd', 'mse')
M.train_on_batch(np.random.rand(1, 100, 1), np.random.rand(1, 100, 1))
|
Introduce Keras TF atrous bugimport numpy as np
from keras.models import Model
from keras.layers import Input, AtrousConvolution1D
inp = Input((100, 1))
M = AtrousConvolution1D(1, 2, atrous_rate=25, border_mode='same')(inp)
M = Model(inp, M)
M.compile('sgd', 'mse')
M.train_on_batch(np.random.rand(1, 100, 1), np.random.rand(1, 100, 1))
|
<commit_before><commit_msg>Introduce Keras TF atrous bug<commit_after>import numpy as np
from keras.models import Model
from keras.layers import Input, AtrousConvolution1D
inp = Input((100, 1))
M = AtrousConvolution1D(1, 2, atrous_rate=25, border_mode='same')(inp)
M = Model(inp, M)
M.compile('sgd', 'mse')
M.train_on_batch(np.random.rand(1, 100, 1), np.random.rand(1, 100, 1))
|
|
d9f60cba7af74dd724306111b366dda78b363236
|
common/djangoapps/django_comment_common/migrations/0008_role_user_index.py
|
common/djangoapps/django_comment_common/migrations/0008_role_user_index.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_comment_common', '0007_discussionsidmapping'),
]
operations = [
migrations.RunSQL(
'CREATE INDEX dcc_role_users_user_role_idx ON django_comment_client_role_users(user_id, role_id);'
),
]
|
Add a (user_id, role_id) index to dcc_role_users table.
|
Add a (user_id, role_id) index to dcc_role_users table.
|
Python
|
agpl-3.0
|
ESOedX/edx-platform,philanthropy-u/edx-platform,EDUlib/edx-platform,eduNEXT/edunext-platform,ESOedX/edx-platform,msegado/edx-platform,philanthropy-u/edx-platform,ahmedaljazzar/edx-platform,eduNEXT/edx-platform,ESOedX/edx-platform,ahmedaljazzar/edx-platform,edx-solutions/edx-platform,edx-solutions/edx-platform,EDUlib/edx-platform,angelapper/edx-platform,jolyonb/edx-platform,arbrandes/edx-platform,teltek/edx-platform,eduNEXT/edunext-platform,jolyonb/edx-platform,eduNEXT/edx-platform,eduNEXT/edx-platform,msegado/edx-platform,angelapper/edx-platform,appsembler/edx-platform,cpennington/edx-platform,a-parhom/edx-platform,msegado/edx-platform,edx-solutions/edx-platform,arbrandes/edx-platform,msegado/edx-platform,appsembler/edx-platform,jolyonb/edx-platform,a-parhom/edx-platform,jolyonb/edx-platform,edx/edx-platform,edx/edx-platform,stvstnfrd/edx-platform,ahmedaljazzar/edx-platform,EDUlib/edx-platform,eduNEXT/edx-platform,mitocw/edx-platform,mitocw/edx-platform,a-parhom/edx-platform,stvstnfrd/edx-platform,eduNEXT/edunext-platform,teltek/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,a-parhom/edx-platform,teltek/edx-platform,ahmedaljazzar/edx-platform,edx/edx-platform,arbrandes/edx-platform,angelapper/edx-platform,cpennington/edx-platform,mitocw/edx-platform,ESOedX/edx-platform,stvstnfrd/edx-platform,edx/edx-platform,eduNEXT/edunext-platform,cpennington/edx-platform,msegado/edx-platform,edx-solutions/edx-platform,appsembler/edx-platform,cpennington/edx-platform,teltek/edx-platform,arbrandes/edx-platform,EDUlib/edx-platform,appsembler/edx-platform,philanthropy-u/edx-platform,angelapper/edx-platform,philanthropy-u/edx-platform
|
Add a (user_id, role_id) index to dcc_role_users table.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_comment_common', '0007_discussionsidmapping'),
]
operations = [
migrations.RunSQL(
'CREATE INDEX dcc_role_users_user_role_idx ON django_comment_client_role_users(user_id, role_id);'
),
]
|
<commit_before><commit_msg>Add a (user_id, role_id) index to dcc_role_users table.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_comment_common', '0007_discussionsidmapping'),
]
operations = [
migrations.RunSQL(
'CREATE INDEX dcc_role_users_user_role_idx ON django_comment_client_role_users(user_id, role_id);'
),
]
|
Add a (user_id, role_id) index to dcc_role_users table.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_comment_common', '0007_discussionsidmapping'),
]
operations = [
migrations.RunSQL(
'CREATE INDEX dcc_role_users_user_role_idx ON django_comment_client_role_users(user_id, role_id);'
),
]
|
<commit_before><commit_msg>Add a (user_id, role_id) index to dcc_role_users table.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('django_comment_common', '0007_discussionsidmapping'),
]
operations = [
migrations.RunSQL(
'CREATE INDEX dcc_role_users_user_role_idx ON django_comment_client_role_users(user_id, role_id);'
),
]
|
|
86de63f819a67aafd71e1565266e1bec09bc62c2
|
corehq/apps/accounting/management/commands/find_inactive_custom_modules.py
|
corehq/apps/accounting/management/commands/find_inactive_custom_modules.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from collections import defaultdict
from django.apps import apps
from django.core.management import BaseCommand
from django.conf import settings
from importlib import import_module
from corehq.apps.accounting.models import Subscription, SoftwarePlanEdition
from corehq.apps.domain.models import Domain
import six
class Command(BaseCommand):
def handle(self, **kwargs):
domains_by_module = defaultdict(list)
for domain, module in settings.DOMAIN_MODULE_MAP.items():
domains_by_module[module].append(domain)
with open("custom-modules.csv", "w") as f:
writer = csv.writer(f)
writer.writerow([
'module',
'path',
'domains',
'domains exist',
'plans',
'in DOMAIN_MODULE_MAP',
'likely removable',
])
visited_paths = set()
for module, domains in domains_by_module.items():
try:
path = import_module(module).__path__[0]
except ImportError:
path = "PATH NOT FOUND"
visited_paths.add(path)
writer.writerow(self.log_module_info(module, path, domains, in_module_map=True))
for app_config in apps.get_app_configs():
if (app_config.path.startswith(settings.FILEPATH + "/custom")
and app_config.path not in visited_paths):
# Just check and see if the label corresponds to a domain
writer.writerow(
self.log_module_info(
app_config.label,
app_config.path,
[app_config.label],
in_module_map=False
)
)
def log_module_info(self, module, path, domains, in_module_map):
domains_exist = []
plans = []
all_community = True
for domain in domains:
domain_obj = Domain.get_by_name(domain)
plan = "Not Found"
domains_exist.append(domain_obj is not None)
if domain_obj:
subscription = Subscription.get_active_subscription_by_domain(domain)
if subscription:
plan = subscription.plan_version.plan.name
if subscription.plan_version.plan.edition != SoftwarePlanEdition.COMMUNITY:
all_community = False
plans.append(plan)
return [
module,
path[len(settings.FILEPATH) + 1:],
" | ".join(domains),
" | ".join(map(six.text_type, domains_exist)),
" | ".join(plans),
in_module_map,
all(domains_exist) and all_community,
]
|
Add management command to identify stale custom modules
|
Add management command to identify stale custom modules
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add management command to identify stale custom modules
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from collections import defaultdict
from django.apps import apps
from django.core.management import BaseCommand
from django.conf import settings
from importlib import import_module
from corehq.apps.accounting.models import Subscription, SoftwarePlanEdition
from corehq.apps.domain.models import Domain
import six
class Command(BaseCommand):
def handle(self, **kwargs):
domains_by_module = defaultdict(list)
for domain, module in settings.DOMAIN_MODULE_MAP.items():
domains_by_module[module].append(domain)
with open("custom-modules.csv", "w") as f:
writer = csv.writer(f)
writer.writerow([
'module',
'path',
'domains',
'domains exist',
'plans',
'in DOMAIN_MODULE_MAP',
'likely removable',
])
visited_paths = set()
for module, domains in domains_by_module.items():
try:
path = import_module(module).__path__[0]
except ImportError:
path = "PATH NOT FOUND"
visited_paths.add(path)
writer.writerow(self.log_module_info(module, path, domains, in_module_map=True))
for app_config in apps.get_app_configs():
if (app_config.path.startswith(settings.FILEPATH + "/custom")
and app_config.path not in visited_paths):
# Just check and see if the label corresponds to a domain
writer.writerow(
self.log_module_info(
app_config.label,
app_config.path,
[app_config.label],
in_module_map=False
)
)
def log_module_info(self, module, path, domains, in_module_map):
domains_exist = []
plans = []
all_community = True
for domain in domains:
domain_obj = Domain.get_by_name(domain)
plan = "Not Found"
domains_exist.append(domain_obj is not None)
if domain_obj:
subscription = Subscription.get_active_subscription_by_domain(domain)
if subscription:
plan = subscription.plan_version.plan.name
if subscription.plan_version.plan.edition != SoftwarePlanEdition.COMMUNITY:
all_community = False
plans.append(plan)
return [
module,
path[len(settings.FILEPATH) + 1:],
" | ".join(domains),
" | ".join(map(six.text_type, domains_exist)),
" | ".join(plans),
in_module_map,
all(domains_exist) and all_community,
]
|
<commit_before><commit_msg>Add management command to identify stale custom modules<commit_after>
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from collections import defaultdict
from django.apps import apps
from django.core.management import BaseCommand
from django.conf import settings
from importlib import import_module
from corehq.apps.accounting.models import Subscription, SoftwarePlanEdition
from corehq.apps.domain.models import Domain
import six
class Command(BaseCommand):
def handle(self, **kwargs):
domains_by_module = defaultdict(list)
for domain, module in settings.DOMAIN_MODULE_MAP.items():
domains_by_module[module].append(domain)
with open("custom-modules.csv", "w") as f:
writer = csv.writer(f)
writer.writerow([
'module',
'path',
'domains',
'domains exist',
'plans',
'in DOMAIN_MODULE_MAP',
'likely removable',
])
visited_paths = set()
for module, domains in domains_by_module.items():
try:
path = import_module(module).__path__[0]
except ImportError:
path = "PATH NOT FOUND"
visited_paths.add(path)
writer.writerow(self.log_module_info(module, path, domains, in_module_map=True))
for app_config in apps.get_app_configs():
if (app_config.path.startswith(settings.FILEPATH + "/custom")
and app_config.path not in visited_paths):
# Just check and see if the label corresponds to a domain
writer.writerow(
self.log_module_info(
app_config.label,
app_config.path,
[app_config.label],
in_module_map=False
)
)
def log_module_info(self, module, path, domains, in_module_map):
domains_exist = []
plans = []
all_community = True
for domain in domains:
domain_obj = Domain.get_by_name(domain)
plan = "Not Found"
domains_exist.append(domain_obj is not None)
if domain_obj:
subscription = Subscription.get_active_subscription_by_domain(domain)
if subscription:
plan = subscription.plan_version.plan.name
if subscription.plan_version.plan.edition != SoftwarePlanEdition.COMMUNITY:
all_community = False
plans.append(plan)
return [
module,
path[len(settings.FILEPATH) + 1:],
" | ".join(domains),
" | ".join(map(six.text_type, domains_exist)),
" | ".join(plans),
in_module_map,
all(domains_exist) and all_community,
]
|
Add management command to identify stale custom modulesfrom __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from collections import defaultdict
from django.apps import apps
from django.core.management import BaseCommand
from django.conf import settings
from importlib import import_module
from corehq.apps.accounting.models import Subscription, SoftwarePlanEdition
from corehq.apps.domain.models import Domain
import six
class Command(BaseCommand):
def handle(self, **kwargs):
domains_by_module = defaultdict(list)
for domain, module in settings.DOMAIN_MODULE_MAP.items():
domains_by_module[module].append(domain)
with open("custom-modules.csv", "w") as f:
writer = csv.writer(f)
writer.writerow([
'module',
'path',
'domains',
'domains exist',
'plans',
'in DOMAIN_MODULE_MAP',
'likely removable',
])
visited_paths = set()
for module, domains in domains_by_module.items():
try:
path = import_module(module).__path__[0]
except ImportError:
path = "PATH NOT FOUND"
visited_paths.add(path)
writer.writerow(self.log_module_info(module, path, domains, in_module_map=True))
for app_config in apps.get_app_configs():
if (app_config.path.startswith(settings.FILEPATH + "/custom")
and app_config.path not in visited_paths):
# Just check and see if the label corresponds to a domain
writer.writerow(
self.log_module_info(
app_config.label,
app_config.path,
[app_config.label],
in_module_map=False
)
)
def log_module_info(self, module, path, domains, in_module_map):
domains_exist = []
plans = []
all_community = True
for domain in domains:
domain_obj = Domain.get_by_name(domain)
plan = "Not Found"
domains_exist.append(domain_obj is not None)
if domain_obj:
subscription = Subscription.get_active_subscription_by_domain(domain)
if subscription:
plan = subscription.plan_version.plan.name
if subscription.plan_version.plan.edition != SoftwarePlanEdition.COMMUNITY:
all_community = False
plans.append(plan)
return [
module,
path[len(settings.FILEPATH) + 1:],
" | ".join(domains),
" | ".join(map(six.text_type, domains_exist)),
" | ".join(plans),
in_module_map,
all(domains_exist) and all_community,
]
|
<commit_before><commit_msg>Add management command to identify stale custom modules<commit_after>from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import csv
from collections import defaultdict
from django.apps import apps
from django.core.management import BaseCommand
from django.conf import settings
from importlib import import_module
from corehq.apps.accounting.models import Subscription, SoftwarePlanEdition
from corehq.apps.domain.models import Domain
import six
class Command(BaseCommand):
def handle(self, **kwargs):
domains_by_module = defaultdict(list)
for domain, module in settings.DOMAIN_MODULE_MAP.items():
domains_by_module[module].append(domain)
with open("custom-modules.csv", "w") as f:
writer = csv.writer(f)
writer.writerow([
'module',
'path',
'domains',
'domains exist',
'plans',
'in DOMAIN_MODULE_MAP',
'likely removable',
])
visited_paths = set()
for module, domains in domains_by_module.items():
try:
path = import_module(module).__path__[0]
except ImportError:
path = "PATH NOT FOUND"
visited_paths.add(path)
writer.writerow(self.log_module_info(module, path, domains, in_module_map=True))
for app_config in apps.get_app_configs():
if (app_config.path.startswith(settings.FILEPATH + "/custom")
and app_config.path not in visited_paths):
# Just check and see if the label corresponds to a domain
writer.writerow(
self.log_module_info(
app_config.label,
app_config.path,
[app_config.label],
in_module_map=False
)
)
def log_module_info(self, module, path, domains, in_module_map):
domains_exist = []
plans = []
all_community = True
for domain in domains:
domain_obj = Domain.get_by_name(domain)
plan = "Not Found"
domains_exist.append(domain_obj is not None)
if domain_obj:
subscription = Subscription.get_active_subscription_by_domain(domain)
if subscription:
plan = subscription.plan_version.plan.name
if subscription.plan_version.plan.edition != SoftwarePlanEdition.COMMUNITY:
all_community = False
plans.append(plan)
return [
module,
path[len(settings.FILEPATH) + 1:],
" | ".join(domains),
" | ".join(map(six.text_type, domains_exist)),
" | ".join(plans),
in_module_map,
all(domains_exist) and all_community,
]
|
|
b47bf6a862075af25d7dbfa022f403997474414d
|
dragonflow/common/utils.py
|
dragonflow/common/utils.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import eventlet
from dragonflow._i18n import _LE
LOG = logging.getLogger(__name__)
eventlet.monkey_patch()
class DFDaemon(object):
def __init__(self):
super(DFDaemon, self).__init__()
self.pool = eventlet.GreenPool()
self.is_daemonize = False
self.thread = None
def daemonize(self, run):
if self.is_daemonize:
LOG.error(_LE("already daemonized"))
return
self.is_daemonize = True
self.thread = self.pool.spawn(run)
eventlet.sleep(0)
return self.thread
def stop(self):
if self.is_daemonize and self.thread:
eventlet.greenthread.kill(self.thread)
eventlet.sleep(0)
self.thread = None
self.is_daemonize = False
|
Add Dragonflow DFDaemon base class
|
Add Dragonflow DFDaemon base class
Change-Id: Ie84681792cdc13b3127a95a18121d4b9a8ff8dba
|
Python
|
apache-2.0
|
FrankDuan/df_code,openstack/dragonflow,openstack/dragonflow,FrankDuan/df_code,FrankDuan/df_code,openstack/dragonflow
|
Add Dragonflow DFDaemon base class
Change-Id: Ie84681792cdc13b3127a95a18121d4b9a8ff8dba
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import eventlet
from dragonflow._i18n import _LE
LOG = logging.getLogger(__name__)
eventlet.monkey_patch()
class DFDaemon(object):
def __init__(self):
super(DFDaemon, self).__init__()
self.pool = eventlet.GreenPool()
self.is_daemonize = False
self.thread = None
def daemonize(self, run):
if self.is_daemonize:
LOG.error(_LE("already daemonized"))
return
self.is_daemonize = True
self.thread = self.pool.spawn(run)
eventlet.sleep(0)
return self.thread
def stop(self):
if self.is_daemonize and self.thread:
eventlet.greenthread.kill(self.thread)
eventlet.sleep(0)
self.thread = None
self.is_daemonize = False
|
<commit_before><commit_msg>Add Dargonflow DFDaemon base class
Change-Id: Ie84681792cdc13b3127a95a18121d4b9a8ff8dba<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import eventlet
from dragonflow._i18n import _LE
LOG = logging.getLogger(__name__)
eventlet.monkey_patch()
class DFDaemon(object):
def __init__(self):
super(DFDaemon, self).__init__()
self.pool = eventlet.GreenPool()
self.is_daemonize = False
self.thread = None
def daemonize(self, run):
if self.is_daemonize:
LOG.error(_LE("already daemonized"))
return
self.is_daemonize = True
self.thread = self.pool.spawn(run)
eventlet.sleep(0)
return self.thread
def stop(self):
if self.is_daemonize and self.thread:
eventlet.greenthread.kill(self.thread)
eventlet.sleep(0)
self.thread = None
self.is_daemonize = False
|
Add Dragonflow DFDaemon base class
Change-Id: Ie84681792cdc13b3127a95a18121d4b9a8ff8dba# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import eventlet
from dragonflow._i18n import _LE
LOG = logging.getLogger(__name__)
eventlet.monkey_patch()
class DFDaemon(object):
def __init__(self):
super(DFDaemon, self).__init__()
self.pool = eventlet.GreenPool()
self.is_daemonize = False
self.thread = None
def daemonize(self, run):
if self.is_daemonize:
LOG.error(_LE("already daemonized"))
return
self.is_daemonize = True
self.thread = self.pool.spawn(run)
eventlet.sleep(0)
return self.thread
def stop(self):
if self.is_daemonize and self.thread:
eventlet.greenthread.kill(self.thread)
eventlet.sleep(0)
self.thread = None
self.is_daemonize = False
|
<commit_before><commit_msg>Add Dargonflow DFDaemon base class
Change-Id: Ie84681792cdc13b3127a95a18121d4b9a8ff8dba<commit_after># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
import eventlet
from dragonflow._i18n import _LE
LOG = logging.getLogger(__name__)
eventlet.monkey_patch()
class DFDaemon(object):
def __init__(self):
super(DFDaemon, self).__init__()
self.pool = eventlet.GreenPool()
self.is_daemonize = False
self.thread = None
def daemonize(self, run):
if self.is_daemonize:
LOG.error(_LE("already daemonized"))
return
self.is_daemonize = True
self.thread = self.pool.spawn(run)
eventlet.sleep(0)
return self.thread
def stop(self):
if self.is_daemonize and self.thread:
eventlet.greenthread.kill(self.thread)
eventlet.sleep(0)
self.thread = None
self.is_daemonize = False
|
|
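A brief usage sketch, assuming the DFDaemon class above is in scope; the polling loop is an illustrative placeholder, not part of the record:
import eventlet
def poll():
    # Placeholder work loop; eventlet.sleep yields to other green threads.
    while True:
        eventlet.sleep(1)
daemon = DFDaemon()
daemon.daemonize(poll)  # spawns poll in the daemon's green pool
# ... main program continues ...
daemon.stop()           # kills the spawned green thread and resets state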
551f3de948b6b0a7338dbb2e1783a407b724fc04
|
tests/v6/test_spawn_primitive_generators.py
|
tests/v6/test_spawn_primitive_generators.py
|
import pytest
from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
@pytest.mark.parametrize("g", EXEMPLAR_PRIMITIVE_GENERATORS)
def test_spawn_primitive_generators(g):
"""
Test that primitive generators can be spawned and the spawned versions produce the same elements.
"""
num_items = 50
seed = 12345
g.reset(seed)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h and re-generate the full list of items
h.reset(seed)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
Add test that spawns of primitive generators produce the same elements as the original
|
Add test that spawns of primitive generators produce the same elements as the original
|
Python
|
mit
|
maxalbert/tohu
|
Add test that spawns of primitive generators produce the same elements as the original
|
import pytest
from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
@pytest.mark.parametrize("g", EXEMPLAR_PRIMITIVE_GENERATORS)
def test_spawn_primitive_generators(g):
"""
Test that primitive generators can be spawned and the spawned versions produce the same elements.
"""
num_items = 50
seed = 12345
g.reset(seed)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h and re-generate the full list of items
h.reset(seed)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
<commit_before><commit_msg>Add test that spawns of primitive generators produce the same elements as the original<commit_after>
|
import pytest
from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
@pytest.mark.parametrize("g", EXEMPLAR_PRIMITIVE_GENERATORS)
def test_spawn_primitive_generators(g):
"""
Test that primitive generators can be spawned and the spawned versions produce the same elements.
"""
num_items = 50
seed = 12345
g.reset(seed)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h and re-generate the full list of items
h.reset(seed)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
Add test that spawns of primitive generators produce the same elements as the originalimport pytest
from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
@pytest.mark.parametrize("g", EXEMPLAR_PRIMITIVE_GENERATORS)
def test_spawn_primitive_generators(g):
"""
Test that primitive generators can be spawned and the spawned versions produce the same elements.
"""
num_items = 50
seed = 12345
g.reset(seed)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h and re-generate the full list of items
h.reset(seed)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
<commit_before><commit_msg>Add test that spawns of primitive generators produce the same elements as the original<commit_after>import pytest
from .exemplar_generators import EXEMPLAR_PRIMITIVE_GENERATORS
@pytest.mark.parametrize("g", EXEMPLAR_PRIMITIVE_GENERATORS)
def test_spawn_primitive_generators(g):
"""
Test that primitive generators can be spawned and the spawned versions produce the same elements.
"""
num_items = 50
seed = 12345
g.reset(seed)
# Let g generate a few items
items_g_pre_spawn = list(g.generate(num_items))
# Spawn g and let both generate a few more items
h = g.spawn()
items_g_post_spawn = list(g.generate(num_items))
items_h_post_spawn = list(h.generate(num_items))
# Reset h and re-generate the full list of items
h.reset(seed)
items_h_all = list(h.generate(2*num_items))
# Verify that the items generated by h after spawning
# as well as the full sets of items are identical.
assert items_h_post_spawn == items_g_post_spawn
assert items_h_all == items_g_pre_spawn + items_g_post_spawn
|
|
b39d03cf5e4ca514da6e8d0f20e0d5e8960c191c
|
server/src/weblab/db/upgrade/regular/versions/3fab9480c190_professor_instructor.py
|
server/src/weblab/db/upgrade/regular/versions/3fab9480c190_professor_instructor.py
|
"""professor => instructor
Revision ID: 3fab9480c190
Revises: 31ded1f6ad6
Create Date: 2014-02-17 00:56:12.566690
"""
# revision identifiers, used by Alembic.
revision = '3fab9480c190'
down_revision = '31ded1f6ad6'
from alembic import op
import sqlalchemy as sa
metadata = sa.MetaData()
role = sa.Table('Role', metadata,
sa.Column('id', sa.Integer()),
sa.Column('name', sa.String(20)),
)
def upgrade():
update_stmt = role.update().where(role.c.name == 'professor').values(name = 'instructor')
op.execute(update_stmt)
def downgrade():
update_stmt = role.update().where(role.c.name == 'instructor').values(name = 'professor')
op.execute(update_stmt)
|
Use instructor instead of professor
|
Use instructor instead of professor
|
Python
|
bsd-2-clause
|
morelab/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto
|
Use instructor instead of professor
|
"""professor => instructor
Revision ID: 3fab9480c190
Revises: 31ded1f6ad6
Create Date: 2014-02-17 00:56:12.566690
"""
# revision identifiers, used by Alembic.
revision = '3fab9480c190'
down_revision = '31ded1f6ad6'
from alembic import op
import sqlalchemy as sa
metadata = sa.MetaData()
role = sa.Table('Role', metadata,
sa.Column('id', sa.Integer()),
sa.Column('name', sa.String(20)),
)
def upgrade():
update_stmt = role.update().where(role.c.name == 'professor').values(name = 'instructor')
op.execute(update_stmt)
def downgrade():
update_stmt = role.update().where(role.c.name == 'instructor').values(name = 'professor')
op.execute(update_stmt)
|
<commit_before><commit_msg>Use instructor instead of professor<commit_after>
|
"""professor => instructor
Revision ID: 3fab9480c190
Revises: 31ded1f6ad6
Create Date: 2014-02-17 00:56:12.566690
"""
# revision identifiers, used by Alembic.
revision = '3fab9480c190'
down_revision = '31ded1f6ad6'
from alembic import op
import sqlalchemy as sa
metadata = sa.MetaData()
role = sa.Table('Role', metadata,
sa.Column('id', sa.Integer()),
sa.Column('name', sa.String(20)),
)
def upgrade():
update_stmt = role.update().where(role.c.name == 'professor').values(name = 'instructor')
op.execute(update_stmt)
def downgrade():
update_stmt = role.update().where(role.c.name == 'instructor').values(name = 'professor')
op.execute(update_stmt)
|
Use instructor instead of professor"""professor => instructor
Revision ID: 3fab9480c190
Revises: 31ded1f6ad6
Create Date: 2014-02-17 00:56:12.566690
"""
# revision identifiers, used by Alembic.
revision = '3fab9480c190'
down_revision = '31ded1f6ad6'
from alembic import op
import sqlalchemy as sa
metadata = sa.MetaData()
role = sa.Table('Role', metadata,
sa.Column('id', sa.Integer()),
sa.Column('name', sa.String(20)),
)
def upgrade():
update_stmt = role.update().where(role.c.name == 'professor').values(name = 'instructor')
op.execute(update_stmt)
def downgrade():
update_stmt = role.update().where(role.c.name == 'instructor').values(name = 'professor')
op.execute(update_stmt)
|
<commit_before><commit_msg>Use instructor instead of professor<commit_after>"""professor => instructor
Revision ID: 3fab9480c190
Revises: 31ded1f6ad6
Create Date: 2014-02-17 00:56:12.566690
"""
# revision identifiers, used by Alembic.
revision = '3fab9480c190'
down_revision = '31ded1f6ad6'
from alembic import op
import sqlalchemy as sa
metadata = sa.MetaData()
role = sa.Table('Role', metadata,
sa.Column('id', sa.Integer()),
sa.Column('name', sa.String(20)),
)
def upgrade():
update_stmt = role.update().where(role.c.name == 'professor').values(name = 'instructor')
op.execute(update_stmt)
def downgrade():
update_stmt = role.update().where(role.c.name == 'instructor').values(name = 'professor')
op.execute(update_stmt)
|
|
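A side note on the migration pattern above: op.execute() discards the result, so the upgrade gives no feedback on how many rows were renamed. A minimal sketch of the same step run through the migration's own connection — hypothetical, not part of the commit; get_bind() and rowcount are standard Alembic/SQLAlchemy API:

def upgrade():
    conn = op.get_bind()
    result = conn.execute(
        role.update().where(role.c.name == 'professor').values(name='instructor'))
    # rowcount is 0 when the database holds no 'professor' rows
    print('renamed {0} role row(s)'.format(result.rowcount))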
2192cf4caa93a2b9c2a4332ba59c40175e8f977b
|
gscripts/ipython_imports.py
|
gscripts/ipython_imports.py
|
import numpy as np
import pandas as pd
import matplotlib_venn
import matplotlib.pyplot as plt
import brewer2mpl
set1 = brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors
red = set1[0]
blue = set1[1]
green = set1[2]
purple = set1[3]
orange = set1[4]
yellow = set1[5]
brown = set1[6]
pink = set1[7]
grey = set1[8]
|
Add file for easy ipython imports
|
Add file for easy ipython imports
|
Python
|
mit
|
YeoLab/gscripts,YeoLab/gscripts,YeoLab/gscripts,YeoLab/gscripts
|
Add file for easy ipython imports
|
import numpy as np
import pandas as pd
import matplotlib_venn
import matplotlib.pyplot as plt
import brewer2mpl
set1 = brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors
red = set1[0]
blue = set1[1]
green = set1[2]
purple = set1[3]
orange = set1[4]
yellow = set1[5]
brown = set1[6]
pink = set1[7]
grey = set1[8]
|
<commit_before><commit_msg>Add file for easy ipython imports<commit_after>
|
import numpy as np
import pandas as pd
import matplotlib_venn
import matplotlib.pyplot as plt
import brewer2mpl
set1 = brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors
red = set1[0]
blue = set1[1]
green = set1[2]
purple = set1[3]
orange = set1[4]
yellow = set1[5]
brown = set1[6]
pink = set1[7]
grey = set1[8]
|
Add file for easy ipython importsimport numpy as np
import pandas as pd
import matplotlib_venn
import matplotlib.pyplot as plt
import brewer2mpl
set1 = brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors
red = set1[0]
blue = set1[1]
green = set1[2]
purple = set1[3]
orange = set1[4]
yellow = set1[5]
brown = set1[6]
pink = set1[7]
grey = set1[8]
|
<commit_before><commit_msg>Add file for easy ipython imports<commit_after>import numpy as np
import pandas as pd
import matplotlib_venn
import matplotlib.pyplot as plt
import brewer2mpl
set1 = brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors
red = set1[0]
blue = set1[1]
green = set1[2]
purple = set1[3]
orange = set1[4]
yellow = set1[5]
brown = set1[6]
pink = set1[7]
grey = set1[8]
|
|
55755871c240289238072602eefd9eed14d7e70e
|
bin/combine-examples.py
|
bin/combine-examples.py
|
#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
print require,
for filename in sorted(examples.keys()):
print '// ', filename
print '(function(){'
for line in examples[filename]:
print line,
print '})();'
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
sys.stdout.write(require)
for filename in sorted(examples.keys()):
sys.stdout.write('// ' + filename + '\n')
sys.stdout.write('(function(){\n')
for line in examples[filename]:
sys.stdout.write(line)
sys.stdout.write('})();\n')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Use write to avoid newline problems
|
Use write to avoid newline problems
|
Python
|
bsd-2-clause
|
elemoine/ol3,gingerik/ol3,itayod/ol3,stweil/ol3,bill-chadwick/ol3,epointal/ol3,adube/ol3,denilsonsa/ol3,fblackburn/ol3,xiaoqqchen/ol3,bogdanvaduva/ol3,landonb/ol3,tsauerwein/ol3,klokantech/ol3,landonb/ol3,bjornharrtell/ol3,llambanna/ol3,gingerik/ol3,gingerik/ol3,Distem/ol3,bjornharrtell/ol3,Distem/ol3,richstoner/ol3,klokantech/ol3raster,thhomas/ol3,thhomas/ol3,Antreasgr/ol3,mechdrew/ol3,mechdrew/ol3,stweil/ol3,kjelderg/ol3,planetlabs/ol3,klokantech/ol3,elemoine/ol3,openlayers/openlayers,antonio83moura/ol3,fperucic/ol3,jmiller-boundless/ol3,hafenr/ol3,epointal/ol3,ahocevar/ol3,tsauerwein/ol3,klokantech/ol3raster,geonux/ol3,wlerner/ol3,wlerner/ol3,stweil/ol3,gingerik/ol3,wlerner/ol3,xiaoqqchen/ol3,tamarmot/ol3,stweil/openlayers,mechdrew/ol3,ahocevar/ol3,thomasmoelhave/ol3,kjelderg/ol3,klokantech/ol3raster,pmlrsg/ol3,klokantech/ol3,thhomas/ol3,CandoImage/ol3,thomasmoelhave/ol3,planetlabs/ol3,ahocevar/openlayers,oterral/ol3,t27/ol3,bartvde/ol3,geekdenz/openlayers,mzur/ol3,geekdenz/ol3,mzur/ol3,jmiller-boundless/ol3,Distem/ol3,NOAA-ORR-ERD/ol3,Andrey-Pavlov/ol3,CandoImage/ol3,freylis/ol3,mechdrew/ol3,fperucic/ol3,klokantech/ol3raster,richstoner/ol3,openlayers/openlayers,NOAA-ORR-ERD/ol3,Antreasgr/ol3,thomasmoelhave/ol3,tschaub/ol3,llambanna/ol3,Morgul/ol3,ahocevar/openlayers,elemoine/ol3,alvinlindstam/ol3,xiaoqqchen/ol3,t27/ol3,ahocevar/ol3,Andrey-Pavlov/ol3,kjelderg/ol3,t27/ol3,bogdanvaduva/ol3,alvinlindstam/ol3,jmiller-boundless/ol3,fredj/ol3,jmiller-boundless/ol3,adube/ol3,pmlrsg/ol3,t27/ol3,alvinlindstam/ol3,freylis/ol3,geekdenz/openlayers,itayod/ol3,planetlabs/ol3,llambanna/ol3,denilsonsa/ol3,bartvde/ol3,pmlrsg/ol3,tsauerwein/ol3,oterral/ol3,fredj/ol3,epointal/ol3,geekdenz/ol3,aisaacs/ol3,alexbrault/ol3,jacmendt/ol3,ahocevar/ol3,bill-chadwick/ol3,CandoImage/ol3,hafenr/ol3,denilsonsa/ol3,kjelderg/ol3,fredj/ol3,richstoner/ol3,tamarmot/ol3,bjornharrtell/ol3,stweil/ol3,landonb/ol3,adube/ol3,tamarmot/ol3,tschaub/ol3,wlerner/ol3,das-peter/ol3,kkuunnddaannkk/ol3,hafenr/ol3,bogdanvaduva/ol3,Morgul/ol3,geonux/ol3,jacmendt/ol3,bill-chadwick/ol3,geonux/ol3,freylis/ol3,denilsonsa/ol3,kkuunnddaannkk/ol3,fperucic/ol3,alexbrault/ol3,Antreasgr/ol3,das-peter/ol3,tschaub/ol3,llambanna/ol3,NOAA-ORR-ERD/ol3,alvinlindstam/ol3,NOAA-ORR-ERD/ol3,Andrey-Pavlov/ol3,tsauerwein/ol3,klokantech/ol3,geekdenz/ol3,mzur/ol3,freylis/ol3,geonux/ol3,thhomas/ol3,geekdenz/ol3,Antreasgr/ol3,fredj/ol3,stweil/openlayers,alexbrault/ol3,Andrey-Pavlov/ol3,bartvde/ol3,bill-chadwick/ol3,planetlabs/ol3,jacmendt/ol3,landonb/ol3,thomasmoelhave/ol3,aisaacs/ol3,pmlrsg/ol3,stweil/openlayers,fperucic/ol3,hafenr/ol3,bartvde/ol3,jacmendt/ol3,itayod/ol3,oterral/ol3,itayod/ol3,elemoine/ol3,bogdanvaduva/ol3,kkuunnddaannkk/ol3,das-peter/ol3,Morgul/ol3,CandoImage/ol3,aisaacs/ol3,aisaacs/ol3,antonio83moura/ol3,epointal/ol3,Distem/ol3,richstoner/ol3,ahocevar/openlayers,alexbrault/ol3,geekdenz/openlayers,xiaoqqchen/ol3,mzur/ol3,jmiller-boundless/ol3,tschaub/ol3,das-peter/ol3,fblackburn/ol3,tamarmot/ol3,antonio83moura/ol3,openlayers/openlayers,kkuunnddaannkk/ol3,fblackburn/ol3,fblackburn/ol3,Morgul/ol3,antonio83moura/ol3
|
#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
print require,
for filename in sorted(examples.keys()):
print '// ', filename
print '(function(){'
for line in examples[filename]:
print line,
print '})();'
if __name__ == '__main__':
sys.exit(main(sys.argv))
Use write to avoid newline problems
|
#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
sys.stdout.write(require)
for filename in sorted(examples.keys()):
sys.stdout.write('// ' + filename + '\n')
sys.stdout.write('(function(){\n')
for line in examples[filename]:
sys.stdout.write(line)
sys.stdout.write('})();\n')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before>#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
print require,
for filename in sorted(examples.keys()):
print '// ', filename
print '(function(){'
for line in examples[filename]:
print line,
print '})();'
if __name__ == '__main__':
sys.exit(main(sys.argv))
<commit_msg>Use write to avoid newline problems<commit_after>
|
#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
sys.stdout.write(require)
for filename in sorted(examples.keys()):
sys.stdout.write('// ' + filename + '\n')
sys.stdout.write('(function(){\n')
for line in examples[filename]:
sys.stdout.write(line)
sys.stdout.write('})();\n')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
print require,
for filename in sorted(examples.keys()):
print '// ', filename
print '(function(){'
for line in examples[filename]:
print line,
print '})();'
if __name__ == '__main__':
sys.exit(main(sys.argv))
Use write to avoid newline problems#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
sys.stdout.write(require)
for filename in sorted(examples.keys()):
sys.stdout.write('// ' + filename + '\n')
sys.stdout.write('(function(){\n')
for line in examples[filename]:
sys.stdout.write(line)
sys.stdout.write('})();\n')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before>#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
print require,
for filename in sorted(examples.keys()):
print '// ', filename
print '(function(){'
for line in examples[filename]:
print line,
print '})();'
if __name__ == '__main__':
sys.exit(main(sys.argv))
<commit_msg>Use write to avoid newline problems<commit_after>#!/usr/bin/python
import re
import sys
def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
sys.stdout.write(require)
for filename in sorted(examples.keys()):
sys.stdout.write('// ' + filename + '\n')
sys.stdout.write('(function(){\n')
for line in examples[filename]:
sys.stdout.write(line)
sys.stdout.write('})();\n')
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
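Under Python 3 the same fix needs neither the trailing comma nor explicit writes: print() takes an end parameter, so lines that already carry their own newline can be emitted verbatim. A hypothetical sketch of the two loops:

for require in sorted(requires):
    print(require, end='')            # 'require' already ends in '\n'
for filename in sorted(examples):
    print('// ', filename)
    print('(function(){')
    for line in examples[filename]:
        print(line, end='')           # keep the source file's own newlines
    print('})();')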
ecf0105a48c479f28d2e83a5816ab9b021186562
|
REF/search-by-keyword.py
|
REF/search-by-keyword.py
|
import sys
import xml.etree.ElementTree as ET
def getkeywords(filename):
with open(filename) as kwfile:
terms = ()
for line in kwfile:
terms += ( '"' + line.rstrip('\n') + '"' ,)
return terms
def simpleSearch(phraseterm, proxies=None):
import requests
svcbase = \
'http://impact.ref.ac.uk/casestudiesapi/REFAPI.svc/SearchCaseStudies'
url = svcbase + '?' + phraseterm + '&format=XML'
# RAL web cache breaks if len(url) > 2700
# print 'Len(url) = %d.\nURL=%s\n' % (len(url), url)
response = requests.get(url, proxies=proxies)
if response.status_code != 200:
print response.status_code
print response.reason
sys.exit("Bad response from server")
return response.text.encode("utf-8")
def getTagSingleText(elt, tagbase, tag):
elt = elt.findall(tagbase+tag)
if len(elt) > 0:
return elt[0].text
else:
return None
def getCSIDs(root, tagbase, cstag):
csids = ()
for child in root:
csids += (getTagSingleText(child, tagbase, cstag), )
return csids
def main():
tagbase = '{http://schemas.datacontract.org/2004/07/REFAPIService}'
cstag = 'CaseStudyId'
terms = getkeywords(sys.argv[1])
terms = " ".join(terms)
allcsids = ()
phrase = 'phrase=' + terms
searchResults = simpleSearch(phrase, {"http": "http://wwwcache.rl.ac.uk:8080/"} )
root = ET.fromstring(searchResults)
nelements = len( list(root) )
if nelements != 0:
csids = getCSIDs(root, tagbase, cstag)
if csids is not None and len(csids) > 0:
for kid in csids:
if kid is not None:
allcsids += (kid, )
print 'Number of matching Case Study IDs = %d.\n' % len(allcsids)
for c in allcsids:
if c is not None:
print c
if __name__ == "__main__":
main()
|
Add the published keyword search proggy
|
Add the published keyword search proggy
|
Python
|
mit
|
ijjorama/Impact
|
Add the published keyword search proggy
|
import sys
import xml.etree.ElementTree as ET
def getkeywords(filename):
with open(filename) as kwfile:
terms = ()
for line in kwfile:
terms += ( '"' + line.rstrip('\n') + '"' ,)
return terms
def simpleSearch(phraseterm, proxies=None):
import requests
svcbase = \
'http://impact.ref.ac.uk/casestudiesapi/REFAPI.svc/SearchCaseStudies'
url = svcbase + '?' + phraseterm + '&format=XML'
# RAL web cache breaks if len(url) > 2700
# print 'Len(url) = %d.\nURL=%s\n' % (len(url), url)
response = requests.get(url, proxies=proxies)
if response.status_code != 200:
print response.status_code
print response.reason
sys.exit("Bad response from server")
return response.text.encode("utf-8")
def getTagSingleText(elt, tagbase, tag):
elt = elt.findall(tagbase+tag)
if len(elt) > 0:
return elt[0].text
else:
return None
def getCSIDs(root, tagbase, cstag):
csids = ()
for child in root:
csids += (getTagSingleText(child, tagbase, cstag), )
return csids
def main():
tagbase = '{http://schemas.datacontract.org/2004/07/REFAPIService}'
cstag = 'CaseStudyId'
terms = getkeywords(sys.argv[1])
terms = " ".join(terms)
allcsids = ()
phrase = 'phrase=' + terms
searchResults = simpleSearch(phrase, {"http": "http://wwwcache.rl.ac.uk:8080/"} )
root = ET.fromstring(searchResults)
nelements = len( list(root) )
if nelements != 0:
csids = getCSIDs(root, tagbase, cstag)
if csids is not None and len(csids) > 0:
for kid in csids:
if kid is not None:
allcsids += (kid, )
print 'Number of matching Case Study IDs = %d.\n' % len(allcsids)
for c in allcsids:
if c is not None:
print c
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the published keyword search proggy<commit_after>
|
import sys
import xml.etree.ElementTree as ET
def getkeywords(filename):
with open(filename) as kwfile:
terms = ()
for line in kwfile:
terms += ( '"' + line.rstrip('\n') + '"' ,)
return terms
def simpleSearch(phraseterm, proxies=None):
import requests
svcbase = \
'http://impact.ref.ac.uk/casestudiesapi/REFAPI.svc/SearchCaseStudies'
url = svcbase + '?' + phraseterm + '&format=XML'
# RAL web cache breaks if len(url) > 2700
# print 'Len(url) = %d.\nURL=%s\n' % (len(url), url)
response = requests.get(url, proxies=proxies)
if response.status_code != 200:
print response.status_code
print response.reason
sys.exit("Bad response from server")
return response.text.encode("utf-8")
def getTagSingleText(elt, tagbase, tag):
elt = elt.findall(tagbase+tag)
if len(elt) > 0:
return elt[0].text
else:
return None
def getCSIDs(root, tagbase, cstag):
csids = ()
for child in root:
csids += (getTagSingleText(child, tagbase, cstag), )
return csids
def main():
tagbase = '{http://schemas.datacontract.org/2004/07/REFAPIService}'
cstag = 'CaseStudyId'
terms = getkeywords(sys.argv[1])
terms = " ".join(terms)
allcsids = ()
phrase = 'phrase=' + terms
searchResults = simpleSearch(phrase, {"http": "http://wwwcache.rl.ac.uk:8080/"} )
root = ET.fromstring(searchResults)
nelements = len( list(root) )
if nelements != 0:
csids = getCSIDs(root, tagbase, cstag)
if csids is not None and len(csids) > 0:
for kid in csids:
if kid is not None:
allcsids += (kid, )
print 'Number of matching Case Study IDs = %d.\n' % len(allcsids)
for c in allcsids:
if c is not None:
print c
if __name__ == "__main__":
main()
|
Add the published keyword search proggyimport sys
import xml.etree.ElementTree as ET
def getkeywords(filename):
with open(filename) as kwfile:
terms = ()
for line in kwfile:
terms += ( '"' + line.rstrip('\n') + '"' ,)
return terms
def simpleSearch(phraseterm, proxies=None):
import requests
svcbase = \
'http://impact.ref.ac.uk/casestudiesapi/REFAPI.svc/SearchCaseStudies'
url = svcbase + '?' + phraseterm + '&format=XML'
# RAL web cache breaks if len(url) > 2700
# print 'Len(url) = %d.\nURL=%s\n' % (len(url), url)
response = requests.get(url, proxies=proxies)
if response.status_code != 200:
print response.status_code
print response.reason
sys.exit("Bad response from server")
return response.text.encode("utf-8")
def getTagSingleText(elt, tagbase, tag):
elt = elt.findall(tagbase+tag)
if len(elt) > 0:
return elt[0].text
else:
return None
def getCSIDs(root, tagbase, cstag):
csids = ()
for child in root:
csids += (getTagSingleText(child, tagbase, cstag), )
return csids
def main():
tagbase = '{http://schemas.datacontract.org/2004/07/REFAPIService}'
cstag = 'CaseStudyId'
terms = getkeywords(sys.argv[1])
terms = " ".join(terms)
allcsids = ()
phrase = 'phrase=' + terms
searchResults = simpleSearch(phrase, {"http": "http://wwwcache.rl.ac.uk:8080/"} )
root = ET.fromstring(searchResults)
nelements = len( list(root) )
if nelements != 0:
csids = getCSIDs(root, tagbase, cstag)
if csids is not None and len(csids) > 0:
for kid in csids:
if kid is not None:
allcsids += (kid, )
print 'Number of matching Case Study IDs = %d.\n' % len(allcsids)
for c in allcsids:
if c is not None:
print c
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add the published keyword search proggy<commit_after>import sys
import xml.etree.ElementTree as ET
def getkeywords(filename):
with open(filename) as kwfile:
terms = ()
for line in kwfile:
terms += ( '"' + line.rstrip('\n') + '"' ,)
return terms
def simpleSearch(phraseterm, proxies=None):
import requests
svcbase = \
'http://impact.ref.ac.uk/casestudiesapi/REFAPI.svc/SearchCaseStudies'
url = svcbase + '?' + phraseterm + '&format=XML'
# RAL web cache breaks if len(url) > 2700
# print 'Len(url) = %d.\nURL=%s\n' % (len(url), url)
response = requests.get(url, proxies=proxies)
if response.status_code != 200:
print response.status_code
print response.reason
sys.exit("Bad response from server")
return response.text.encode("utf-8")
def getTagSingleText(elt, tagbase, tag):
elt = elt.findall(tagbase+tag)
if len(elt) > 0:
return elt[0].text
else:
return None
def getCSIDs(root, tagbase, cstag):
csids = ()
for child in root:
csids += (getTagSingleText(child, tagbase, cstag), )
return csids
def main():
tagbase = '{http://schemas.datacontract.org/2004/07/REFAPIService}'
cstag = 'CaseStudyId'
terms = getkeywords(sys.argv[1])
terms = " ".join(terms)
allcsids = ()
phrase = 'phrase=' + terms
searchResults = simpleSearch(phrase, {"http": "http://wwwcache.rl.ac.uk:8080/"} )
root = ET.fromstring(searchResults)
nelements = len( list(root) )
if nelements != 0:
csids = getCSIDs(root, tagbase, cstag)
if csids is not None and len(csids) > 0:
for kid in csids:
if kid is not None:
allcsids += (kid, )
print 'Number of matching Case Study IDs = %d.\n' % len(allcsids)
for c in allcsids:
if c is not None:
print c
if __name__ == "__main__":
main()
|
|
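One fragile spot in the script above is building the query string by hand; requests can encode it instead via the params argument. A minimal sketch under that assumption (same endpoint, behaviour otherwise unchanged):

import requests

def simple_search(terms, proxies=None):
    svcbase = ('http://impact.ref.ac.uk/casestudiesapi/'
               'REFAPI.svc/SearchCaseStudies')
    # requests URL-encodes the values, so quoted phrases survive intact
    response = requests.get(svcbase,
                            params={'phrase': terms, 'format': 'XML'},
                            proxies=proxies)
    response.raise_for_status()       # replaces the manual status check
    return response.text.encode('utf-8')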
149c546cb70f45ee8128966bfe1af8e8a10958b0
|
largedatatest/createdata.py
|
largedatatest/createdata.py
|
#!/usr/bin/env python
import h5py
import random
X = 500
Y = 500
f = h5py.File('/tmp/test-datafile.hdf5', 'w')
dset = f.create_dataset("default", (X, Y), dtype='float64')
for x in xrange(X):
for y in xrange(Y):
dset[x,y] = random.random()
f.close()
|
Add largedatatest for testing a large data set from an hdf5 file
|
Add largedatatest for testing a large data set from an hdf5 file
|
Python
|
apache-2.0
|
matyasselmeci/dask_condor,matyasselmeci/dask_condor
|
Add largedatatest for testing a large data set from an hdf5 file
|
#!/usr/bin/env python
import h5py
import random
X = 500
Y = 500
f = h5py.File('/tmp/test-datafile.hdf5', 'w')
dset = f.create_dataset("default", (X, Y), dtype='float64')
for x in xrange(X):
for y in xrange(Y):
dset[x,y] = random.random()
f.close()
|
<commit_before><commit_msg>Add largedatatest for testing a large data set from an hdf5 file<commit_after>
|
#!/usr/bin/env python
import h5py
import random
X = 500
Y = 500
f = h5py.File('/tmp/test-datafile.hdf5', 'w')
dset = f.create_dataset("default", (X, Y), dtype='float64')
for x in xrange(X):
for y in xrange(Y):
dset[x,y] = random.random()
f.close()
|
Add largedatatest for testing a large data set from an hdf5 file#!/usr/bin/env python
import h5py
import random
X = 500
Y = 500
f = h5py.File('/tmp/test-datafile.hdf5', 'w')
dset = f.create_dataset("default", (X, Y), dtype='float64')
for x in xrange(X):
for y in xrange(Y):
dset[x,y] = random.random()
f.close()
|
<commit_before><commit_msg>Add largedatatest for testing a large data set from an hdf5 file<commit_after>#!/usr/bin/env python
import h5py
import random
X = 500
Y = 500
f = h5py.File('/tmp/test-datafile.hdf5', 'w')
dset = f.create_dataset("default", (X, Y), dtype='float64')
for x in xrange(X):
for y in xrange(Y):
dset[x,y] = random.random()
f.close()
|
|
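Writing one cell at a time issues X*Y separate HDF5 writes; numpy can generate the whole array and store it in a single assignment. A sketch of the equivalent file, assuming numpy is available:

import h5py
import numpy as np

X, Y = 500, 500
with h5py.File('/tmp/test-datafile.hdf5', 'w') as f:
    dset = f.create_dataset('default', (X, Y), dtype='float64')
    dset[...] = np.random.random((X, Y))   # one bulk write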
a840dfbcfbefd2827226c4b27aa29cf7854dc36a
|
autoscaling/delete-old-launch-configuration.py
|
autoscaling/delete-old-launch-configuration.py
|
#!/usr/bin/env python
import re
import sys
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
def main(REGION, pattern):
print('Checking new launch configuration in the "{0}" region.'.format(REGION))
asConnection = boto.ec2.autoscale.connect_to_region(REGION)
lc = asConnection.get_all_launch_configurations(max_records=100)
print('Total number of launch configuration: {0}'.format(len(lc)))
target_lc = find_target_lc(lc, pattern)
print('Number of launch configuration for pattern ("{0}"): {1}'.format(pattern, len(target_lc)))
delete_oldest_k_lc(asConnection, target_lc, 10)
def delete_lc (conn, lc):
print('Deleting launch configuration: {0}'.format(lc.name))
conn.delete_launch_configuration(lc.name)
def find_target_lc (lc, pattern):
target_lc = filter(lambda x: re.search(pattern, x.name), lc)
return target_lc
def delete_oldest_k_lc (conn, lc, k):
"""Delete oldest k launch configuration which matches given pattern, keep at least recent 10 launch configuration"""
min_items = 25;
num_to_be_deleted = min(k, len(lc) - min_items);
if (num_to_be_deleted < 1):
print('Nothing to delete, count of launch configuration: {0}'.format(len(lc)))
return None
else:
sorted_lc = sorted(lc, key=lambda lc: lc.created_time)
to_be_deleted_lc = sorted_lc[:num_to_be_deleted]
        print('Deleting launch configuration total number: {0}'.format(len(to_be_deleted_lc)))
map(lambda x: delete_lc(conn, x), to_be_deleted_lc)
return None
if __name__ == "__main__":
if (len(sys.argv) != 3):
exit('Usage: {0} region pattern'.format(sys.argv[0]))
else:
main(sys.argv[1], sys.argv[2])
|
Add script to delete old launch configuration
|
Add script to delete old launch configuration
|
Python
|
mit
|
tendant/aws-script
|
Add script to delete old launch configuration
|
#!/usr/bin/env python
import re
import sys
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
def main(REGION, pattern):
print('Checking new launch configuration in the "{0}" region.'.format(REGION))
asConnection = boto.ec2.autoscale.connect_to_region(REGION)
lc = asConnection.get_all_launch_configurations(max_records=100)
print('Total number of launch configuration: {0}'.format(len(lc)))
target_lc = find_target_lc(lc, pattern)
print('Number of launch configuration for pattern ("{0}"): {1}'.format(pattern, len(target_lc)))
delete_oldest_k_lc(asConnection, target_lc, 10)
def delete_lc (conn, lc):
print('Deleting launch configuration: {0}'.format(lc.name))
conn.delete_launch_configuration(lc.name)
def find_target_lc (lc, pattern):
target_lc = filter(lambda x: re.search(pattern, x.name), lc)
return target_lc
def delete_oldest_k_lc (conn, lc, k):
"""Delete oldest k launch configuration which matches given pattern, keep at least recent 10 launch configuration"""
min_items = 25;
num_to_be_deleted = min(k, len(lc) - min_items);
if (num_to_be_deleted < 1):
print('Nothing to delete, count of launch configuration: {0}'.format(len(lc)))
return None
else:
sorted_lc = sorted(lc, key=lambda lc: lc.created_time)
to_be_deleted_lc = sorted_lc[:num_to_be_deleted]
        print('Deleting launch configuration total number: {0}'.format(len(to_be_deleted_lc)))
map(lambda x: delete_lc(conn, x), to_be_deleted_lc)
return None
if __name__ == "__main__":
if (len(sys.argv) != 3):
exit('Usage: {0} region pattern'.format(sys.argv[0]))
else:
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add script to delete old launch configuration<commit_after>
|
#!/usr/bin/env python
import re
import sys
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
def main(REGION, pattern):
print('Checking new launch configuration in the "{0}" region.'.format(REGION))
asConnection = boto.ec2.autoscale.connect_to_region(REGION)
lc = asConnection.get_all_launch_configurations(max_records=100)
print('Total number of launch configuration: {0}'.format(len(lc)))
target_lc = find_target_lc(lc, pattern)
print('Number of launch configuration for pattern ("{0}"): {1}'.format(pattern, len(target_lc)))
delete_oldest_k_lc(asConnection, target_lc, 10)
def delete_lc (conn, lc):
print('Deleting launch configuration: {0}'.format(lc.name))
conn.delete_launch_configuration(lc.name)
def find_target_lc (lc, pattern):
target_lc = filter(lambda x: re.search(pattern, x.name), lc)
return target_lc
def delete_oldest_k_lc (conn, lc, k):
"""Delete oldest k launch configuration which matches given pattern, keep at least recent 10 launch configuration"""
min_items = 25;
num_to_be_deleted = min(k, len(lc) - min_items);
if (num_to_be_deleted < 1):
print('Nothing to delete, count of launch configuration: {0}'.format(len(lc)))
return None
else:
sorted_lc = sorted(lc, key=lambda lc: lc.created_time)
to_be_deleted_lc = sorted_lc[:num_to_be_deleted]
        print('Deleting launch configuration total number: {0}'.format(len(to_be_deleted_lc)))
map(lambda x: delete_lc(conn, x), to_be_deleted_lc)
return None
if __name__ == "__main__":
if (len(sys.argv) != 3):
exit('Usage: {0} region pattern'.format(sys.argv[0]))
else:
main(sys.argv[1], sys.argv[2])
|
Add script to delete old launch configuration#!/usr/bin/env python
import re
import sys
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
def main(REGION, pattern):
print('Checking new launch configuration in the "{0}" region.'.format(REGION))
asConnection = boto.ec2.autoscale.connect_to_region(REGION)
lc = asConnection.get_all_launch_configurations(max_records=100)
print('Total number of launch configuration: {0}'.format(len(lc)))
target_lc = find_target_lc(lc, pattern)
print('Number of launch configuration for pattern ("{0}"): {1}'.format(pattern, len(target_lc)))
delete_oldest_k_lc(asConnection, target_lc, 10)
def delete_lc (conn, lc):
print('Deleting launch configuration: {0}'.format(lc.name))
conn.delete_launch_configuration(lc.name)
def find_target_lc (lc, pattern):
target_lc = filter(lambda x: re.search(pattern, x.name), lc)
return target_lc
def delete_oldest_k_lc (conn, lc, k):
"""Delete oldest k launch configuration which matches given pattern, keep at least recent 10 launch configuration"""
min_items = 25;
num_to_be_deleted = min(k, len(lc) - min_items);
if (num_to_be_deleted < 1):
print('Nothing to delete, count of launch configuration: {0}'.format(len(lc)))
return None
else:
sorted_lc = sorted(lc, key=lambda lc: lc.created_time)
to_be_deleted_lc = sorted_lc[:num_to_be_deleted]
print('Deleteing launch configuration total number: {0}'.format(len(to_be_deleted_lc)))
map(lambda x: delete_lc(conn, x), to_be_deleted_lc)
return None
if __name__ == "__main__":
if (len(sys.argv) != 3):
exit('Usage: {0} region pattern'.format(sys.argv[0]))
else:
main(sys.argv[1], sys.argv[2])
|
<commit_before><commit_msg>Add script to delete old launch configuration<commit_after>#!/usr/bin/env python
import re
import sys
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
def main(REGION, pattern):
print('Checking new launch configuration in the "{0}" region.'.format(REGION))
asConnection = boto.ec2.autoscale.connect_to_region(REGION)
lc = asConnection.get_all_launch_configurations(max_records=100)
print('Total number of launch configuration: {0}'.format(len(lc)))
target_lc = find_target_lc(lc, pattern)
print('Number of launch configuration for pattern ("{0}"): {1}'.format(pattern, len(target_lc)))
delete_oldest_k_lc(asConnection, target_lc, 10)
def delete_lc (conn, lc):
print('Deleting launch configuration: {0}'.format(lc.name))
conn.delete_launch_configuration(lc.name)
def find_target_lc (lc, pattern):
target_lc = filter(lambda x: re.search(pattern, x.name), lc)
return target_lc
def delete_oldest_k_lc (conn, lc, k):
"""Delete oldest k launch configuration which matches given pattern, keep at least recent 10 launch configuration"""
min_items = 25;
num_to_be_deleted = min(k, len(lc) - min_items);
if (num_to_be_deleted < 1):
print('Nothing to delete, count of launch configuration: {0}'.format(len(lc)))
return None
else:
sorted_lc = sorted(lc, key=lambda lc: lc.created_time)
to_be_deleted_lc = sorted_lc[:num_to_be_deleted]
        print('Deleting launch configuration total number: {0}'.format(len(to_be_deleted_lc)))
map(lambda x: delete_lc(conn, x), to_be_deleted_lc)
return None
if __name__ == "__main__":
if (len(sys.argv) != 3):
exit('Usage: {0} region pattern'.format(sys.argv[0]))
else:
main(sys.argv[1], sys.argv[2])
|
|
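For reference, the boto2 calls used above map directly onto boto3; a hypothetical sketch of listing launch configurations (method and key names as in the boto3 autoscaling client), with the destructive call left commented out:

import boto3

client = boto3.client('autoscaling', region_name='us-east-1')
page = client.describe_launch_configurations(MaxRecords=100)
for lc in page['LaunchConfigurations']:
    print(lc['LaunchConfigurationName'], lc['CreatedTime'])
# client.delete_launch_configuration(LaunchConfigurationName='example-lc')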
3ba0b1cde749d92a6966e45e29c1dcf8fee54a0d
|
scripts/instrumentationRunner.py
|
scripts/instrumentationRunner.py
|
"""
Submits the apks to BrowserStack to run the instrumentation tests.
"""
import os
import shlex
import subprocess
from subprocess import PIPE
import sys
import json
def appendData(command, dataUrl):
var = 'data={\"url\": \"%s\"}' % dataUrl
return command + " " + json.dumps(var)
def buildTestCommand(appToken, testToken):
test = {}
test["devices"] = ["LG G5-6.0"] #using just 1 device right now.
test["app"] = appToken
test["deviceLogs"] = True
test["testSuite"] = testToken
return json.dumps(json.dumps(test))
if __name__ == "__main__":
if "BROWSERSTACK_USERNAME" in os.environ:
userName = os.environ["BROWSERSTACK_USERNAME"]
if "BROWSERSTACK_PASSWORD" in os.environ:
password = os.environ["BROWSERSTACK_PASSWORD"]
if "BUILD_NUMBER" in os.environ:
buildNumber = os.environ["BUILD_NUMBER"]
base = "https://jenkins.dimagi.com/job/commcare-android-instrumentation-tests/{}".format(buildNumber)
debugApk = base + "/artifact/app-commcare-debug.apk"
testApk = base + "/artifact/app-commcare-debug-androidTest.apk"
debugUrl = "https://api-cloud.browserstack.com/app-automate/upload"
testUrl = "https://api-cloud.browserstack.com/app-automate/espresso/test-suite"
command = 'curl -u "{}:{}" -X POST "{}" -F'
debugUploadCmd = appendData(command.format(userName, password, debugUrl), debugApk)
output = subprocess.Popen(shlex.split(debugUploadCmd), stdout=PIPE, stderr=None, shell=False)
appToken = json.loads(output.communicate()[0])["app_url"]
testUploadCmd = appendData(command.format(userName, password, testUrl), testApk)
output = subprocess.Popen(shlex.split(testUploadCmd), stdout=PIPE, stderr=None, shell=False)
testToken = json.loads(output.communicate()[0])["test_url"]
# Running the tests on LG-G5
espressoUrl = "https://api-cloud.browserstack.com/app-automate/espresso/build"
runConfig = buildTestCommand(appToken, testToken)
runCmd = 'curl -X POST "{}" -d \ {} -H "Content-Type: application/json" -u "{}:{}"'.format(espressoUrl, runConfig, userName, password)
output = subprocess.Popen(shlex.split(runCmd), stdout=PIPE, stderr=None, shell=False)
print(output.communicate())
|
Add a python script to schedule runs on browserstack
|
Add a python script to schedule runs on browserstack
|
Python
|
apache-2.0
|
dimagi/commcare-android,dimagi/commcare-android,dimagi/commcare-android,dimagi/commcare-android,dimagi/commcare-android,dimagi/commcare-android
|
Add a python script to schedule runs on browserstack
|
"""
Submits the apks to BrowserStack to run the instrumentation tests.
"""
import os
import shlex
import subprocess
from subprocess import PIPE
import sys
import json
def appendData(command, dataUrl):
var = 'data={\"url\": \"%s\"}' % dataUrl
return command + " " + json.dumps(var)
def buildTestCommand(appToken, testToken):
test = {}
test["devices"] = ["LG G5-6.0"] #using just 1 device right now.
test["app"] = appToken
test["deviceLogs"] = True
test["testSuite"] = testToken
return json.dumps(json.dumps(test))
if __name__ == "__main__":
if "BROWSERSTACK_USERNAME" in os.environ:
userName = os.environ["BROWSERSTACK_USERNAME"]
if "BROWSERSTACK_PASSWORD" in os.environ:
password = os.environ["BROWSERSTACK_PASSWORD"]
if "BUILD_NUMBER" in os.environ:
buildNumber = os.environ["BUILD_NUMBER"]
base = "https://jenkins.dimagi.com/job/commcare-android-instrumentation-tests/{}".format(buildNumber)
debugApk = base + "/artifact/app-commcare-debug.apk"
testApk = base + "/artifact/app-commcare-debug-androidTest.apk"
debugUrl = "https://api-cloud.browserstack.com/app-automate/upload"
testUrl = "https://api-cloud.browserstack.com/app-automate/espresso/test-suite"
command = 'curl -u "{}:{}" -X POST "{}" -F'
debugUploadCmd = appendData(command.format(userName, password, debugUrl), debugApk)
output = subprocess.Popen(shlex.split(debugUploadCmd), stdout=PIPE, stderr=None, shell=False)
appToken = json.loads(output.communicate()[0])["app_url"]
testUploadCmd = appendData(command.format(userName, password, testUrl), testApk)
output = subprocess.Popen(shlex.split(testUploadCmd), stdout=PIPE, stderr=None, shell=False)
testToken = json.loads(output.communicate()[0])["test_url"]
# Running the tests on LG-G5
espressoUrl = "https://api-cloud.browserstack.com/app-automate/espresso/build"
runConfig = buildTestCommand(appToken, testToken)
runCmd = 'curl -X POST "{}" -d \ {} -H "Content-Type: application/json" -u "{}:{}"'.format(espressoUrl, runConfig, userName, password)
output = subprocess.Popen(shlex.split(runCmd), stdout=PIPE, stderr=None, shell=False)
print(output.communicate())
|
<commit_before><commit_msg>Add a python script to schedule runs on browserstack<commit_after>
|
"""
Submits the apks to BrowserStack to run the instrumentation tests.
"""
import os
import shlex
import subprocess
from subprocess import PIPE
import sys
import json
def appendData(command, dataUrl):
var = 'data={\"url\": \"%s\"}' % dataUrl
return command + " " + json.dumps(var)
def buildTestCommand(appToken, testToken):
test = {}
test["devices"] = ["LG G5-6.0"] #using just 1 device right now.
test["app"] = appToken
test["deviceLogs"] = True
test["testSuite"] = testToken
return json.dumps(json.dumps(test))
if __name__ == "__main__":
if "BROWSERSTACK_USERNAME" in os.environ:
userName = os.environ["BROWSERSTACK_USERNAME"]
if "BROWSERSTACK_PASSWORD" in os.environ:
password = os.environ["BROWSERSTACK_PASSWORD"]
if "BUILD_NUMBER" in os.environ:
buildNumber = os.environ["BUILD_NUMBER"]
base = "https://jenkins.dimagi.com/job/commcare-android-instrumentation-tests/{}".format(buildNumber)
debugApk = base + "/artifact/app-commcare-debug.apk"
testApk = base + "/artifact/app-commcare-debug-androidTest.apk"
debugUrl = "https://api-cloud.browserstack.com/app-automate/upload"
testUrl = "https://api-cloud.browserstack.com/app-automate/espresso/test-suite"
command = 'curl -u "{}:{}" -X POST "{}" -F'
debugUploadCmd = appendData(command.format(userName, password, debugUrl), debugApk)
output = subprocess.Popen(shlex.split(debugUploadCmd), stdout=PIPE, stderr=None, shell=False)
appToken = json.loads(output.communicate()[0])["app_url"]
testUploadCmd = appendData(command.format(userName, password, testUrl), testApk)
output = subprocess.Popen(shlex.split(testUploadCmd), stdout=PIPE, stderr=None, shell=False)
testToken = json.loads(output.communicate()[0])["test_url"]
# Running the tests on LG-G5
espressoUrl = "https://api-cloud.browserstack.com/app-automate/espresso/build"
runConfig = buildTestCommand(appToken, testToken)
runCmd = 'curl -X POST "{}" -d \ {} -H "Content-Type: application/json" -u "{}:{}"'.format(espressoUrl, runConfig, userName, password)
output = subprocess.Popen(shlex.split(runCmd), stdout=PIPE, stderr=None, shell=False)
print(output.communicate())
|
Add a python script to schedule runs on browserstack"""
Submits the apks to BrowserStack to run the instrumentation tests.
"""
import os
import shlex
import subprocess
from subprocess import PIPE
import sys
import json
def appendData(command, dataUrl):
var = 'data={\"url\": \"%s\"}' % dataUrl
return command + " " + json.dumps(var)
def buildTestCommand(appToken, testToken):
test = {}
test["devices"] = ["LG G5-6.0"] #using just 1 device right now.
test["app"] = appToken
test["deviceLogs"] = True
test["testSuite"] = testToken
return json.dumps(json.dumps(test))
if __name__ == "__main__":
if "BROWSERSTACK_USERNAME" in os.environ:
userName = os.environ["BROWSERSTACK_USERNAME"]
if "BROWSERSTACK_PASSWORD" in os.environ:
password = os.environ["BROWSERSTACK_PASSWORD"]
if "BUILD_NUMBER" in os.environ:
buildNumber = os.environ["BUILD_NUMBER"]
base = "https://jenkins.dimagi.com/job/commcare-android-instrumentation-tests/{}".format(buildNumber)
debugApk = base + "/artifact/app-commcare-debug.apk"
testApk = base + "/artifact/app-commcare-debug-androidTest.apk"
debugUrl = "https://api-cloud.browserstack.com/app-automate/upload"
testUrl = "https://api-cloud.browserstack.com/app-automate/espresso/test-suite"
command = 'curl -u "{}:{}" -X POST "{}" -F'
debugUploadCmd = appendData(command.format(userName, password, debugUrl), debugApk)
output = subprocess.Popen(shlex.split(debugUploadCmd), stdout=PIPE, stderr=None, shell=False)
appToken = json.loads(output.communicate()[0])["app_url"]
testUploadCmd = appendData(command.format(userName, password, testUrl), testApk)
output = subprocess.Popen(shlex.split(testUploadCmd), stdout=PIPE, stderr=None, shell=False)
testToken = json.loads(output.communicate()[0])["test_url"]
# Running the tests on LG-G5
espressoUrl = "https://api-cloud.browserstack.com/app-automate/espresso/build"
runConfig = buildTestCommand(appToken, testToken)
runCmd = 'curl -X POST "{}" -d \ {} -H "Content-Type: application/json" -u "{}:{}"'.format(espressoUrl, runConfig, userName, password)
output = subprocess.Popen(shlex.split(runCmd), stdout=PIPE, stderr=None, shell=False)
print(output.communicate())
|
<commit_before><commit_msg>Add a python script to schedule runs on browserstack<commit_after>"""
Submits the apks to BrowserStack to run the instrumentation tests.
"""
import os
import shlex
import subprocess
from subprocess import PIPE
import sys
import json
def appendData(command, dataUrl):
var = 'data={\"url\": \"%s\"}' % dataUrl
return command + " " + json.dumps(var)
def buildTestCommand(appToken, testToken):
test = {}
test["devices"] = ["LG G5-6.0"] #using just 1 device right now.
test["app"] = appToken
test["deviceLogs"] = True
test["testSuite"] = testToken
return json.dumps(json.dumps(test))
if __name__ == "__main__":
if "BROWSERSTACK_USERNAME" in os.environ:
userName = os.environ["BROWSERSTACK_USERNAME"]
if "BROWSERSTACK_PASSWORD" in os.environ:
password = os.environ["BROWSERSTACK_PASSWORD"]
if "BUILD_NUMBER" in os.environ:
buildNumber = os.environ["BUILD_NUMBER"]
base = "https://jenkins.dimagi.com/job/commcare-android-instrumentation-tests/{}".format(buildNumber)
debugApk = base + "/artifact/app-commcare-debug.apk"
testApk = base + "/artifact/app-commcare-debug-androidTest.apk"
debugUrl = "https://api-cloud.browserstack.com/app-automate/upload"
testUrl = "https://api-cloud.browserstack.com/app-automate/espresso/test-suite"
command = 'curl -u "{}:{}" -X POST "{}" -F'
debugUploadCmd = appendData(command.format(userName, password, debugUrl), debugApk)
output = subprocess.Popen(shlex.split(debugUploadCmd), stdout=PIPE, stderr=None, shell=False)
appToken = json.loads(output.communicate()[0])["app_url"]
testUploadCmd = appendData(command.format(userName, password, testUrl), testApk)
output = subprocess.Popen(shlex.split(testUploadCmd), stdout=PIPE, stderr=None, shell=False)
testToken = json.loads(output.communicate()[0])["test_url"]
# Running the tests on LG-G5
espressoUrl = "https://api-cloud.browserstack.com/app-automate/espresso/build"
runConfig = buildTestCommand(appToken, testToken)
runCmd = 'curl -X POST "{}" -d \ {} -H "Content-Type: application/json" -u "{}:{}"'.format(espressoUrl, runConfig, userName, password)
output = subprocess.Popen(shlex.split(runCmd), stdout=PIPE, stderr=None, shell=False)
print(output.communicate())
|
|
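The curl-over-subprocess plumbing above can also be written with requests: curl's -F flag corresponds to a multipart form field, which requests expresses as a files entry with a (None, value) tuple. A hypothetical sketch of one upload — field name and URL taken from the script, everything else an assumption:

import requests

def upload_apk(user, password, url, apk_url):
    # multipart 'data' field carrying the remote APK URL, as curl -F does
    resp = requests.post(url, auth=(user, password),
                         files={'data': (None, '{"url": "%s"}' % apk_url)})
    resp.raise_for_status()
    return resp.json()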
790163173efb34e2b9b0ad0d8927057836f0ad51
|
examples/grouped_violinplots.py
|
examples/grouped_violinplots.py
|
"""
Grouped violinplots with split violins
======================================
"""
import seaborn as sns
sns.set(style="darkgrid", palette="pastel", color_codes=True)
# Load the example tips dataset
tips = sns.load_dataset("tips")
# Draw a nested violinplot and split the violins for easier comparison
sns.violinplot(x="day", y="total_bill", hue="sex", data=tips,
inner="quart", split=True, palette={"Male": "r", "Female": "y"})
sns.despine(offset=10, trim=True)
|
Add a split violin example script
|
Add a split violin example script
|
Python
|
bsd-3-clause
|
jat255/seaborn,clarkfitzg/seaborn,bsipocz/seaborn,uhjish/seaborn,mwaskom/seaborn,ashhher3/seaborn,sinhrks/seaborn,sauliusl/seaborn,q1ang/seaborn,nileracecrew/seaborn,lypzln/seaborn,phobson/seaborn,mwaskom/seaborn,arokem/seaborn,lukauskas/seaborn,olgabot/seaborn,parantapa/seaborn,muku42/seaborn,anntzer/seaborn,kyleam/seaborn,wrobstory/seaborn,huongttlan/seaborn,lukauskas/seaborn,mia1rab/seaborn,dhimmel/seaborn,tim777z/seaborn,dimarkov/seaborn,Lx37/seaborn,gef756/seaborn,anntzer/seaborn,phobson/seaborn,mclevey/seaborn,JWarmenhoven/seaborn,petebachant/seaborn,drewokane/seaborn,oesteban/seaborn,arokem/seaborn
|
Add a split violin example script
|
"""
Grouped violinplots with split violins
======================================
"""
import seaborn as sns
sns.set(style="darkgrid", palette="pastel", color_codes=True)
# Load the example tips dataset
tips = sns.load_dataset("tips")
# Draw a nested violinplot and split the violins for easier comparison
sns.violinplot(x="day", y="total_bill", hue="sex", data=tips,
inner="quart", split=True, palette={"Male": "r", "Female": "y"})
sns.despine(offset=10, trim=True)
|
<commit_before><commit_msg>Add a split violin example script<commit_after>
|
"""
Grouped violinplots with split violins
======================================
"""
import seaborn as sns
sns.set(style="darkgrid", palette="pastel", color_codes=True)
# Load the example tips dataset
tips = sns.load_dataset("tips")
# Draw a nested violinplot and split the violins for easier comparison
sns.violinplot(x="day", y="total_bill", hue="sex", data=tips,
inner="quart", split=True, palette={"Male": "r", "Female": "y"})
sns.despine(offset=10, trim=True)
|
Add a split violin example script"""
Grouped violinplots with split violins
======================================
"""
import seaborn as sns
sns.set(style="darkgrid", palette="pastel", color_codes=True)
# Load the example tips dataset
tips = sns.load_dataset("tips")
# Draw a nested violinplot and split the violins for easier comparison
sns.violinplot(x="day", y="total_bill", hue="sex", data=tips,
inner="quart", split=True, palette={"Male": "r", "Female": "y"})
sns.despine(offset=10, trim=True)
|
<commit_before><commit_msg>Add a split violin example script<commit_after>"""
Grouped violinplots with split violins
======================================
"""
import seaborn as sns
sns.set(style="darkgrid", palette="pastel", color_codes=True)
# Load the example tips dataset
tips = sns.load_dataset("tips")
# Draw a nested violinplot and split the violins for easier comparison
sns.violinplot(x="day", y="total_bill", hue="sex", data=tips,
inner="quart", split=True, palette={"Male": "r", "Female": "y"})
sns.despine(offset=10, trim=True)
|
|
fa5d8bbd194a7903d1f6cdc8057a76a4f3752b21
|
h5shuffle.py
|
h5shuffle.py
|
from __future__ import division
import argparse
import numpy as np
import h5py
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str)
args = parser.parse_args()
f = h5py.File(args.file, 'r+')
inds = None
for key, dataset in f.iteritems():
if inds is None:
inds = np.arange(dataset.shape[0])
np.random.shuffle(inds)
else:
assert len(inds) == dataset.shape[0]
f[key][:] = dataset[()][inds]
if __name__ == "__main__":
main()
|
Add script for shuffling the first axis of each entry in a h5 file
|
Add script for shuffling the first axis of each entry in a h5 file
|
Python
|
mit
|
alexlee-gk/visual_dynamics
|
Add script for shuffling the first axis of each entry in a h5 file
|
from __future__ import division
import argparse
import numpy as np
import h5py
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str)
args = parser.parse_args()
f = h5py.File(args.file, 'r+')
inds = None
for key, dataset in f.iteritems():
if inds is None:
inds = np.arange(dataset.shape[0])
np.random.shuffle(inds)
else:
assert len(inds) == dataset.shape[0]
f[key][:] = dataset[()][inds]
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script for shuffling the first axis of each entry in a h5 file<commit_after>
|
from __future__ import division
import argparse
import numpy as np
import h5py
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str)
args = parser.parse_args()
f = h5py.File(args.file, 'r+')
inds = None
for key, dataset in f.iteritems():
if inds is None:
inds = np.arange(dataset.shape[0])
np.random.shuffle(inds)
else:
assert len(inds) == dataset.shape[0]
f[key][:] = dataset[()][inds]
if __name__ == "__main__":
main()
|
Add script for shuffling the first axis of each entry in a h5 filefrom __future__ import division
import argparse
import numpy as np
import h5py
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str)
args = parser.parse_args()
f = h5py.File(args.file, 'r+')
inds = None
for key, dataset in f.iteritems():
if inds is None:
inds = np.arange(dataset.shape[0])
np.random.shuffle(inds)
else:
assert len(inds) == dataset.shape[0]
f[key][:] = dataset[()][inds]
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script for shuffling the first axis of each entry in a h5 file<commit_after>from __future__ import division
import argparse
import numpy as np
import h5py
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str)
args = parser.parse_args()
f = h5py.File(args.file, 'r+')
inds = None
for key, dataset in f.iteritems():
if inds is None:
inds = np.arange(dataset.shape[0])
np.random.shuffle(inds)
else:
assert len(inds) == dataset.shape[0]
f[key][:] = dataset[()][inds]
if __name__ == "__main__":
main()
|
|
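The detour through dataset[()] above is what makes the shuffle work: h5py fancy indexing requires sorted indices, so the array is reordered in memory and written back whole. A sketch of the same idea with a seeded generator so runs are reproducible (assuming numpy >= 1.17 for default_rng):

import h5py
import numpy as np

def shuffle_file(path, seed=0):
    rng = np.random.default_rng(seed)        # fixed seed -> repeatable order
    with h5py.File(path, 'r+') as f:
        inds = None
        for key, dataset in f.items():
            if inds is None:
                inds = rng.permutation(dataset.shape[0])
            f[key][...] = dataset[()][inds]  # reorder in memory, write back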
3f4f55f3232546ea407672522f6223a9548b3167
|
tests/pycurl_object_test.py
|
tests/pycurl_object_test.py
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
import sys
class PycurlObjectTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_set_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
assert hasattr(self.curl, 'attr')
def test_get_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
def test_delete_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
assert hasattr(self.curl, 'attr')
del self.curl.attr
assert not hasattr(self.curl, 'attr')
|
Add a test for general object behavior as it seems to be changed by python 3 patch
|
Add a test for general object behavior as it seems to be changed by python 3 patch
|
Python
|
lgpl-2.1
|
pycurl/pycurl,pycurl/pycurl,pycurl/pycurl
|
Add a test for general object behavior as it seems to be changed by python 3 patch
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
import sys
class PycurlObjectTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_set_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
assert hasattr(self.curl, 'attr')
def test_get_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
def test_delete_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
assert hasattr(self.curl, 'attr')
del self.curl.attr
assert not hasattr(self.curl, 'attr')
|
<commit_before><commit_msg>Add a test for general object behavior as it seems to be changed by python 3 patch<commit_after>
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
import sys
class PycurlObjectTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_set_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
assert hasattr(self.curl, 'attr')
def test_get_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
def test_delete_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
assert hasattr(self.curl, 'attr')
del self.curl.attr
assert not hasattr(self.curl, 'attr')
|
Add a test for general object behavior as it seems to be changed by python 3 patch#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
import sys
class PycurlObjectTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_set_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
assert hasattr(self.curl, 'attr')
def test_get_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
def test_delete_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
assert hasattr(self.curl, 'attr')
del self.curl.attr
assert not hasattr(self.curl, 'attr')
|
<commit_before><commit_msg>Add a test for general object behavior as it seems to be changed by python 3 patch<commit_after>#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import unittest
import sys
class PycurlObjectTest(unittest.TestCase):
def setUp(self):
self.curl = pycurl.Curl()
def tearDown(self):
self.curl.close()
def test_set_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
assert hasattr(self.curl, 'attr')
def test_get_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
def test_delete_attribute(self):
assert not hasattr(self.curl, 'attr')
self.curl.attr = 1
self.assertEqual(1, self.curl.attr)
assert hasattr(self.curl, 'attr')
del self.curl.attr
assert not hasattr(self.curl, 'attr')
|
|
4c4bf05e34d46a396c3ac124c5f78bf37d142580
|
nodeconductor/structure/migrations/0037_remove_customer_billing_backend_id.py
|
nodeconductor/structure/migrations/0037_remove_customer_billing_backend_id.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0036_add_vat_fields'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='billing_backend_id',
),
]
|
Remove customer billing backend id
|
Remove customer billing backend id
- nc-1554
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Remove customer billing backend id
- nc-1554
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0036_add_vat_fields'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='billing_backend_id',
),
]
|
<commit_before><commit_msg>Remove customer billing backend id
- nc-1554<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0036_add_vat_fields'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='billing_backend_id',
),
]
|
Remove customer billing backend id
- nc-1554# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0036_add_vat_fields'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='billing_backend_id',
),
]
|
<commit_before><commit_msg>Remove customer billing backend id
- nc-1554<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0036_add_vat_fields'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='billing_backend_id',
),
]
|
|
dbf71a7f4973d22259e81e095402d39acb823651
|
pymatgen/symmetry/tests/test_spacegroup.py
|
pymatgen/symmetry/tests/test_spacegroup.py
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.
|
Add a unittest for spacegroup. Still very basic.
|
Python
|
mit
|
rousseab/pymatgen,ctoher/pymatgen,yanikou19/pymatgen,ctoher/pymatgen,sonium0/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,migueldiascosta/pymatgen,ctoher/pymatgen,Bismarrck/pymatgen,Dioptas/pymatgen,sonium0/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,Bismarrck/pymatgen,sonium0/pymatgen,rousseab/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,migueldiascosta/pymatgen,rousseab/pymatgen,Dioptas/pymatgen
|
Add a unittest for spacegroup. Still very basic.
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.<commit_after>
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.<commit_after>#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
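The test above follows a common fixture pattern: build the objects under test once in setUp, then assert the same predicate positively and negatively on paired inputs. A self-contained sketch of that pattern, with a toy equivalence check standing in for pymatgen's are_symmetrically_equivalent:

import unittest

def are_equivalent(sites1, sites2, tol):
    # Toy stand-in for are_symmetrically_equivalent: equal multisets only.
    return sorted(sites1) == sorted(sites2)

class EquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.sites = [0.0, 1.0, 2.0, 3.0]

    def test_pairs(self):
        self.assertTrue(are_equivalent(self.sites[0:2], self.sites[1::-1], 1e-3))
        self.assertFalse(are_equivalent(self.sites[0:2], self.sites[1:3], 1e-3))

if __name__ == "__main__":
    unittest.main()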
1098f43d5fb02328ce4708bd5a8c1b5f8ac23f51
|
py/optimal-division.py
|
py/optimal-division.py
|
class Solution(object):
def optimalDivision(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
min_result, max_result = dict(), dict()
lnums = len(nums)
def find_cut(start, end, need_max):
if start + 1 == end:
return 0, (nums[start], 1)
if need_max and (start, end) in max_result:
return max_result[start, end]
if not need_max and (start, end) in min_result:
return min_result[start, end]
if need_max:
M, Mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, True)
c2, v2 = find_cut(cut, end, False)
if M is None or v1[0] * v2[1] * M[1] > M[0] * v1[1] * v2[0]:
M, Mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
max_result[start, end] = Mcut, M
return max_result[start, end]
else:
m, mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, False)
c2, v2 = find_cut(cut, end, True)
if m is None or v1[0] * v2[1] * m[1] < m[0] * v1[1] * v2[0]:
m, mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
min_result[start, end] = mcut, m
return min_result[start, end]
def print_ans(start, end, need_max):
if start + 1 == end:
return str(nums[start])
cut, val = find_cut(start, end, need_max)
ans = print_ans(start, cut, need_max) + "/"
if end - cut > 1:
ans += "("
ans += print_ans(cut, end, not need_max)
if end - cut > 1:
ans += ")"
return ans
return print_ans(0, lnums, True)
|
Add py solution for 553. Optimal Division
|
Add py solution for 553. Optimal Division
553. Optimal Division: https://leetcode.com/problems/optimal-division/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 553. Optimal Division
553. Optimal Division: https://leetcode.com/problems/optimal-division/
|
class Solution(object):
def optimalDivision(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
min_result, max_result = dict(), dict()
lnums = len(nums)
def find_cut(start, end, need_max):
if start + 1 == end:
return 0, (nums[start], 1)
if need_max and (start, end) in max_result:
return max_result[start, end]
if not need_max and (start, end) in min_result:
return min_result[start, end]
if need_max:
M, Mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, True)
c2, v2 = find_cut(cut, end, False)
if M is None or v1[0] * v2[1] * M[1] > M[0] * v1[1] * v2[0]:
M, Mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
max_result[start, end] = Mcut, M
return max_result[start, end]
else:
m, mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, False)
c2, v2 = find_cut(cut, end, True)
if m is None or v1[0] * v2[1] * m[1] < m[0] * v1[1] * v2[0]:
m, mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
min_result[start, end] = mcut, m
return min_result[start, end]
def print_ans(start, end, need_max):
if start + 1 == end:
return str(nums[start])
cut, val = find_cut(start, end, need_max)
ans = print_ans(start, cut, need_max) + "/"
if end - cut > 1:
ans += "("
ans += print_ans(cut, end, not need_max)
if end - cut > 1:
ans += ")"
return ans
return print_ans(0, lnums, True)
|
<commit_before><commit_msg>Add py solution for 553. Optimal Division
553. Optimal Division: https://leetcode.com/problems/optimal-division/<commit_after>
|
class Solution(object):
def optimalDivision(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
min_result, max_result = dict(), dict()
lnums = len(nums)
def find_cut(start, end, need_max):
if start + 1 == end:
return 0, (nums[start], 1)
if need_max and (start, end) in max_result:
return max_result[start, end]
if not need_max and (start, end) in min_result:
return min_result[start, end]
if need_max:
M, Mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, True)
c2, v2 = find_cut(cut, end, False)
if M is None or v1[0] * v2[1] * M[1] > M[0] * v1[1] * v2[0]:
M, Mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
max_result[start, end] = Mcut, M
return max_result[start, end]
else:
m, mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, False)
c2, v2 = find_cut(cut, end, True)
if m is None or v1[0] * v2[1] * m[1] < m[0] * v1[1] * v2[0]:
m, mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
min_result[start, end] = mcut, m
return min_result[start, end]
def print_ans(start, end, need_max):
if start + 1 == end:
return str(nums[start])
cut, val = find_cut(start, end, need_max)
ans = print_ans(start, cut, need_max) + "/"
if end - cut > 1:
ans += "("
ans += print_ans(cut, end, not need_max)
if end - cut > 1:
ans += ")"
return ans
return print_ans(0, lnums, True)
|
Add py solution for 553. Optimal Division
553. Optimal Division: https://leetcode.com/problems/optimal-division/class Solution(object):
def optimalDivision(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
min_result, max_result = dict(), dict()
lnums = len(nums)
def find_cut(start, end, need_max):
if start + 1 == end:
return 0, (nums[start], 1)
if need_max and (start, end) in max_result:
return max_result[start, end]
if not need_max and (start, end) in min_result:
return min_result[start, end]
if need_max:
M, Mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, True)
c2, v2 = find_cut(cut, end, False)
if M is None or v1[0] * v2[1] * M[1] > M[0] * v1[1] * v2[0]:
M, Mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
max_result[start, end] = Mcut, M
return max_result[start, end]
else:
m, mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, False)
c2, v2 = find_cut(cut, end, True)
if m is None or v1[0] * v2[1] * m[1] < m[0] * v1[1] * v2[0]:
m, mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
min_result[start, end] = mcut, m
return min_result[start, end]
def print_ans(start, end, need_max):
if start + 1 == end:
return str(nums[start])
cut, val = find_cut(start, end, need_max)
ans = print_ans(start, cut, need_max) + "/"
if end - cut > 1:
ans += "("
ans += print_ans(cut, end, not need_max)
if end - cut > 1:
ans += ")"
return ans
return print_ans(0, lnums, True)
|
<commit_before><commit_msg>Add py solution for 553. Optimal Division
553. Optimal Division: https://leetcode.com/problems/optimal-division/<commit_after>class Solution(object):
def optimalDivision(self, nums):
"""
:type nums: List[int]
:rtype: str
"""
min_result, max_result = dict(), dict()
lnums = len(nums)
def find_cut(start, end, need_max):
if start + 1 == end:
return 0, (nums[start], 1)
if need_max and (start, end) in max_result:
return max_result[start, end]
if not need_max and (start, end) in min_result:
return min_result[start, end]
if need_max:
M, Mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, True)
c2, v2 = find_cut(cut, end, False)
if M is None or v1[0] * v2[1] * M[1] > M[0] * v1[1] * v2[0]:
M, Mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
max_result[start, end] = Mcut, M
return max_result[start, end]
else:
m, mcut = None, None
for cut in xrange(start + 1, end):
c1, v1 = find_cut(start, cut, False)
c2, v2 = find_cut(cut, end, True)
if m is None or v1[0] * v2[1] * m[1] < m[0] * v1[1] * v2[0]:
m, mcut = (v1[0] * v2[1], v1[1] * v2[0]), cut
min_result[start, end] = mcut, m
return min_result[start, end]
def print_ans(start, end, need_max):
if start + 1 == end:
return str(nums[start])
cut, val = find_cut(start, end, need_max)
ans = print_ans(start, cut, need_max) + "/"
if end - cut > 1:
ans += "("
ans += print_ans(cut, end, not need_max)
if end - cut > 1:
ans += ")"
return ans
return print_ans(0, lnums, True)
|
|
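The min/max DP in the record above is fully general, but for the positive integers this problem guarantees there is a well-known shortcut that gives the same answer: group everything after the first slash, so nums[0]/(nums[1]/.../nums[-1]) turns every later division into a multiplication of the numerator. A sketch:

def optimal_division(nums):
    strs = [str(n) for n in nums]
    if len(strs) <= 2:
        return "/".join(strs)
    # One pair of parentheses maximises the value for positive integers.
    return "%s/(%s)" % (strs[0], "/".join(strs[1:]))

assert optimal_division([1000, 100, 10, 2]) == "1000/(100/10/2)"
assert optimal_division([2, 3]) == "2/3"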
0ebd6e8e5e8c09f15e6170de03965de150edca22
|
crawler/crawler.py
|
crawler/crawler.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import requests
from lxml import html
class Songlist(object):
def __init__(self, url):
self.url = url
response = requests.get(url)
self.tree = html.fromstring(response.text)
def _get_num(self, css_expression):
text_in_tag = self.tree.cssselect(css_expression)[0].text
matched_num = re.search(r'\d+', text_in_tag)
if matched_num is None:
return 0
num = int(matched_num.group())
return num
@property
def name(self):
return self.tree.cssselect('h2')[0].text
@property
def plays(self):
return int(self.tree.cssselect('#play-count')[0].text)
@property
def comments(self):
return self._get_num('.u-btni-cmmt i')
@property
def shares(self):
return self._get_num('.u-btni-share i')
@property
def favourites(self):
return self._get_num('.u-btni-fav i')
@property
def tags(self):
if self.tree.cssselect('.tags'):
tags = [tag.text for tag in self.tree.cssselect('.u-tag i')]
return tags
else:
return []
@property
def meta(self):
songlist_meta = {
'name': self.name,
'url': self.url,
'plays': self.plays,
'comments': self.comments,
'shares': self.shares,
'favourites': self.favourites,
'tags': self.tags
}
return songlist_meta
|
Add a class for getting meta info from a songlist
|
Add a class for getting meta info from a songlist
|
Python
|
mit
|
lord63/wangyi_music_top100,lord63/wangyi_music_top100,lord63/wangyi_music_top100
|
Add a class for getting meta info from a songlist
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import requests
from lxml import html
class Songlist(object):
def __init__(self, url):
self.url = url
response = requests.get(url)
self.tree = html.fromstring(response.text)
def _get_num(self, css_expression):
text_in_tag = self.tree.cssselect(css_expression)[0].text
matched_num = re.search(r'\d+', text_in_tag)
if matched_num is None:
return 0
num = int(matched_num.group())
return num
@property
def name(self):
return self.tree.cssselect('h2')[0].text
@property
def plays(self):
return int(self.tree.cssselect('#play-count')[0].text)
@property
def comments(self):
return self._get_num('.u-btni-cmmt i')
@property
def shares(self):
return self._get_num('.u-btni-share i')
@property
def favourites(self):
return self._get_num('.u-btni-fav i')
@property
def tags(self):
if self.tree.cssselect('.tags'):
tags = [tag.text for tag in self.tree.cssselect('.u-tag i')]
return tags
else:
return []
@property
def meta(self):
songlist_meta = {
'name': self.name,
'url': self.url,
'plays': self.plays,
'comments': self.comments,
'shares': self.shares,
'favourites': self.favourites,
'tags': self.tags
}
return songlist_meta
|
<commit_before><commit_msg>Add a class for getting meta info from a songlist<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import requests
from lxml import html
class Songlist(object):
def __init__(self, url):
self.url = url
response = requests.get(url)
self.tree = html.fromstring(response.text)
def _get_num(self, css_expression):
text_in_tag = self.tree.cssselect(css_expression)[0].text
matched_num = re.search(r'\d+', text_in_tag)
if matched_num is None:
return 0
num = int(matched_num.group())
return num
@property
def name(self):
return self.tree.cssselect('h2')[0].text
@property
def plays(self):
return int(self.tree.cssselect('#play-count')[0].text)
@property
def comments(self):
return self._get_num('.u-btni-cmmt i')
@property
def shares(self):
return self._get_num('.u-btni-share i')
@property
def favourites(self):
return self._get_num('.u-btni-fav i')
@property
def tags(self):
if self.tree.cssselect('.tags'):
tags = [tag.text for tag in self.tree.cssselect('.u-tag i')]
return tags
else:
return []
@property
def meta(self):
songlist_meta = {
'name': self.name,
'url': self.url,
'plays': self.plays,
'comments': self.comments,
'shares': self.shares,
'favourites': self.favourites,
'tags': self.tags
}
return songlist_meta
|
Add a class for getting meta info from a songlist#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import requests
from lxml import html
class Songlist(object):
def __init__(self, url):
self.url = url
response = requests.get(url)
self.tree = html.fromstring(response.text)
def _get_num(self, css_expression):
text_in_tag = self.tree.cssselect(css_expression)[0].text
matched_num = re.search(r'\d+', text_in_tag)
if matched_num is None:
return 0
num = int(matched_num.group())
return num
@property
def name(self):
return self.tree.cssselect('h2')[0].text
@property
def plays(self):
return int(self.tree.cssselect('#play-count')[0].text)
@property
def comments(self):
return self._get_num('.u-btni-cmmt i')
@property
def shares(self):
return self._get_num('.u-btni-share i')
@property
def favourites(self):
return self._get_num('.u-btni-fav i')
@property
def tags(self):
if self.tree.cssselect('.tags'):
tags = [tag.text for tag in self.tree.cssselect('.u-tag i')]
return tags
else:
return []
@property
def meta(self):
songlist_meta = {
'name': self.name,
'url': self.url,
'plays': self.plays,
'comments': self.comments,
'shares': self.shares,
'favourites': self.favourites,
'tags': self.tags
}
return songlist_meta
|
<commit_before><commit_msg>Add a class for getting meta info from a songlist<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import requests
from lxml import html
class Songlist(object):
def __init__(self, url):
self.url = url
response = requests.get(url)
self.tree = html.fromstring(response.text)
def _get_num(self, css_expression):
text_in_tag = self.tree.cssselect(css_expression)[0].text
matched_num = re.search(r'\d+', text_in_tag)
if matched_num is None:
return 0
num = int(matched_num.group())
return num
@property
def name(self):
return self.tree.cssselect('h2')[0].text
@property
def plays(self):
return int(self.tree.cssselect('#play-count')[0].text)
@property
def comments(self):
return self._get_num('.u-btni-cmmt i')
@property
def shares(self):
return self._get_num('.u-btni-share i')
@property
def favourites(self):
return self._get_num('.u-btni-fav i')
@property
def tags(self):
if self.tree.cssselect('.tags'):
tags = [tag.text for tag in self.tree.cssselect('.u-tag i')]
return tags
else:
return []
@property
def meta(self):
songlist_meta = {
'name': self.name,
'url': self.url,
'plays': self.plays,
'comments': self.comments,
'shares': self.shares,
'favourites': self.favourites,
'tags': self.tags
}
return songlist_meta
|
|
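The _get_num helper in the record above mixes a CSS lookup with digit parsing; the parsing half is worth seeing in isolation, since counts arrive as text like '(1234)'. A minimal sketch:

import re

def first_int(text, default=0):
    # Pull the first run of digits out of a string, e.g. '(1234)' -> 1234;
    # fall back to the default when there are none, as _get_num does.
    match = re.search(r'\d+', text or '')
    return default if match is None else int(match.group())

assert first_int('(1234)') == 1234
assert first_int('share') == 0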
c00a32f913d7e22e7405b4560f8c618ea20071b8
|
python/cluster-test.py
|
python/cluster-test.py
|
#!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
from testutils import *
opts = parseOptions()
links = http.HEAD_for_links(opts)
print links
for fmt in [xmlfmt]:
t = TestUtils(opts, fmt)
print "=== ", fmt.MEDIA_TYPE, " ==="
for cluster in t.get(links['clusters'], fmt.parseClusterCollection):
print t.get(cluster.href, fmt.parseCluster)
dc = t.get(links['datacenters'], fmt.parseDataCenterCollection)[0]
print dc
for cpu in t.get(links['cpus'], fmt.parseCpuCollection):
print cpu
c = fmt.Cluster()
c.name = "foo"
c.cpu = fmt.CPU()
c.cpu.id = cpu.id
c.data_center = fmt.DataCenter()
c.data_center.id = dc.id
print c
c = t.create(links['clusters'], c, fmt.parseCluster)
t.delete(c.href)
|
Add a simple python clusters test
|
Add a simple python clusters test
|
Python
|
lgpl-2.1
|
colloquium/rhevm-api,markmc/rhevm-api,colloquium/rhevm-api,markmc/rhevm-api,markmc/rhevm-api
|
Add a simple python clusters test
|
#!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
from testutils import *
opts = parseOptions()
links = http.HEAD_for_links(opts)
print links
for fmt in [xmlfmt]:
t = TestUtils(opts, fmt)
print "=== ", fmt.MEDIA_TYPE, " ==="
for cluster in t.get(links['clusters'], fmt.parseClusterCollection):
print t.get(cluster.href, fmt.parseCluster)
dc = t.get(links['datacenters'], fmt.parseDataCenterCollection)[0]
print dc
for cpu in t.get(links['cpus'], fmt.parseCpuCollection):
print cpu
c = fmt.Cluster()
c.name = "foo"
c.cpu = fmt.CPU()
c.cpu.id = cpu.id
c.data_center = fmt.DataCenter()
c.data_center.id = dc.id
print c
c = t.create(links['clusters'], c, fmt.parseCluster)
t.delete(c.href)
|
<commit_before><commit_msg>Add a simple python clusters test<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
from testutils import *
opts = parseOptions()
links = http.HEAD_for_links(opts)
print links
for fmt in [xmlfmt]:
t = TestUtils(opts, fmt)
print "=== ", fmt.MEDIA_TYPE, " ==="
for cluster in t.get(links['clusters'], fmt.parseClusterCollection):
print t.get(cluster.href, fmt.parseCluster)
dc = t.get(links['datacenters'], fmt.parseDataCenterCollection)[0]
print dc
for cpu in t.get(links['cpus'], fmt.parseCpuCollection):
print cpu
c = fmt.Cluster()
c.name = "foo"
c.cpu = fmt.CPU()
c.cpu.id = cpu.id
c.data_center = fmt.DataCenter()
c.data_center.id = dc.id
print c
c = t.create(links['clusters'], c, fmt.parseCluster)
t.delete(c.href)
|
Add a simple python clusters test#!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
from testutils import *
opts = parseOptions()
links = http.HEAD_for_links(opts)
print links
for fmt in [xmlfmt]:
t = TestUtils(opts, fmt)
print "=== ", fmt.MEDIA_TYPE, " ==="
for cluster in t.get(links['clusters'], fmt.parseClusterCollection):
print t.get(cluster.href, fmt.parseCluster)
dc = t.get(links['datacenters'], fmt.parseDataCenterCollection)[0]
print dc
for cpu in t.get(links['cpus'], fmt.parseCpuCollection):
print cpu
c = fmt.Cluster()
c.name = "foo"
c.cpu = fmt.CPU()
c.cpu.id = cpu.id
c.data_center = fmt.DataCenter()
c.data_center.id = dc.id
print c
c = t.create(links['clusters'], c, fmt.parseCluster)
t.delete(c.href)
|
<commit_before><commit_msg>Add a simple python clusters test<commit_after>#!/usr/bin/env python
# Copyright (C) 2010 Red Hat, Inc.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
import http
import xmlfmt
import yamlfmt
import jsonfmt
from testutils import *
opts = parseOptions()
links = http.HEAD_for_links(opts)
print links
for fmt in [xmlfmt]:
t = TestUtils(opts, fmt)
print "=== ", fmt.MEDIA_TYPE, " ==="
for cluster in t.get(links['clusters'], fmt.parseClusterCollection):
print t.get(cluster.href, fmt.parseCluster)
dc = t.get(links['datacenters'], fmt.parseDataCenterCollection)[0]
print dc
for cpu in t.get(links['cpus'], fmt.parseCpuCollection):
print cpu
c = fmt.Cluster()
c.name = "foo"
c.cpu = fmt.CPU()
c.cpu.id = cpu.id
c.data_center = fmt.DataCenter()
c.data_center.id = dc.id
print c
c = t.create(links['clusters'], c, fmt.parseCluster)
t.delete(c.href)
|
|
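One fragility in the script above: anything that raises between t.create and t.delete leaves the test cluster behind on the server. A sketch of the usual try/finally guard, written against the same TestUtils interface (t.create and t.delete as used above):

def create_and_delete(t, collection_href, entity, parse):
    created = t.create(collection_href, entity, parse)
    try:
        print created  # verification of the created entity would go here
    finally:
        t.delete(created.href)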
669a2edf8fd92b8473a8e4fe45ff2afb41babfbb
|
algorithms/ids.py
|
algorithms/ids.py
|
"""
pynpuzzle - Solve n-puzzle with Python
Iterative deepening depth-first search algorithm
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
from .util.tree_search import Node
def search(state, goal_state):
"""Iterative deepening depth-first"""
depth = 0
def dls(node):
if node.is_goal(goal_state):
return node
if node.depth < depth:
node.expand()
for child in node.children:
result = dls(child)
if result:
return result
return None
answer = None
while not answer:
answer = dls(Node(state))
depth += 1
output = []
output.append(answer.state)
for parent in answer.parents():
output.append(parent.state)
output.reverse()
return output
|
Add Iterative deepening depth-first algorithm
|
Add Iterative deepening depth-first algorithm
|
Python
|
mit
|
mahdavipanah/pynpuzzle
|
Add Iterative deepening depth-first algorithm
|
"""
pynpuzzle - Solve n-puzzle with Python
Iterative deepening depth-first search algorithm
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
from .util.tree_search import Node
def search(state, goal_state):
"""Iterative deepening depth-first"""
depth = 0
def dls(node):
if node.is_goal(goal_state):
return node
if node.depth < depth:
node.expand()
for child in node.children:
result = dls(child)
if result:
return result
return None
answer = None
while not answer:
answer = dls(Node(state))
depth += 1
output = []
output.append(answer.state)
for parent in answer.parents():
output.append(parent.state)
output.reverse()
return output
|
<commit_before><commit_msg>Add Iterative deepening depth-first algorithm<commit_after>
|
"""
pynpuzzle - Solve n-puzzle with Python
Iterative deepening depth-first search algorithm
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
from .util.tree_search import Node
def search(state, goal_state):
"""Iterative deepening depth-first"""
depth = 0
def dls(node):
if node.is_goal(goal_state):
return node
if node.depth < depth:
node.expand()
for child in node.children:
result = dls(child)
if result:
return result
return None
answer = None
while not answer:
answer = dls(Node(state))
depth += 1
output = []
output.append(answer.state)
for parent in answer.parents():
output.append(parent.state)
output.reverse()
return output
|
Add Iterative deepening depth-first algorithm"""
pynpuzzle - Solve n-puzzle with Python
Iterative deepening depth-first search algorithm
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
from .util.tree_search import Node
def search(state, goal_state):
"""Iterative deepening depth-first"""
depth = 0
def dls(node):
if node.is_goal(goal_state):
return node
if node.depth < depth:
node.expand()
for child in node.children:
result = dls(child)
if result:
return result
return None
answer = None
while not answer:
answer = dls(Node(state))
depth += 1
output = []
output.append(answer.state)
for parent in answer.parents():
output.append(parent.state)
output.reverse()
return output
|
<commit_before><commit_msg>Add Iterative deepening depth-first algorithm<commit_after>"""
pynpuzzle - Solve n-puzzle with Python
Iterative deepening depth-first search algorithm
Version : 1.0.0
Author : Hamidreza Mahdavipanah
Repository: http://github.com/mahdavipanah/pynpuzzle
License : MIT License
"""
from .util.tree_search import Node
def search(state, goal_state):
"""Iterative deepening depth-first"""
depth = 0
def dls(node):
if node.is_goal(goal_state):
return node
if node.depth < depth:
node.expand()
for child in node.children:
result = dls(child)
if result:
return result
return None
answer = None
while not answer:
answer = dls(Node(state))
depth += 1
output = []
output.append(answer.state)
for parent in answer.parents():
output.append(parent.state)
output.reverse()
return output
|
|
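The dls closure above reads the enclosing depth variable; an equivalent formulation that threads the limit through as a parameter is a little easier to follow and behaves identically (Node is the same tree_search helper):

from .util.tree_search import Node

def search(state, goal_state):
    """Iterative deepening depth-first, explicit depth limit."""
    def dls(node, limit):
        if node.is_goal(goal_state):
            return node
        if limit > 0:
            node.expand()
            for child in node.children:
                found = dls(child, limit - 1)
                if found:
                    return found
        return None

    limit = 0
    while True:
        answer = dls(Node(state), limit)
        if answer:
            path = [parent.state for parent in answer.parents()]
            path.reverse()
            return path + [answer.state]
        limit += 1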
4f36b66690425998ecf803c459ca652e75ae9fca
|
src/examples/python/extractor_metadata.py
|
src/examples/python/extractor_metadata.py
|
import sys
import os, fnmatch
from essentia.standard import MetadataReader, YamlOutput
from essentia import Pool
FILE_EXT = ('.mp3', '.flac', '.ogg')
def find_files(directory, pattern):
for root, dirs, files in os.walk(directory):
for basename in files:
if basename.lower().endswith(pattern):
filename = os.path.join(root, basename)
yield filename
try:
indir = sys.argv[1]
result_file = sys.argv[2]
except:
print "usage:", sys.argv[0], "<input-directory> <result.json>"
sys.exit()
result = Pool()
files = [f for f in find_files(indir, FILE_EXT)]
print 'Found', len(files), 'audio files (' + '/'.join(FILE_EXT) + ')'
i = 0
for filename in files:
i += 1
print 'Extracting metadata:', filename
namespace = 'track_' + str(i)
meta = MetadataReader(filename=filename, failOnError=True, tagPoolName=namespace + '.metadata')()
pool_meta, duration, bitrate, samplerate, channels = meta[7:]
pool_meta.set(namespace + ".file_path", os.path.relpath(filename))
pool_meta.set(namespace + ".duration", duration)
pool_meta.set(namespace + ".bit_rate", bitrate)
pool_meta.set(namespace + ".sample_rate", samplerate)
pool_meta.set(namespace + ".channels", channels)
result.merge(pool_meta)
print "Saving results to", result_file
YamlOutput(filename=result_file, format='json', doubleCheck=True, writeVersion=False)(result)
|
Add script for metadata extraction given a folder
|
Add script for metadata extraction given a folder
- Extracts audio file metadata using MetadataReader for all audio
files in a given folder
|
Python
|
agpl-3.0
|
carthach/essentia,carthach/essentia,arseneyr/essentia,MTG/essentia,carthach/essentia,carthach/essentia,MTG/essentia,carthach/essentia,arseneyr/essentia,arseneyr/essentia,MTG/essentia,MTG/essentia,MTG/essentia,arseneyr/essentia,arseneyr/essentia
|
Add script for metadata extraction given a folder
- Extracts audio file metadata using MetadataReader for all audio
files in a given folder
|
import sys
import os, fnmatch
from essentia.standard import MetadataReader, YamlOutput
from essentia import Pool
FILE_EXT = ('.mp3', '.flac', '.ogg')
def find_files(directory, pattern):
for root, dirs, files in os.walk(directory):
for basename in files:
if basename.lower().endswith(pattern):
filename = os.path.join(root, basename)
yield filename
try:
indir = sys.argv[1]
result_file = sys.argv[2]
except:
print "usage:", sys.argv[0], "<input-directory> <result.json>"
sys.exit()
result = Pool()
files = [f for f in find_files(indir, FILE_EXT)]
print 'Found', len(files), 'audio files (' + '/'.join(FILE_EXT) + ')'
i = 0
for filename in files:
i += 1
print 'Extracting metadata:', filename
namespace = 'track_' + str(i)
meta = MetadataReader(filename=filename, failOnError=True, tagPoolName=namespace + '.metadata')()
pool_meta, duration, bitrate, samplerate, channels = meta[7:]
pool_meta.set(namespace + ".file_path", os.path.relpath(filename))
pool_meta.set(namespace + ".duration", duration)
pool_meta.set(namespace + ".bit_rate", bitrate)
pool_meta.set(namespace + ".sample_rate", samplerate)
pool_meta.set(namespace + ".channels", channels)
result.merge(pool_meta)
print "Saving results to", result_file
YamlOutput(filename=result_file, format='json', doubleCheck=True, writeVersion=False)(result)
|
<commit_before><commit_msg>Add script for metadata extraction given a folder
- Extracts audio file metadata using MetadataReader for all audio
files in a given folder<commit_after>
|
import sys
import os, fnmatch
from essentia.standard import MetadataReader, YamlOutput
from essentia import Pool
FILE_EXT = ('.mp3', '.flac', '.ogg')
def find_files(directory, pattern):
for root, dirs, files in os.walk(directory):
for basename in files:
if basename.lower().endswith(pattern):
filename = os.path.join(root, basename)
yield filename
try:
indir = sys.argv[1]
result_file = sys.argv[2]
except:
print "usage:", sys.argv[0], "<input-directory> <result.json>"
sys.exit()
result = Pool()
files = [f for f in find_files(indir, FILE_EXT)]
print 'Found', len(files), 'audio files (' + '/'.join(FILE_EXT) + ')'
i = 0
for filename in files:
i += 1
print 'Extracting metadata:', filename
namespace = 'track_' + str(i)
meta = MetadataReader(filename=filename, failOnError=True, tagPoolName=namespace + '.metadata')()
pool_meta, duration, bitrate, samplerate, channels = meta[7:]
pool_meta.set(namespace + ".file_path", os.path.relpath(filename))
pool_meta.set(namespace + ".duration", duration)
pool_meta.set(namespace + ".bit_rate", bitrate)
pool_meta.set(namespace + ".sample_rate", samplerate)
pool_meta.set(namespace + ".channels", channels)
result.merge(pool_meta)
print "Saving results to", result_file
YamlOutput(filename=result_file, format='json', doubleCheck=True, writeVersion=False)(result)
|
Add script for metadata extraction given a folder
- Extracts audio file metadata using MetadataReader for all audio
files in a given folderimport sys
import os, fnmatch
from essentia.standard import MetadataReader, YamlOutput
from essentia import Pool
FILE_EXT = ('.mp3', '.flac', '.ogg')
def find_files(directory, pattern):
for root, dirs, files in os.walk(directory):
for basename in files:
if basename.lower().endswith(pattern):
filename = os.path.join(root, basename)
yield filename
try:
indir = sys.argv[1]
result_file = sys.argv[2]
except:
print "usage:", sys.argv[0], "<input-directory> <result.json>"
sys.exit()
result = Pool()
files = [f for f in find_files(indir, FILE_EXT)]
print 'Found', len(files), 'audio files (' + '/'.join(FILE_EXT) + ')'
i = 0
for filename in files:
i += 1
print 'Extracting metadata:', filename
namespace = 'track_' + str(i)
meta = MetadataReader(filename=filename, failOnError=True, tagPoolName=namespace + '.metadata')()
pool_meta, duration, bitrate, samplerate, channels = meta[7:]
pool_meta.set(namespace + ".file_path", os.path.relpath(filename))
pool_meta.set(namespace + ".duration", duration)
pool_meta.set(namespace + ".bit_rate", bitrate)
pool_meta.set(namespace + ".sample_rate", samplerate)
pool_meta.set(namespace + ".channels", channels)
result.merge(pool_meta)
print "Saving results to", result_file
YamlOutput(filename=result_file, format='json', doubleCheck=True, writeVersion=False)(result)
|
<commit_before><commit_msg>Add script for metadata extraction given a folder
- Extracts audio file metadata using MetadataReader for all audio
files in a given folder<commit_after>import sys
import os, fnmatch
from essentia.standard import MetadataReader, YamlOutput
from essentia import Pool
FILE_EXT = ('.mp3', '.flac', '.ogg')
def find_files(directory, pattern):
for root, dirs, files in os.walk(directory):
for basename in files:
if basename.lower().endswith(pattern):
filename = os.path.join(root, basename)
yield filename
try:
indir = sys.argv[1]
result_file = sys.argv[2]
except:
print "usage:", sys.argv[0], "<input-directory> <result.json>"
sys.exit()
result = Pool()
files = [f for f in find_files(indir, FILE_EXT)]
print 'Found', len(files), 'audio files (' + '/'.join(FILE_EXT) + ')'
i = 0
for filename in files:
i += 1
print 'Extracting metadata:', filename
namespace = 'track_' + str(i)
meta = MetadataReader(filename=filename, failOnError=True, tagPoolName=namespace + '.metadata')()
pool_meta, duration, bitrate, samplerate, channels = meta[7:]
pool_meta.set(namespace + ".file_path", os.path.relpath(filename))
pool_meta.set(namespace + ".duration", duration)
pool_meta.set(namespace + ".bit_rate", bitrate)
pool_meta.set(namespace + ".sample_rate", samplerate)
pool_meta.set(namespace + ".channels", channels)
result.merge(pool_meta)
print "Saving results to", result_file
YamlOutput(filename=result_file, format='json', doubleCheck=True, writeVersion=False)(result)
|
|
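The bare except around the sys.argv reads above also swallows unrelated failures; a sketch of the same argument handling with argparse, which is available on the Python 2.7 this script targets:

import argparse

parser = argparse.ArgumentParser(description='Extract audio metadata to a JSON pool')
parser.add_argument('indir', help='directory to scan for audio files')
parser.add_argument('result_file', help='output JSON path')
args = parser.parse_args()
indir, result_file = args.indir, args.result_file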
a71ce9fb543c7108f8c606bc46674cfad0ff8cc7
|
migrations/versions/0218_another_letter_org.py
|
migrations/versions/0218_another_letter_org.py
|
"""empty message
Revision ID: 0218_another_letter_org
Revises: 0217_default_email_branding
"""
# revision identifiers, used by Alembic.
revision = '0218_another_letter_org'
down_revision = '0217_default_email_branding'
from alembic import op
NEW_ORGANISATIONS = [
('511', 'NHS'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add NHS logo for letters
|
Add NHS logo for letters
Matches: https://github.com/alphagov/notifications-template-preview/pull/192
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add NHS logo for letters
Matches: https://github.com/alphagov/notifications-template-preview/pull/192
|
"""empty message
Revision ID: 0218_another_letter_org
Revises: 0217_default_email_branding
"""
# revision identifiers, used by Alembic.
revision = '0218_another_letter_org'
down_revision = '0217_default_email_branding'
from alembic import op
NEW_ORGANISATIONS = [
('511', 'NHS'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add NHS logo for letters
Matches: https://github.com/alphagov/notifications-template-preview/pull/192<commit_after>
|
"""empty message
Revision ID: 0218_another_letter_org
Revises: 0217_default_email_branding
"""
# revision identifiers, used by Alembic.
revision = '0218_another_letter_org'
down_revision = '0217_default_email_branding'
from alembic import op
NEW_ORGANISATIONS = [
('511', 'NHS'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add NHS logo for letters
Matches: https://github.com/alphagov/notifications-template-preview/pull/192"""empty message
Revision ID: 0218_another_letter_org
Revises: 0217_default_email_branding
"""
# revision identifiers, used by Alembic.
revision = '0218_another_letter_org'
down_revision = '0217_default_email_branding'
from alembic import op
NEW_ORGANISATIONS = [
('511', 'NHS'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add NHS logo for letters
Matches: https://github.com/alphagov/notifications-template-preview/pull/192<commit_after>"""empty message
Revision ID: 0218_another_letter_org
Revises: 0217_default_email_branding
"""
# revision identifiers, used by Alembic.
revision = '0218_another_letter_org'
down_revision = '0217_default_email_branding'
from alembic import op
NEW_ORGANISATIONS = [
('511', 'NHS'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
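The str.format SQL in the record above is safe only because the values are hard-coded constants; with user-supplied input the same upgrade would normally use bound parameters instead. A sketch (NEW_ORGANISATIONS duplicated from the migration for self-containment):

from alembic import op
import sqlalchemy as sa

NEW_ORGANISATIONS = [('511', 'NHS')]

def upgrade():
    conn = op.get_bind()
    for numeric_id, name in NEW_ORGANISATIONS:
        conn.execute(
            sa.text("INSERT INTO dvla_organisation VALUES (:id, :name)"),
            {"id": numeric_id, "name": name},
        )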
6aa2e5d3b8519ae3b8e21431e4eaf34dd9d34f9f
|
tests/test_wysiwyg_editor.py
|
tests/test_wysiwyg_editor.py
|
from . import TheInternetTestCase
from helium.api import click, Text, press, CONTROL, COMMAND, write
from sys import platform
class WYSIWYGEditorTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/tinymce"
def test_use_wysiwyg_editor(self):
self.assertTrue(Text("Your content goes here.").exists())
click("Your content goes here.")
if platform == 'darwin':
press(COMMAND + 'a')
else:
press(CONTROL + 'a')
write("Hello Helium!")
self.assertTrue(Text("Hello Helium!").exists())
|
Add test case for tinyMCE (WYSIWYG editor).
|
Add test case for tinyMCE (WYSIWYG editor).
|
Python
|
mit
|
bugfree-software/the-internet-solution-python
|
Add test case for tinyMCE (WYSIWYG editor).
|
from . import TheInternetTestCase
from helium.api import click, Text, press, CONTROL, COMMAND, write
from sys import platform
class WYSIWYGEditorTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/tinymce"
def test_use_wysiwyg_editor(self):
self.assertTrue(Text("Your content goes here.").exists())
click("Your content goes here.")
if platform == 'darwin':
press(COMMAND + 'a')
else:
press(CONTROL + 'a')
write("Hello Helium!")
self.assertTrue(Text("Hello Helium!").exists())
|
<commit_before><commit_msg>Add test case for tinyMCE (WYSIWYG editor).<commit_after>
|
from . import TheInternetTestCase
from helium.api import click, Text, press, CONTROL, COMMAND, write
from sys import platform
class WYSIWYGEditorTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/tinymce"
def test_use_wysiwyg_editor(self):
self.assertTrue(Text("Your content goes here.").exists())
click("Your content goes here.")
if platform == 'darwin':
press(COMMAND + 'a')
else:
press(CONTROL + 'a')
write("Hello Helium!")
self.assertTrue(Text("Hello Helium!").exists())
|
Add test case for tinyMCE (WYSIWYG editor).from . import TheInternetTestCase
from helium.api import click, Text, press, CONTROL, COMMAND, write
from sys import platform
class WYSIWYGEditorTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/tinymce"
def test_use_wysiwyg_editor(self):
self.assertTrue(Text("Your content goes here.").exists())
click("Your content goes here.")
if platform == 'darwin':
press(COMMAND + 'a')
else:
press(CONTROL + 'a')
write("Hello Helium!")
self.assertTrue(Text("Hello Helium!").exists())
|
<commit_before><commit_msg>Add test case for tinyMCE (WYSIWYG editor).<commit_after>from . import TheInternetTestCase
from helium.api import click, Text, press, CONTROL, COMMAND, write
from sys import platform
class WYSIWYGEditorTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/tinymce"
def test_use_wysiwyg_editor(self):
self.assertTrue(Text("Your content goes here.").exists())
click("Your content goes here.")
if platform == 'darwin':
press(COMMAND + 'a')
else:
press(CONTROL + 'a')
write("Hello Helium!")
self.assertTrue(Text("Hello Helium!").exists())
|
|
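The darwin/other branch above is a recurring idiom in Helium tests; a small helper keeps it in one place, using the same helium.api names as the test:

from sys import platform
from helium.api import press, CONTROL, COMMAND

def select_all():
    # COMMAND on macOS, CONTROL on Windows/Linux.
    press((COMMAND if platform == 'darwin' else CONTROL) + 'a')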
b8a410bc2f54a89e0e44100a890183fcd6a88e3e
|
tools/convert-url-history.py
|
tools/convert-url-history.py
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
filename = sys.argv[1]
if not filename:
print "You must provide a filename."
sys.exit(1)
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
Add initial URL history importer
|
Add initial URL history importer
|
Python
|
apache-2.0
|
jimmsta/namebench-1,catap/namebench
|
Add initial URL history importer
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
filename = sys.argv[1]
if not filename:
print "You must provide a filename."
sys.exit(1)
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
<commit_before><commit_msg>Add initial URL history importer<commit_after>
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
filename = sys.argv[1]
if not filename:
print "You must provide a filename."
sys.exit(1)
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
Add initial URL history importer#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
filename = sys.argv[1]
if not filename:
print "You must provide a filename."
sys.exit(1)
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
<commit_before><commit_msg>Add initial URL history importer<commit_after>#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
filename = sys.argv[1]
if not filename:
print "You must provide a filename."
sys.exit(1)
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
|
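The hits dict plus operator.itemgetter sort in the record above is exactly what collections.Counter.most_common provides; a sketch of the tally-and-rank step with stand-in hostnames, keeping the script's Python 2 print:

from collections import Counter

hits = Counter()
hits.update(['www.example.com.', 'www.example.com.', 'news.example.org.'])  # stand-in hosts
for hit, count in hits.most_common():
    print 'A %s\t# %s hits' % (hit, count)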
2759119ea8a2afe6c47575825aef9ae59c1ce921
|
python/misc/oo/CSStudent.py
|
python/misc/oo/CSStudent.py
|
# Code from https://www.geeksforgeeks.org/g-fact-34-class-or-static-variables-in-python/
# Copy - Paste here to test it directly
# Python program to show that the variables with a value
# assigned in class declaration, are class variables
# Class for Computer Science Student
class CSStudent:
stream = 'cse' # Class Variable
def __init__(self,name,roll):
self.name = name # Instance Variable
self.roll = roll # Instance Variable
# Objects of CSStudent class
a = CSStudent('Geek', 1)
b = CSStudent('Nerd', 2)
print(a.stream) # prints "cse"
print(b.stream) # prints "cse"
print(a.name) # prints "Geek"
print(b.name) # prints "Nerd"
print(a.roll) # prints "1"
print(b.roll) # prints "2"
# Class variables can be accessed using class
# name also
print(CSStudent.stream) # prints "cse"
# Now if we change the stream for just a it won't be changed for b
a.stream = 'ece'
print(a.stream) # prints 'ece'
print(b.stream) # prints 'cse'
# To change the stream for all instances of the class we can change it
# directly from the class
CSStudent.stream = 'mech'
print(a.stream) # still prints 'ece', not 'mech': the instance attribute assigned above shadows the class variable
print(b.stream) # prints 'mech'
|
Test a code snippet from GeeksForGeeks
|
Test a code snippet from GeeksForGeeks
|
Python
|
mit
|
TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts,TGITS/programming-workouts
|
Test a code snippet from GeeksForGeeks
|
# Code from https://www.geeksforgeeks.org/g-fact-34-class-or-static-variables-in-python/
# Copy - Paste here to test it directly
# Python program to show that the variables with a value
# assigned in class declaration, are class variables
# Class for Computer Science Student
class CSStudent:
stream = 'cse' # Class Variable
def __init__(self,name,roll):
self.name = name # Instance Variable
self.roll = roll # Instance Variable
# Objects of CSStudent class
a = CSStudent('Geek', 1)
b = CSStudent('Nerd', 2)
print(a.stream) # prints "cse"
print(b.stream) # prints "cse"
print(a.name) # prints "Geek"
print(b.name) # prints "Nerd"
print(a.roll) # prints "1"
print(b.roll) # prints "2"
# Class variables can be accessed using class
# name also
print(CSStudent.stream) # prints "cse"
# Now if we change the stream for just a it won't be changed for b
a.stream = 'ece'
print(a.stream) # prints 'ece'
print(b.stream) # prints 'cse'
# To change the stream for all instances of the class we can change it
# directly from the class
CSStudent.stream = 'mech'
print(a.stream) # prints 'ece' -- the instance attribute set above still shadows the class variable
print(b.stream) # prints 'mech'
|
<commit_before><commit_msg>Test of a code snippet from GeeksForGeeks<commit_after>
|
# Code from https://www.geeksforgeeks.org/g-fact-34-class-or-static-variables-in-python/
# Copy - Paste here to test it directly
# Python program to show that the variables with a value
# assigned in class declaration, are class variables
# Class for Computer Science Student
class CSStudent:
stream = 'cse' # Class Variable
def __init__(self,name,roll):
self.name = name # Instance Variable
self.roll = roll # Instance Variable
# Objects of CSStudent class
a = CSStudent('Geek', 1)
b = CSStudent('Nerd', 2)
print(a.stream) # prints "cse"
print(b.stream) # prints "cse"
print(a.name) # prints "Geek"
print(b.name) # prints "Nerd"
print(a.roll) # prints "1"
print(b.roll) # prints "2"
# Class variables can be accessed using class
# name also
print(CSStudent.stream) # prints "cse"
# Now if we change the stream for just a it won't be changed for b
a.stream = 'ece'
print(a.stream) # prints 'ece'
print(b.stream) # prints 'cse'
# To change the stream for all instances of the class we can change it
# directly from the class
CSStudent.stream = 'mech'
print(a.stream) # prints 'ece' -- the instance attribute set above still shadows the class variable
print(b.stream) # prints 'mech'
|
Test of a code snippet from GeeksForGeeks# Code from https://www.geeksforgeeks.org/g-fact-34-class-or-static-variables-in-python/
# Copy - Paste here to test it directly
# Python program to show that the variables with a value
# assigned in class declaration, are class variables
# Class for Computer Science Student
class CSStudent:
stream = 'cse' # Class Variable
def __init__(self,name,roll):
self.name = name # Instance Variable
self.roll = roll # Instance Variable
# Objects of CSStudent class
a = CSStudent('Geek', 1)
b = CSStudent('Nerd', 2)
print(a.stream) # prints "cse"
print(b.stream) # prints "cse"
print(a.name) # prints "Geek"
print(b.name) # prints "Nerd"
print(a.roll) # prints "1"
print(b.roll) # prints "2"
# Class variables can be accessed using class
# name also
print(CSStudent.stream) # prints "cse"
# Now if we change the stream for just a it won't be changed for b
a.stream = 'ece'
print(a.stream) # prints 'ece'
print(b.stream) # prints 'cse'
# To change the stream for all instances of the class we can change it
# directly from the class
CSStudent.stream = 'mech'
print(a.stream) # prints 'ece' -- the instance attribute set above still shadows the class variable
print(b.stream) # prints 'mech'
|
<commit_before><commit_msg>Test of a code snippet from GeeksForGeeks<commit_after># Code from https://www.geeksforgeeks.org/g-fact-34-class-or-static-variables-in-python/
# Copy - Paste here to test it directly
# Python program to show that the variables with a value
# assigned in class declaration, are class variables
# Class for Computer Science Student
class CSStudent:
stream = 'cse' # Class Variable
def __init__(self,name,roll):
self.name = name # Instance Variable
self.roll = roll # Instance Variable
# Objects of CSStudent class
a = CSStudent('Geek', 1)
b = CSStudent('Nerd', 2)
print(a.stream) # prints "cse"
print(b.stream) # prints "cse"
print(a.name) # prints "Geek"
print(b.name) # prints "Nerd"
print(a.roll) # prints "1"
print(b.roll) # prints "2"
# Class variables can be accessed using class
# name also
print(CSStudent.stream) # prints "cse"
# Now if we change the stream for just a it won't be changed for b
a.stream = 'ece'
print(a.stream) # prints 'ece'
print(b.stream) # prints 'cse'
# To change the stream for all instances of the class we can change it
# directly from the class
CSStudent.stream = 'mech'
print(a.stream) # prints 'ece' -- the instance attribute set above still shadows the class variable
print(b.stream) # prints 'mech'
|
|
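A short standard-library sketch that makes the shadowing behaviour from the snippet above explicit by inspecting the instance namespace (class and attribute names are illustrative):

class Widget:
    kind = 'generic'          # class variable, shared by default

w = Widget()
print(w.kind)                 # 'generic' -- found on the class
print('kind' in vars(w))      # False -- nothing stored on the instance yet

w.kind = 'special'            # creates an *instance* attribute
print('kind' in vars(w))      # True -- it now shadows the class variable

Widget.kind = 'updated'       # rebinding the class variable...
print(w.kind)                 # 'special' -- ...does not affect the shadowed instance
del w.kind                    # removing the shadow...
print(w.kind)                 # 'updated' -- ...exposes the class variable again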
49c73b00b5528706fbb340e53b37e59c8303d70d
|
oneflow/settings/snippets/common_production.py
|
oneflow/settings/snippets/common_production.py
|
#
# Put production machines hostnames here.
#
# MANAGERS += (('Matthieu Chaignot', 'mc@1flow.io'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
|
#
# Put production machines hostnames here.
#
MANAGERS += (('Matthieu Chaignot', 'mchaignot@gmail.com'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
|
Add Matthieu to MANAGERS, for him to receive the warn-closed-feed mail.
|
Add Matthieu to MANAGERS, for him to receive the warn-closed-feed mail.
|
Python
|
agpl-3.0
|
1flow/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow
|
#
# Put production machines hostnames here.
#
# MANAGERS += (('Matthieu Chaignot', 'mc@1flow.io'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
Add Matthieu to MANAGERS, for him to receive the warn-closed-feed mail.
|
#
# Put production machines hostnames here.
#
MANAGERS += (('Matthieu Chaignot', 'mchaignot@gmail.com'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
|
<commit_before>#
# Put production machines hostnames here.
#
# MANAGERS += (('Matthieu Chaignot', 'mc@1flow.io'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
<commit_msg>Add Matthieu to MANAGERS, for him to receive the warn-closed-feed mail.<commit_after>
|
#
# Put production machines hostnames here.
#
MANAGERS += (('Matthieu Chaignot', 'mchaignot@gmail.com'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
|
#
# Put production machines hostnames here.
#
# MANAGERS += (('Matthieu Chaignot', 'mc@1flow.io'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
Add Matthieu to MANAGERS, for him to receive the warn-closed-feed mail.#
# Put production machines hostnames here.
#
MANAGERS += (('Matthieu Chaignot', 'mchaignot@gmail.com'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
|
<commit_before>#
# Put production machines hostnames here.
#
# MANAGERS += (('Matthieu Chaignot', 'mc@1flow.io'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
<commit_msg>Add Matthieu to MANAGERS, for him to receive the warn-closed-feed mail.<commit_after>#
# Put production machines hostnames here.
#
MANAGERS += (('Matthieu Chaignot', 'mchaignot@gmail.com'), )
ALLOWED_HOSTS += [
'1flow.io',
'app.1flow.io',
'api.1flow.io',
]
|
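For context, a hedged sketch of how Django consumes the MANAGERS tuple touched above: django.core.mail.mail_managers sends to every listed address (the addresses and the helper's call site here are illustrative, not from the commit):

# settings.py (illustrative values)
MANAGERS = (('Ops Team', 'ops@example.com'),)
EMAIL_SUBJECT_PREFIX = '[1flow] '

# somewhere in application code
from django.core.mail import mail_managers

def warn_closed_feed(feed_url):
    # Sends the message to every (name, address) pair in settings.MANAGERS,
    # with EMAIL_SUBJECT_PREFIX prepended to the subject.
    mail_managers(
        'Feed closed: %s' % feed_url,
        'The feed %s stopped responding and was closed.' % feed_url,
    )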
f25275dd2a65c9b962df3b522981d595c2d3809f
|
opendebates/migrations/0017_enable_unaccent.py
|
opendebates/migrations/0017_enable_unaccent.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations, models
def update_search_index(apps, schema):
print("Updating search field...")
call_command("update_search_field", "opendebates")
print("Updating search field...done")
def no_op(apps, schema):
pass
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0016_auto_20160212_1940'),
]
operations = [
migrations.RunSQL(
"""
CREATE EXTENSION IF NOT EXISTS unaccent;
ALTER FUNCTION unaccent(text) IMMUTABLE;
-- The next line doesn't work:
-- CREATE INDEX opendebates_submission_search_idx ON opendebates_submission USING gin(to_tsvector('english', search_index));
""",
"""
DROP EXTENSION IF EXISTS unaccent;
DROP INDEX IF EXISTS opendebates_submission_search_idx;
"""
),
migrations.RunPython(
update_search_index,
no_op
)
]
|
Add a migration for unaccent
|
Add a migration for unaccent
This is on top of https://github.com/caktus/django-opendebates/pull/88
|
Python
|
apache-2.0
|
ejucovy/django-opendebates,ejucovy/django-opendebates,ejucovy/django-opendebates,caktus/django-opendebates,ejucovy/django-opendebates,caktus/django-opendebates,caktus/django-opendebates,caktus/django-opendebates
|
Add a migration for unaccent
This is on top of https://github.com/caktus/django-opendebates/pull/88
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations, models
def update_search_index(apps, schema):
print("Updating search field...")
call_command("update_search_field", "opendebates")
print("Updating search field...done")
def no_op(apps, schema):
pass
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0016_auto_20160212_1940'),
]
operations = [
migrations.RunSQL(
"""
CREATE EXTENSION IF NOT EXISTS unaccent;
ALTER FUNCTION unaccent(text) IMMUTABLE;
-- The next line doesn't work:
-- CREATE INDEX opendebates_submission_search_idx ON opendebates_submission USING gin(to_tsvector('english', search_index));
""",
"""
DROP EXTENSION IF EXISTS unaccent;
DROP INDEX IF EXISTS opendebates_submission_search_idx;
"""
),
migrations.RunPython(
update_search_index,
no_op
)
]
|
<commit_before><commit_msg>Add a migration for unaccent
This is on top of https://github.com/caktus/django-opendebates/pull/88<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations, models
def update_search_index(apps, schema):
print("Updating search field...")
call_command("update_search_field", "opendebates")
print("Updating search field...done")
def no_op(apps, schema):
pass
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0016_auto_20160212_1940'),
]
operations = [
migrations.RunSQL(
"""
CREATE EXTENSION IF NOT EXISTS unaccent;
ALTER FUNCTION unaccent(text) IMMUTABLE;
-- The next line doesn't work:
-- CREATE INDEX opendebates_submission_search_idx ON opendebates_submission USING gin(to_tsvector('english', search_index));
""",
"""
DROP EXTENSION IF EXISTS unaccent;
DROP INDEX IF EXISTS opendebates_submission_search_idx;
"""
),
migrations.RunPython(
update_search_index,
no_op
)
]
|
Add a migration for unaccent
This is on top of https://github.com/caktus/django-opendebates/pull/88# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations, models
def update_search_index(apps, schema):
print("Updating search field...")
call_command("update_search_field", "opendebates")
print("Updating search field...done")
def no_op(apps, schema):
pass
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0016_auto_20160212_1940'),
]
operations = [
migrations.RunSQL(
"""
CREATE EXTENSION IF NOT EXISTS unaccent;
ALTER FUNCTION unaccent(text) IMMUTABLE;
-- The next line doesn't work:
-- CREATE INDEX opendebates_submission_search_idx ON opendebates_submission USING gin(to_tsvector('english', search_index));
""",
"""
DROP EXTENSION IF EXISTS unaccent;
DROP INDEX IF EXISTS opendebates_submission_search_idx;
"""
),
migrations.RunPython(
update_search_index,
no_op
)
]
|
<commit_before><commit_msg>Add a migration for unaccent
This is on top of https://github.com/caktus/django-opendebates/pull/88<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations, models
def update_search_index(apps, schema):
print("Updating search field...")
call_command("update_search_field", "opendebates")
print("Updating search field...done")
def no_op(apps, schema):
pass
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0016_auto_20160212_1940'),
]
operations = [
migrations.RunSQL(
"""
CREATE EXTENSION IF NOT EXISTS unaccent;
ALTER FUNCTION unaccent(text) IMMUTABLE;
-- The next line doesn't work:
-- CREATE INDEX opendebates_submission_search_idx ON opendebates_submission USING gin(to_tsvector('english', search_index));
""",
"""
DROP EXTENSION IF EXISTS unaccent;
DROP INDEX IF EXISTS opendebates_submission_search_idx;
"""
),
migrations.RunPython(
update_search_index,
no_op
)
]
|
|
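Once the unaccent extension from the migration above is installed, django.contrib.postgres exposes it as a queryset lookup; a minimal sketch (the Submission model and field name are assumptions based on the index comment above):

# Requires 'django.contrib.postgres' in INSTALLED_APPS plus the
# CREATE EXTENSION unaccent migration above.
from opendebates.models import Submission  # assumed model with a text 'headline' field

# The __unaccent lookup strips accents on both sides of the comparison,
# so a stored 'Résumé' matches a plain 'resume' query; it chains with iexact.
matches = Submission.objects.filter(headline__unaccent__iexact='resume')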
3e50e0cd6c55730bd2d2ed1c22b71ce67723a0d5
|
corehq/apps/data_interfaces/migrations/0018_check_for_rule_migration.py
|
corehq/apps/data_interfaces/migrations/0018_check_for_rule_migration.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import HqRunPython
def noop(*args, **kwargs):
pass
def assert_rule_migration_complete(apps, schema_editor):
AutomaticUpdateRule = apps.get_model('data_interfaces', 'AutomaticUpdateRule')
if AutomaticUpdateRule.objects.filter(migrated=False).count() > 0:
raise RuntimeError("""
If you are receiving this error in a Dimagi production environment,
it is unexpected.
Otherwise, you have unmigrated AutomaticUpdateRules. You can migrate them
by checking out tag `rule-migration` and running:
python manage.py migrate_rules
""")
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0017_alter_domaincaserulerun'),
]
operations = [
HqRunPython(assert_rule_migration_complete, reverse_code=noop),
]
|
Add blocking migration to ensure all rules have been migrated
|
Add blocking migration to ensure all rules have been migrated
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add blocking migration to ensure all rules have been migrated
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import HqRunPython
def noop(*args, **kwargs):
pass
def assert_rule_migration_complete(apps, schema_editor):
AutomaticUpdateRule = apps.get_model('data_interfaces', 'AutomaticUpdateRule')
if AutomaticUpdateRule.objects.filter(migrated=False).count() > 0:
raise RuntimeError("""
If you are receiving this error in a Dimagi production environment,
it is unexpected.
Otherwise, you have unmigrated AutomaticUpdateRules. You can migrate them
by checking out tag `rule-migration` and running:
python manage.py migrate_rules
""")
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0017_alter_domaincaserulerun'),
]
operations = [
HqRunPython(assert_rule_migration_complete, reverse_code=noop),
]
|
<commit_before><commit_msg>Add blocking migration to ensure all rules have been migrated<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import HqRunPython
def noop(*args, **kwargs):
pass
def assert_rule_migration_complete(apps, schema_editor):
AutomaticUpdateRule = apps.get_model('data_interfaces', 'AutomaticUpdateRule')
if AutomaticUpdateRule.objects.filter(migrated=False).count() > 0:
raise RuntimeError("""
If you are receiving this error in a Dimagi production environment,
it is unexpected.
Otherwise, you have unmigrated AutomaticUpdateRules. You can migrate them
by checking out tag `rule-migration` and running:
python manage.py migrate_rules
""")
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0017_alter_domaincaserulerun'),
]
operations = [
HqRunPython(assert_rule_migration_complete, reverse_code=noop),
]
|
Add blocking migration to ensure all rules have been migrated# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import HqRunPython
def noop(*args, **kwargs):
pass
def assert_rule_migration_complete(apps, schema_editor):
AutomaticUpdateRule = apps.get_model('data_interfaces', 'AutomaticUpdateRule')
if AutomaticUpdateRule.objects.filter(migrated=False).count() > 0:
raise RuntimeError("""
If you are receiving this error in a Dimagi production environment,
it is unexpected.
Otherwise, you have unmigrated AutomaticUpdateRules. You can migrate them
by checking out tag `rule-migration` and running:
python manage.py migrate_rules
""")
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0017_alter_domaincaserulerun'),
]
operations = [
HqRunPython(assert_rule_migration_complete, reverse_code=noop),
]
|
<commit_before><commit_msg>Add blocking migration to ensure all rules have been migrated<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import HqRunPython
def noop(*args, **kwargs):
pass
def assert_rule_migration_complete(apps, schema_editor):
AutomaticUpdateRule = apps.get_model('data_interfaces', 'AutomaticUpdateRule')
if AutomaticUpdateRule.objects.filter(migrated=False).count() > 0:
raise RuntimeError("""
If you are receiving this error in a Dimagi production environment,
it is unexpected.
Otherwise, you have unmigrated AutomaticUpdateRules. You can migrate them
by checking out tag `rule-migration` and running:
python manage.py migrate_rules
""")
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0017_alter_domaincaserulerun'),
]
operations = [
HqRunPython(assert_rule_migration_complete, reverse_code=noop),
]
|
|
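The pattern above -- a data migration whose only job is to abort a deploy while unmigrated rows remain -- generalises; a hedged sketch with placeholder app and model names:

from django.db import migrations

def noop(apps, schema_editor):
    pass  # nothing to undo: the check writes no data

def assert_backfill_complete(apps, schema_editor):
    # Placeholder app/model names; adapt to whatever backfill is being guarded.
    Record = apps.get_model('myapp', 'Record')
    remaining = Record.objects.filter(migrated=False).count()
    if remaining:
        raise RuntimeError(
            '%d rows still unmigrated; run the backfill command first.' % remaining
        )

class Migration(migrations.Migration):
    dependencies = [('myapp', '0001_initial')]
    operations = [
        migrations.RunPython(assert_backfill_complete, reverse_code=noop),
    ]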
2f25825812e38318076984a83a1d602d3d33bc9d
|
bin/tag_using_alchemy.py
|
bin/tag_using_alchemy.py
|
import copy
import requests
from urllib import quote_plus
from optparse import OptionParser
from pocketpy.auth import auth
from pocketpy.pocket import retrieve
KEYWORD_URL = "http://access.alchemyapi.com/calls/url/URLGetRankedKeywords"
def get_keywords_from_alchemy(access_token, item_url):
params = {"url": item_url, "apikey": access_token,
"maxRetrieve": 5, "outputMode": "json"}
response = requests.get(KEYWORD_URL, params=params)
body = response.json()
keywords = []
if body.get("keywords", None):
for keyword in body["keywords"]:
if float(keyword["relevance"]) > 0.9:
keywords.append(keyword["text"])
return keywords
def tag_items_if_not_already_tagged(credentials, items, access_token,
dry_run=True):
for uid, item in items.iteritems():
item_url = item.get("resolved_url")
keywords = get_keywords_from_alchemy(access_token, item_url)
if dry_run:
print item_url, keywords
else:
if "tags" not in item:
add_tags(credentials, uid, keywords)  # 'tags' was undefined here; the fetched keywords are what get applied
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('--alchemy-key', dest='alchemy_key',
help='alchemy access token')
parser.add_option('--dry-run', dest='dryrun', action='store_true',
default=False, help='Enable for a dry run')
(options, args) = parser.parse_args()
config = auth(options)
credentials = copy.deepcopy(config)
items = retrieve(config, verbose=True)
tag_items_if_not_already_tagged(credentials, items, options.alchemy_key,
dry_run=options.dryrun)
|
Tag using alchemy api. As requested by costaclayton
|
Tag using alchemy api. As requested by costaclayton
|
Python
|
mit
|
Newky/PocketPy
|
Tag using alchemy api. As requested by costaclayton
|
import copy
import requests
from urllib import quote_plus
from optparse import OptionParser
from pocketpy.auth import auth
from pocketpy.pocket import retrieve
KEYWORD_URL = "http://access.alchemyapi.com/calls/url/URLGetRankedKeywords"
def get_keywords_from_alchemy(access_token, item_url):
params = {"url": item_url, "apikey": access_token,
"maxRetrieve": 5, "outputMode": "json"}
response = requests.get(KEYWORD_URL, params=params)
body = response.json()
keywords = []
if body.get("keywords", None):
for keyword in body["keywords"]:
if float(keyword["relevance"]) > 0.9:
keywords.append(keyword["text"])
return keywords
def tag_items_if_not_already_tagged(credentials, items, access_token,
dry_run=True):
for uid, item in items.iteritems():
item_url = item.get("resolved_url")
keywords = get_keywords_from_alchemy(access_token, item_url)
if dry_run:
print item_url, keywords
else:
if "tags" not in item:
add_tags(credentials, uid, keywords)  # 'tags' was undefined here; the fetched keywords are what get applied
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('--alchemy-key', dest='alchemy_key',
help='alchemy access token')
parser.add_option('--dry-run', dest='dryrun', action='store_true',
default=False, help='Enable for a dry run')
(options, args) = parser.parse_args()
config = auth(options)
credentials = copy.deepcopy(config)
items = retrieve(config, verbose=True)
tag_items_if_not_already_tagged(credentials, items, options.alchemy_key,
dry_run=options.dryrun)
|
<commit_before><commit_msg>Tag using alchemy api. As requested by costaclayton<commit_after>
|
import copy
import requests
from urllib import quote_plus
from optparse import OptionParser
from pocketpy.auth import auth
from pocketpy.pocket import retrieve
KEYWORD_URL = "http://access.alchemyapi.com/calls/url/URLGetRankedKeywords"
def get_keywords_from_alchemy(access_token, item_url):
params = {"url": item_url, "apikey": access_token,
"maxRetrieve": 5, "outputMode": "json"}
response = requests.get(KEYWORD_URL, params=params)
body = response.json()
keywords = []
if body.get("keywords", None):
for keyword in body["keywords"]:
if float(keyword["relevance"]) > 0.9:
keywords.append(keyword["text"])
return keywords
def tag_items_if_not_already_tagged(credentials, items, access_token,
dry_run=True):
for uid, item in items.iteritems():
item_url = item.get("resolved_url")
keywords = get_keywords_from_alchemy(access_token, item_url)
if dry_run:
print item_url, keywords
else:
if "tags" not in item:
add_tags(credentials, uid, keywords)  # 'tags' was undefined here; the fetched keywords are what get applied
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('--alchemy-key', dest='alchemy_key',
help='alchemy access token')
parser.add_option('--dry-run', dest='dryrun', action='store_true',
default=False, help='Enable for a dry run')
(options, args) = parser.parse_args()
config = auth(options)
credentials = copy.deepcopy(config)
items = retrieve(config, verbose=True)
tag_items_if_not_already_tagged(credentials, items, options.alchemy_key,
dry_run=options.dryrun)
|
Tag using alchemy api. As requested by costaclaytonimport copy
import requests
from urllib import quote_plus
from optparse import OptionParser
from pocketpy.auth import auth
from pocketpy.pocket import retrieve
KEYWORD_URL = "http://access.alchemyapi.com/calls/url/URLGetRankedKeywords"
def get_keywords_from_alchemy(access_token, item_url):
params = {"url": item_url, "apikey": access_token,
"maxRetrieve": 5, "outputMode": "json"}
response = requests.get(KEYWORD_URL, params=params)
body = response.json()
keywords = []
if body.get("keywords", None):
for keyword in body["keywords"]:
if float(keyword["relevance"]) > 0.9:
keywords.append(keyword["text"])
return keywords
def tag_items_if_not_already_tagged(credentials, items, access_token,
dry_run=True):
for uid, item in items.iteritems():
item_url = item.get("resolved_url")
keywords = get_keywords_from_alchemy(access_token, item_url)
if dry_run:
print item_url, keywords
else:
if "tags" not in item:
add_tags(credentials, uid, keywords)  # 'tags' was undefined here; the fetched keywords are what get applied
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('--alchemy-key', dest='alchemy_key',
help='alchemy access token')
parser.add_option('--dry-run', dest='dryrun', action='store_true',
default=False, help='Enable for a dry run')
(options, args) = parser.parse_args()
config = auth(options)
credentials = copy.deepcopy(config)
items = retrieve(config, verbose=True)
tag_items_if_not_already_tagged(credentials, items, options.alchemy_key,
dry_run=options.dryrun)
|
<commit_before><commit_msg>Tag using alchemy api. As requested by costaclayton<commit_after>import copy
import requests
from urllib import quote_plus
from optparse import OptionParser
from pocketpy.auth import auth
from pocketpy.pocket import retrieve
KEYWORD_URL = "http://access.alchemyapi.com/calls/url/URLGetRankedKeywords"
def get_keywords_from_alchemy(access_token, item_url):
params = {"url": item_url, "apikey": access_token,
"maxRetrieve": 5, "outputMode": "json"}
response = requests.get(KEYWORD_URL, params=params)
body = response.json()
keywords = []
if body.get("keywords", None):
for keyword in body["keywords"]:
if float(keyword["relevance"]) > 0.9:
keywords.append(keyword["text"])
return keywords
def tag_items_if_not_already_tagged(credentials, items, access_token,
dry_run=True):
for uid, item in items.iteritems():
item_url = item.get("resolved_url")
keywords = get_keywords_from_alchemy(access_token, item_url)
if dry_run:
print item_url, keywords
else:
if "tags" not in item:
add_tags(credentials, uid, keywords)  # 'tags' was undefined here; the fetched keywords are what get applied
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('--alchemy-key', dest='alchemy_key',
help='alchemy access token')
parser.add_option('--dry-run', dest='dryrun', action='store_true',
default=False, help='Enable for a dry run')
(options, args) = parser.parse_args()
config = auth(options)
credentials = copy.deepcopy(config)
items = retrieve(config, verbose=True)
tag_items_if_not_already_tagged(credentials, items, options.alchemy_key,
dry_run=options.dryrun)
|
|
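A hardening sketch for the AlchemyAPI call above: same endpoint and parameters, with the relevance cutoff made a parameter and a guard against failed HTTP calls (function name and defaults are illustrative):

import requests

KEYWORD_URL = "http://access.alchemyapi.com/calls/url/URLGetRankedKeywords"

def get_keywords(api_key, item_url, min_relevance=0.9, timeout=10):
    """Return keyword texts above min_relevance, or [] on any failure."""
    params = {"url": item_url, "apikey": api_key,
              "maxRetrieve": 5, "outputMode": "json"}
    try:
        response = requests.get(KEYWORD_URL, params=params, timeout=timeout)
        response.raise_for_status()
        body = response.json()
    except (requests.RequestException, ValueError):
        return []  # network/HTTP error or a non-JSON body
    return [kw["text"] for kw in body.get("keywords", [])
            if float(kw["relevance"]) > min_relevance]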
609b27736728ce17f7f05594a2a44ed1cb0a9fd2
|
scripts/util/dl_daily_shapefiles.py
|
scripts/util/dl_daily_shapefiles.py
|
#!/usr/bin/env python
"""Example script to download daily shapefiles from the dailyerosion site"""
import datetime
import urllib2
start_time = datetime.date(2007, 1, 1)
end_time = datetime.date(2015, 9, 8)
interval = datetime.timedelta(days=1)
now = start_time
while now < end_time:
print("Downloading shapefile for %s" % (now.strftime("%d %b %Y"),))
uri = ("http://dailyerosion.org/dl/shapefile.py?dt=%s"
) % (now.strftime("%Y-%m-%d"), )
fn = "dep%s.zip" % (now.strftime("%Y%m%d"), )
o = open(fn, 'wb')
o.write(urllib2.urlopen(uri).read())
o.close()
now += interval
|
Add example script that downloads DEP daily shapefiles
|
Add example script that downloads DEP daily shapefiles
|
Python
|
mit
|
akrherz/idep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/dep,akrherz/idep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/dep,akrherz/idep
|
Add example script that downloads DEP daily shapefiles
|
#!/usr/bin/env python
"""Example script to download daily shapefiles from the dailyerosion site"""
import datetime
import urllib2
start_time = datetime.date(2007, 1, 1)
end_time = datetime.date(2015, 9, 8)
interval = datetime.timedelta(days=1)
now = start_time
while now < end_time:
print("Downloading shapefile for %s" % (now.strftime("%d %b %Y"),))
uri = ("http://dailyerosion.org/dl/shapefile.py?dt=%s"
) % (now.strftime("%Y-%m-%d"), )
fn = "dep%s.zip" % (now.strftime("%Y%m%d"), )
o = open(fn, 'wb')
o.write(urllib2.urlopen(uri).read())
o.close()
now += interval
|
<commit_before><commit_msg>Add example script that downloads DEP daily shapefiles<commit_after>
|
#!/usr/bin/env python
"""Example script to download daily shapefiles from the dailyerosion site"""
import datetime
import urllib2
start_time = datetime.date(2007, 1, 1)
end_time = datetime.date(2015, 9, 8)
interval = datetime.timedelta(days=1)
now = start_time
while now < end_time:
print("Downloading shapefile for %s" % (now.strftime("%d %b %Y"),))
uri = ("http://dailyerosion.org/dl/shapefile.py?dt=%s"
) % (now.strftime("%Y-%m-%d"), )
fn = "dep%s.zip" % (now.strftime("%Y%m%d"), )
o = open(fn, 'wb')
o.write(urllib2.urlopen(uri).read())
o.close()
now += interval
|
Add example script that downloads DEP daily shapefiles#!/usr/bin/env python
"""Example script to download daily shapefiles from the dailyerosion site"""
import datetime
import urllib2
start_time = datetime.date(2007, 1, 1)
end_time = datetime.date(2015, 9, 8)
interval = datetime.timedelta(days=1)
now = start_time
while now < end_time:
print("Downloading shapefile for %s" % (now.strftime("%d %b %Y"),))
uri = ("http://dailyerosion.org/dl/shapefile.py?dt=%s"
) % (now.strftime("%Y-%m-%d"), )
fn = "dep%s.zip" % (now.strftime("%Y%m%d"), )
o = open(fn, 'wb')
o.write(urllib2.urlopen(uri).read())
o.close()
now += interval
|
<commit_before><commit_msg>Add example script that downloads DEP daily shapefiles<commit_after>#!/usr/bin/env python
"""Example script to download daily shapefiles from the dailyerosion site"""
import datetime
import urllib2
start_time = datetime.date(2007, 1, 1)
end_time = datetime.date(2015, 9, 8)
interval = datetime.timedelta(days=1)
now = start_time
while now < end_time:
print("Downloading shapefile for %s" % (now.strftime("%d %b %Y"),))
uri = ("http://dailyerosion.org/dl/shapefile.py?dt=%s"
) % (now.strftime("%Y-%m-%d"), )
fn = "dep%s.zip" % (now.strftime("%Y%m%d"), )
o = open(fn, 'wb')
o.write(urllib2.urlopen(uri).read())
o.close()
now += interval
|
|
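A Python 3 counterpart sketch of the downloader above against the same dailyerosion.org endpoint; requests replaces urllib2 and HTTP errors stop the loop loudly (illustrative, not part of the commit):

#!/usr/bin/env python3
"""Sketch: download daily DEP shapefiles over a date range (Python 3)."""
import datetime
import requests

start = datetime.date(2007, 1, 1)
end = datetime.date(2015, 9, 8)

day = start
while day < end:
    uri = "http://dailyerosion.org/dl/shapefile.py?dt=%s" % day.strftime("%Y-%m-%d")
    fn = "dep%s.zip" % day.strftime("%Y%m%d")
    print("Downloading shapefile for %s" % day.strftime("%d %b %Y"))
    resp = requests.get(uri, timeout=60)
    resp.raise_for_status()  # stop loudly on HTTP errors
    with open(fn, "wb") as fh:
        fh.write(resp.content)
    day += datetime.timedelta(days=1)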
a5487a417d53645bb33bf4f4b466965679813890
|
sensu/plugins/check-static-route.py
|
sensu/plugins/check-static-route.py
|
#!/usr/bin/env python
#
# Checks static route for specific subnet
#
# Return CRITICAL if no route found
#
# Jose L Coello Enriquez <jlcoello@us.ibm.com>
import subprocess
import argparse
import sys
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
CRITICALITY = 'critical'
def switch_on_criticality():
if CRITICALITY == 'warning':
sys.exit(STATE_WARNING)
else:
sys.exit(STATE_CRITICAL)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--subnet', help='provide subnet ip')
parser.add_argument('-g', '--gateway', help='provide gateway')
parser.add_argument('-z', '--criticality', default='critical')
args = parser.parse_args()
    global CRITICALITY  # without this, the assignment creates a local that switch_on_criticality() never sees
    CRITICALITY = args.criticality
if args.subnet is None or args.gateway is None:
parser.print_help()
switch_on_criticality()
cmd = "ip route show %s via %s | grep %s" % (args.subnet , args.gateway, args.subnet)
subnet_check = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = subnet_check.communicate()
if subnet_check.returncode != 0 :
print('Gateway: %s not found for Subnet: %s' % (args.gateway , args.subnet))
switch_on_criticality()
else:
print('Gateway: %s set for Subnet: %s' % (args.gateway , args.subnet))
sys.exit(STATE_OK)
if __name__ == "__main__":
main()
|
Check that a static route is present for multiple subnets
|
Check that a static route is present for multiple subnets
|
Python
|
apache-2.0
|
blueboxgroup/ursula-monitoring,aacole/ursula-monitoring,aacole/ursula-monitoring,blueboxgroup/ursula-monitoring,aacole/ursula-monitoring,aacole/ursula-monitoring,blueboxgroup/ursula-monitoring,sivakom/ursula-monitoring,sivakom/ursula-monitoring,sivakom/ursula-monitoring,blueboxgroup/ursula-monitoring,sivakom/ursula-monitoring
|
Check that a static route is present for multiple subnets
|
#!/usr/bin/env python
#
# Checks static route for specific subnet
#
# Return CRITICAL if no route found
#
# Jose L Coello Enriquez <jlcoello@us.ibm.com>
import subprocess
import argparse
import sys
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
CRITICALITY = 'critical'
def switch_on_criticality():
if CRITICALITY == 'warning':
sys.exit(STATE_WARNING)
else:
sys.exit(STATE_CRITICAL)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--subnet', help='provide subnet ip')
parser.add_argument('-g', '--gateway', help='provide gateway')
parser.add_argument('-z', '--criticality', default='critical')
args = parser.parse_args()
    global CRITICALITY  # without this, the assignment creates a local that switch_on_criticality() never sees
    CRITICALITY = args.criticality
if args.subnet is None or args.gateway is None:
parser.print_help()
switch_on_criticality()
cmd = "ip route show %s via %s | grep %s" % (args.subnet , args.gateway, args.subnet)
subnet_check = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = subnet_check.communicate()
if subnet_check.returncode != 0 :
print('Gateway: %s not found for Subnet: %s' % (args.gateway , args.subnet))
switch_on_criticality()
else:
print('Gateway: %s set for Subnet: %s' % (args.gateway , args.subnet))
sys.exit(STATE_OK)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Check that a static route is present for multiple subnets<commit_after>
|
#!/usr/bin/env python
#
# Checks static route for specific subnet
#
# Return CRITICAL if no route found
#
# Jose L Coello Enriquez <jlcoello@us.ibm.com>
import subprocess
import argparse
import sys
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
CRITICALITY = 'critical'
def switch_on_criticality():
if CRITICALITY == 'warning':
sys.exit(STATE_WARNING)
else:
sys.exit(STATE_CRITICAL)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--subnet', help='provide subnet ip')
parser.add_argument('-g', '--gateway', help='provide gateway')
parser.add_argument('-z', '--criticality', default='critical')
args = parser.parse_args()
    global CRITICALITY  # without this, the assignment creates a local that switch_on_criticality() never sees
    CRITICALITY = args.criticality
if args.subnet is None or args.gateway is None:
parser.print_help()
switch_on_criticality()
cmd = "ip route show %s via %s | grep %s" % (args.subnet , args.gateway, args.subnet)
subnet_check = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = subnet_check.communicate()
if subnet_check.returncode != 0 :
print('Gateway: %s not found for Subnet: %s' % (args.gateway , args.subnet))
switch_on_criticality()
else:
print('Gateway: %s set for Subnet: %s' % (args.gateway , args.subnet))
sys.exit(STATE_OK)
if __name__ == "__main__":
main()
|
Check that a static route is present for multiple subnets#!/usr/bin/env python
#
# Checks static route for specific subnet
#
# Return CRITICAL if no route found
#
# Jose L Coello Enriquez <jlcoello@us.ibm.com>
import subprocess
import argparse
import sys
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
CRITICALITY = 'critical'
def switch_on_criticality():
if CRITICALITY == 'warning':
sys.exit(STATE_WARNING)
else:
sys.exit(STATE_CRITICAL)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--subnet', help='provide subnet ip')
parser.add_argument('-g', '--gateway', help='provide gateway')
parser.add_argument('-z', '--criticality', default='critical')
args = parser.parse_args()
    global CRITICALITY  # without this, the assignment creates a local that switch_on_criticality() never sees
    CRITICALITY = args.criticality
if args.subnet is None or args.gateway is None:
parser.print_help()
switch_on_criticality()
cmd = "ip route show %s via %s | grep %s" % (args.subnet , args.gateway, args.subnet)
subnet_check = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = subnet_check.communicate()
if subnet_check.returncode != 0 :
print('Gateway: %s not found for Subnet: %s' % (args.gateway , args.subnet))
switch_on_criticality()
else:
print('Gateway: %s set for Subnet: %s' % (args.gateway , args.subnet))
sys.exit(STATE_OK)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Check that a static route is present for multiple subnets<commit_after>#!/usr/bin/env python
#
# Checks static route for specific subnet
#
# Return CRITICAL if no route found
#
# Jose L Coello Enriquez <jlcoello@us.ibm.com>
import subprocess
import argparse
import sys
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
CRITICALITY = 'critical'
def switch_on_criticality():
if CRITICALITY == 'warning':
sys.exit(STATE_WARNING)
else:
sys.exit(STATE_CRITICAL)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--subnet', help='provide subnet ip')
parser.add_argument('-g', '--gateway', help='provide gateway')
parser.add_argument('-z', '--criticality', default='critical')
args = parser.parse_args()
    global CRITICALITY  # without this, the assignment creates a local that switch_on_criticality() never sees
    CRITICALITY = args.criticality
if args.subnet is None or args.gateway is None:
parser.print_help()
switch_on_criticality()
cmd = "ip route show %s via %s | grep %s" % (args.subnet , args.gateway, args.subnet)
subnet_check = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = subnet_check.communicate()
if subnet_check.returncode != 0 :
print('Gateway: %s not found for Subnet: %s' % (args.gateway , args.subnet))
switch_on_criticality()
else:
print('Gateway: %s set for Subnet: %s' % (args.gateway , args.subnet))
sys.exit(STATE_OK)
if __name__ == "__main__":
main()
|
|
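A sketch that avoids the shell pipeline ('ip route ... | grep') used above by invoking ip directly and checking its output in Python 3; 'via' is a standard route-list selector, and exit codes mirror the Sensu convention (an illustrative rewrite, not the committed check):

#!/usr/bin/env python3
"""Sketch: verify a static route without shelling out to grep (Python 3.7+)."""
import subprocess
import sys

STATE_OK, STATE_WARNING, STATE_CRITICAL = 0, 1, 2

def route_present(subnet, gateway):
    # 'ip route show <subnet> via <gateway>' prints a matching line or nothing.
    result = subprocess.run(
        ["ip", "route", "show", subnet, "via", gateway],
        capture_output=True, text=True)
    return subnet in result.stdout

if __name__ == "__main__":
    if len(sys.argv) != 3:
        sys.exit("usage: check_route.py SUBNET GATEWAY")
    subnet, gateway = sys.argv[1], sys.argv[2]
    if route_present(subnet, gateway):
        print("Gateway: %s set for Subnet: %s" % (gateway, subnet))
        sys.exit(STATE_OK)
    print("Gateway: %s not found for Subnet: %s" % (gateway, subnet))
    sys.exit(STATE_CRITICAL)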
e1138ebffbdfe31d4a4acdb4e164bdd767c6e8ea
|
saylua/wrappers.py
|
saylua/wrappers.py
|
from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
|
from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role() or not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
|
Fix for no role in admin access wrapper
|
Fix for no role in admin access wrapper
|
Python
|
agpl-3.0
|
LikeMyBread/Saylua,saylua/SayluaV2,LikeMyBread/Saylua,saylua/SayluaV2,saylua/SayluaV2,LikeMyBread/Saylua,LikeMyBread/Saylua
|
from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
Fix for no role in admin access wrapper
|
from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role() or not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
|
<commit_before>from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
<commit_msg>Fix for no role in admin access wrapper<commit_after>
|
from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role() or not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
|
from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
Fix for no role in admin access wrapperfrom flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role() or not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
|
<commit_before>from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
<commit_msg>Fix for no role in admin access wrapper<commit_after>from flask import redirect as _redirect, url_for, render_template, g
from functools import wraps
def login_required(f, redirect='login'):
"""Redirects non-logged in users to a specified location.
Usage: `@login_required`, `@login_required(redirect=<url>)`
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for(redirect))
return f(*args, **kwargs)
return decorated_function
def admin_access_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not g.logged_in:
return _redirect(url_for('login'))
if not g.user.get_role() or not g.user.get_role().can_access_admin:
return render_template('403.html'), 403
return f(*args, **kwargs)
return decorated_function
|
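The docstring above advertises @login_required(redirect=<url>), but as written the decorator only works bare; a sketch of a factory form that supports both call styles:

from functools import wraps
from flask import g, redirect as _redirect, url_for

def login_required(f=None, redirect='login'):
    """Usable bare (@login_required) or with args (@login_required(redirect='home'))."""
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            if not g.logged_in:
                return _redirect(url_for(redirect))
            return func(*args, **kwargs)
        return wrapped
    if f is None:          # called with arguments: @login_required(redirect=...)
        return decorator
    return decorator(f)    # called bare: @login_required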
ca11355cb4ece27ffb9fee8e9df304b43dd6b6b8
|
server/proposal/migrations/0034_fix_updated.py
|
server/proposal/migrations/0034_fix_updated.py
|
import django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.db.models import Max
def fix_updated(apps, _):
Proposal = apps.get_model("proposal", "Proposal")
proposals = Proposal.objects.annotate(published=Max("documents__published"))
for proposal in proposals:
if proposal.published:
proposal.updated = proposal.published
proposal.save()
class Migration(migrations.Migration):
dependencies = [
('proposal', '0033_non_null_started'),
]
operations = [
migrations.RunPython(fix_updated),
]
|
Set proposal updated date to the mod date of most recent Document
|
Set proposal updated date to the mod date of most recent Document
|
Python
|
mit
|
cityofsomerville/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/citydash,cityofsomerville/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,codeforboston/cornerwise,codeforboston/cornerwise,codeforboston/cornerwise
|
Set proposal updated date to the mod date of most recent Document
|
import django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.db.models import Max
def fix_updated(apps, _):
Proposal = apps.get_model("proposal", "Proposal")
proposals = Proposal.objects.annotate(published=Max("documents__published"))
for proposal in proposals:
if proposal.published:
proposal.updated = proposal.published
proposal.save()
class Migration(migrations.Migration):
dependencies = [
('proposal', '0033_non_null_started'),
]
operations = [
migrations.RunPython(fix_updated),
]
|
<commit_before><commit_msg>Set proposal updated date to the mod date of most recent Document<commit_after>
|
import django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.db.models import Max
def fix_updated(apps, _):
Proposal = apps.get_model("proposal", "Proposal")
proposals = Proposal.objects.annotate(published=Max("documents__published"))
for proposal in proposals:
if proposal.published:
proposal.updated = proposal.published
proposal.save()
class Migration(migrations.Migration):
dependencies = [
('proposal', '0033_non_null_started'),
]
operations = [
migrations.RunPython(fix_updated),
]
|
Set proposal updated date to the mod date of most recent Documentimport django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.db.models import Max
def fix_updated(apps, _):
Proposal = apps.get_model("proposal", "Proposal")
proposals = Proposal.objects.annotate(published=Max("documents__published"))
for proposal in proposals:
if proposal.published:
proposal.updated = proposal.published
proposal.save()
class Migration(migrations.Migration):
dependencies = [
('proposal', '0033_non_null_started'),
]
operations = [
migrations.RunPython(fix_updated),
]
|
<commit_before><commit_msg>Set proposal updated date to the mod date of most recent Document<commit_after>import django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.db.models import Max
def fix_updated(apps, _):
Proposal = apps.get_model("proposal", "Proposal")
proposals = Proposal.objects.annotate(published=Max("documents__published"))
for proposal in proposals:
if proposal.published:
proposal.updated = proposal.published
proposal.save()
class Migration(migrations.Migration):
dependencies = [
('proposal', '0033_non_null_started'),
]
operations = [
migrations.RunPython(fix_updated),
]
|
|
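The loop above issues one UPDATE per proposal; on Django 1.11+ the same backfill can be a single query with Subquery. A sketch -- the Document model and its 'proposal' FK are inferred from the documents__published lookup above, so treat the names as assumptions:

from django.db.models import Max, OuterRef, Subquery

def fix_updated(apps, _):
    Proposal = apps.get_model("proposal", "Proposal")
    Document = apps.get_model("proposal", "Document")  # assumed model behind 'documents'
    latest = (Document.objects
              .filter(proposal=OuterRef("pk"))         # assumed FK field name
              .values("proposal")
              .annotate(latest=Max("published"))
              .values("latest"))
    # One UPDATE with a correlated subquery instead of a save() per row.
    Proposal.objects.filter(documents__published__isnull=False).update(
        updated=Subquery(latest))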
5ca5736ec9c3357f7b68ed44b3154970561a8c3d
|
tests/test_specify_output_dir.py
|
tests/test_specify_output_dir.py
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter import main
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'cookiecutter': {
u'email': u'raphael@hackebrot.de',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
}
@pytest.fixture
def output_dir(tmpdir):
return tmpdir.mkdir('output')
@pytest.fixture
def template(tmpdir):
return tmpdir.mkdir('template')
@pytest.fixture(autouse=True)
def mock_gen_context(mocker, context):
mocker.patch('cookiecutter.main.generate_context', return_value=context)
@pytest.fixture(autouse=True)
def mock_prompt(mocker):
mocker.patch('cookiecutter.main.prompt_for_config')
@pytest.fixture(autouse=True)
def mock_replay(mocker):
mocker.patch('cookiecutter.main.dump')
def test_api_invocation(mocker, template, output_dir, context):
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template, output_dir=output_dir)
mock_gen_files.assert_called_once_with(
template,
context,
output_dir=output_dir
)
|
Implement a test to make sure output_dir is passed along to generate_files
|
Implement a test to make sure output_dir is passed along to
generate_files
|
Python
|
bsd-3-clause
|
willingc/cookiecutter,pjbull/cookiecutter,luzfcb/cookiecutter,pjbull/cookiecutter,terryjbates/cookiecutter,stevepiercy/cookiecutter,dajose/cookiecutter,michaeljoseph/cookiecutter,cguardia/cookiecutter,hackebrot/cookiecutter,dajose/cookiecutter,willingc/cookiecutter,audreyr/cookiecutter,michaeljoseph/cookiecutter,cguardia/cookiecutter,luzfcb/cookiecutter,terryjbates/cookiecutter,Springerle/cookiecutter,audreyr/cookiecutter,hackebrot/cookiecutter,ramiroluz/cookiecutter,ramiroluz/cookiecutter,stevepiercy/cookiecutter,Springerle/cookiecutter
|
Implement a test to make sure output_dir is passed along to
generate_files
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter import main
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'cookiecutter': {
u'email': u'raphael@hackebrot.de',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
}
@pytest.fixture
def output_dir(tmpdir):
return tmpdir.mkdir('output')
@pytest.fixture
def template(tmpdir):
return tmpdir.mkdir('template')
@pytest.fixture(autouse=True)
def mock_gen_context(mocker, context):
mocker.patch('cookiecutter.main.generate_context', return_value=context)
@pytest.fixture(autouse=True)
def mock_prompt(mocker):
mocker.patch('cookiecutter.main.prompt_for_config')
@pytest.fixture(autouse=True)
def mock_replay(mocker):
mocker.patch('cookiecutter.main.dump')
def test_api_invocation(mocker, template, output_dir, context):
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template, output_dir=output_dir)
mock_gen_files.assert_called_once_with(
template,
context,
output_dir=output_dir
)
|
<commit_before><commit_msg>Implement a test to make sure output_dir is passed along to
generate_files<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter import main
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'cookiecutter': {
u'email': u'raphael@hackebrot.de',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
}
@pytest.fixture
def output_dir(tmpdir):
return tmpdir.mkdir('output')
@pytest.fixture
def template(tmpdir):
return tmpdir.mkdir('template')
@pytest.fixture(autouse=True)
def mock_gen_context(mocker, context):
mocker.patch('cookiecutter.main.generate_context', return_value=context)
@pytest.fixture(autouse=True)
def mock_prompt(mocker):
mocker.patch('cookiecutter.main.prompt_for_config')
@pytest.fixture(autouse=True)
def mock_replay(mocker):
mocker.patch('cookiecutter.main.dump')
def test_api_invocation(mocker, template, output_dir, context):
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template, output_dir=output_dir)
mock_gen_files.assert_called_once_with(
template,
context,
output_dir=output_dir
)
|
Implement a test to make sure output_dir is passed along to
generate_files# -*- coding: utf-8 -*-
import pytest
from cookiecutter import main
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'cookiecutter': {
u'email': u'raphael@hackebrot.de',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
}
@pytest.fixture
def output_dir(tmpdir):
return tmpdir.mkdir('output')
@pytest.fixture
def template(tmpdir):
return tmpdir.mkdir('template')
@pytest.fixture(autouse=True)
def mock_gen_context(mocker, context):
mocker.patch('cookiecutter.main.generate_context', return_value=context)
@pytest.fixture(autouse=True)
def mock_prompt(mocker):
mocker.patch('cookiecutter.main.prompt_for_config')
@pytest.fixture(autouse=True)
def mock_replay(mocker):
mocker.patch('cookiecutter.main.dump')
def test_api_invocation(mocker, template, output_dir, context):
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template, output_dir=output_dir)
mock_gen_files.assert_called_once_with(
template,
context,
output_dir=output_dir
)
|
<commit_before><commit_msg>Implement a test to make sure output_dir is passed along to
generate_files<commit_after># -*- coding: utf-8 -*-
import pytest
from cookiecutter import main
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'cookiecutter': {
u'email': u'raphael@hackebrot.de',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
}
@pytest.fixture
def output_dir(tmpdir):
return tmpdir.mkdir('output')
@pytest.fixture
def template(tmpdir):
return tmpdir.mkdir('template')
@pytest.fixture(autouse=True)
def mock_gen_context(mocker, context):
mocker.patch('cookiecutter.main.generate_context', return_value=context)
@pytest.fixture(autouse=True)
def mock_prompt(mocker):
mocker.patch('cookiecutter.main.prompt_for_config')
@pytest.fixture(autouse=True)
def mock_replay(mocker):
mocker.patch('cookiecutter.main.dump')
def test_api_invocation(mocker, template, output_dir, context):
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
main.cookiecutter(template, output_dir=output_dir)
mock_gen_files.assert_called_once_with(
template,
context,
output_dir=output_dir
)
|
|
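The mocker fixture used above comes from the pytest-mock plugin; a tiny standalone sketch of the same patch-and-assert pattern (the function under test and the paths are illustrative):

# Sketch of the pytest-mock patch-and-assert pattern (pip install pytest-mock).
import json

def save_report(data, path):
    with open(path, 'w') as fh:
        json.dump(data, fh)

def test_save_report_delegates_to_json(mocker, tmpdir):
    mock_dump = mocker.patch('json.dump')   # patch lives only for this test
    save_report({'ok': True}, str(tmpdir.join('report.json')))
    mock_dump.assert_called_once()          # the delegation happened exactly once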
e49331122001bf142e7478037d9ad8e932103657
|
migrations/versions/306f880b11c3_.py
|
migrations/versions/306f880b11c3_.py
|
"""empty message
Revision ID: 306f880b11c3
Revises: 255f81eff867
Create Date: 2018-08-03 15:07:44.354557
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '306f880b11c3'
down_revision = '255f81eff867'
branch_labels = None
depends_on = None
def upgrade():
op.execute("UPDATE cluster \
SET description='Spark local - 1 core / 1GB RAM', \
address='local[*]', \
executor_cores=1, \
executor_memory='1G', \
executors=1 \
WHERE id=1;")
def downgrade():
op.execute("UPDATE cluster \
SET description='Default cluster - change it', \
address='localhost', \
executor_cores=0, \
executor_memory='', \
executors=0 \
WHERE id=1;")
|
Update cluster database to make local spark work
|
Update cluster database to make local spark work
|
Python
|
apache-2.0
|
eubr-bigsea/stand,eubr-bigsea/stand
|
Update cluster database to make local spark work
|
"""empty message
Revision ID: 306f880b11c3
Revises: 255f81eff867
Create Date: 2018-08-03 15:07:44.354557
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '306f880b11c3'
down_revision = '255f81eff867'
branch_labels = None
depends_on = None
def upgrade():
op.execute("UPDATE cluster \
SET description='Spark local - 1 core / 1GB RAM', \
address='local[*]', \
executor_cores=1, \
executor_memory='1G', \
executors=1 \
WHERE id=1;")
def downgrade():
op.execute("UPDATE cluster \
SET description='Default cluster - change it', \
address='localhost', \
executor_cores=0, \
executor_memory='', \
executors=0 \
WHERE id=1;")
|
<commit_before><commit_msg>Update cluster database to make local spark work<commit_after>
|
"""empty message
Revision ID: 306f880b11c3
Revises: 255f81eff867
Create Date: 2018-08-03 15:07:44.354557
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '306f880b11c3'
down_revision = '255f81eff867'
branch_labels = None
depends_on = None
def upgrade():
op.execute("UPDATE cluster \
SET description='Spark local - 1 core / 1GB RAM', \
address='local[*]', \
executor_cores=1, \
executor_memory='1G', \
executors=1 \
WHERE id=1;")
def downgrade():
op.execute("UPDATE cluster \
SET description='Default cluster - change it', \
address='localhost', \
executor_cores=0, \
executor_memory='', \
executors=0 \
WHERE id=1;")
|
Update cluster database to make local spark work"""empty message
Revision ID: 306f880b11c3
Revises: 255f81eff867
Create Date: 2018-08-03 15:07:44.354557
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '306f880b11c3'
down_revision = '255f81eff867'
branch_labels = None
depends_on = None
def upgrade():
op.execute("UPDATE cluster \
SET description='Spark local - 1 core / 1GB RAM', \
address='local[*]', \
executor_cores=1, \
executor_memory='1G', \
executors=1 \
WHERE id=1;")
def downgrade():
op.execute("UPDATE cluster \
SET description='Default cluster - change it', \
address='localhost', \
executor_cores=0, \
executor_memory='', \
executors=0 \
WHERE id=1;")
|
<commit_before><commit_msg>Update cluster database to make local spark work<commit_after>"""empty message
Revision ID: 306f880b11c3
Revises: 255f81eff867
Create Date: 2018-08-03 15:07:44.354557
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '306f880b11c3'
down_revision = '255f81eff867'
branch_labels = None
depends_on = None
def upgrade():
op.execute("UPDATE cluster \
SET description='Spark local - 1 core / 1GB RAM', \
address='local[*]', \
executor_cores=1, \
executor_memory='1G', \
executors=1 \
WHERE id=1;")
def downgrade():
op.execute("UPDATE cluster \
SET description='Default cluster - change it', \
address='localhost', \
executor_cores=0, \
executor_memory='', \
executors=0 \
WHERE id=1;")
|
|
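The migration above pushes raw SQL strings through op.execute. An equivalent sketch using a SQLAlchemy Core table stub — a common Alembic idiom for data migrations — is below; only the table and column names come from the SQL above, the column types are assumptions:

import sqlalchemy as sa
from alembic import op

# Minimal stand-in for the real model: only the columns the UPDATE touches.
cluster = sa.table(
    'cluster',
    sa.column('id', sa.Integer),
    sa.column('description', sa.String),
    sa.column('address', sa.String),
    sa.column('executor_cores', sa.Integer),
    sa.column('executor_memory', sa.String),
    sa.column('executors', sa.Integer),
)

def upgrade():
    op.execute(
        cluster.update()
        .where(cluster.c.id == 1)
        .values(description='Spark local - 1 core / 1GB RAM',
                address='local[*]',
                executor_cores=1,
                executor_memory='1G',
                executors=1)
    )
# downgrade() would mirror this with the previous default values.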
04a93e33afd41405fcc660e9cfd7de191def4657
|
backend/breach/tests/test_error_handling.py
|
backend/breach/tests/test_error_handling.py
|
from mock import patch
from django.utils import timezone
from django.test import TestCase
from breach.strategy import Strategy
from breach.models import Target, Victim, SampleSet
class ErrorHandlingTestCase(TestCase):
def setUp(self):
self.target = Target.objects.create(
endpoint='https://di.uoa.gr/?breach=%s',
prefix='test',
alphabet='01',
secretlength=5,
recordscardinality=1,
samplesize=2
)
self.victim = Victim.objects.create(
target=self.target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/',
recordscardinality=1
)
def tearDown(self):
for sampleset in SampleSet.objects.all():
sampleset.completed = timezone.now()
sampleset.save()
@patch('breach.strategy.Sniffer')
def test_calibration(self, Sniffer):
capture0 = {'data': 'bigbignextround', 'records': 3}
instance = Sniffer.return_value
instance.read.return_value = capture0
for _ in range(3):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.calibration_wait, 0.1)
@patch('breach.strategy.Sniffer')
def test_cardinality(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
capture = {'data': 'bigbignextround', 'records': 4}
instance = Sniffer.return_value
instance.read.return_value = capture
for _ in range(4):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.recordscardinality, 2)
self.assertEqual(self.target.recordscardinality, 2)
@patch('breach.strategy.Sniffer')
def test_success_error(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed(False)
self.assertTrue(not res)
|
Add strategy error handling tests
|
Add strategy error handling tests
|
Python
|
mit
|
dimriou/rupture,dimkarakostas/rupture,dimriou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,dimriou/rupture,dimkarakostas/rupture,dimriou/rupture
|
Add strategy error handling tests
|
from mock import patch
from django.utils import timezone
from django.test import TestCase
from breach.strategy import Strategy
from breach.models import Target, Victim, SampleSet
class ErrorHandlingTestCase(TestCase):
def setUp(self):
self.target = Target.objects.create(
endpoint='https://di.uoa.gr/?breach=%s',
prefix='test',
alphabet='01',
secretlength=5,
recordscardinality=1,
samplesize=2
)
self.victim = Victim.objects.create(
target=self.target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/',
recordscardinality=1
)
def tearDown(self):
for sampleset in SampleSet.objects.all():
sampleset.completed = timezone.now()
sampleset.save()
@patch('breach.strategy.Sniffer')
def test_calibration(self, Sniffer):
capture0 = {'data': 'bigbignextround', 'records': 3}
instance = Sniffer.return_value
instance.read.return_value = capture0
for _ in range(3):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.calibration_wait, 0.1)
@patch('breach.strategy.Sniffer')
def test_cardinality(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
capture = {'data': 'bigbignextround', 'records': 4}
instance = Sniffer.return_value
instance.read.return_value = capture
for _ in range(4):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.recordscardinality, 2)
self.assertEqual(self.target.recordscardinality, 2)
@patch('breach.strategy.Sniffer')
def test_success_error(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed(False)
self.assertTrue(not res)
|
<commit_before><commit_msg>Add strategy error handling tests<commit_after>
|
from mock import patch
from django.utils import timezone
from django.test import TestCase
from breach.strategy import Strategy
from breach.models import Target, Victim, SampleSet
class ErrorHandlingTestCase(TestCase):
def setUp(self):
self.target = Target.objects.create(
endpoint='https://di.uoa.gr/?breach=%s',
prefix='test',
alphabet='01',
secretlength=5,
recordscardinality=1,
samplesize=2
)
self.victim = Victim.objects.create(
target=self.target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/',
recordscardinality=1
)
def tearDown(self):
for sampleset in SampleSet.objects.all():
sampleset.completed = timezone.now()
sampleset.save()
@patch('breach.strategy.Sniffer')
def test_calibration(self, Sniffer):
capture0 = {'data': 'bigbignextround', 'records': 3}
instance = Sniffer.return_value
instance.read.return_value = capture0
for _ in range(3):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.calibration_wait, 0.1)
@patch('breach.strategy.Sniffer')
def test_cardinality(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
capture = {'data': 'bigbignextround', 'records': 4}
instance = Sniffer.return_value
instance.read.return_value = capture
for _ in range(4):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.recordscardinality, 2)
self.assertEqual(self.target.recordscardinality, 2)
@patch('breach.strategy.Sniffer')
def test_success_error(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed(False)
self.assertTrue(not res)
|
Add strategy error handling testsfrom mock import patch
from django.utils import timezone
from django.test import TestCase
from breach.strategy import Strategy
from breach.models import Target, Victim, SampleSet
class ErrorHandlingTestCase(TestCase):
def setUp(self):
self.target = Target.objects.create(
endpoint='https://di.uoa.gr/?breach=%s',
prefix='test',
alphabet='01',
secretlength=5,
recordscardinality=1,
samplesize=2
)
self.victim = Victim.objects.create(
target=self.target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/',
recordscardinality=1
)
def tearDown(self):
for sampleset in SampleSet.objects.all():
sampleset.completed = timezone.now()
sampleset.save()
@patch('breach.strategy.Sniffer')
def test_calibration(self, Sniffer):
capture0 = {'data': 'bigbignextround', 'records': 3}
instance = Sniffer.return_value
instance.read.return_value = capture0
for _ in range(3):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.calibration_wait, 0.1)
@patch('breach.strategy.Sniffer')
def test_cardinality(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
capture = {'data': 'bigbignextround', 'records': 4}
instance = Sniffer.return_value
instance.read.return_value = capture
for _ in range(4):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.recordscardinality, 2)
self.assertEqual(self.target.recordscardinality, 2)
@patch('breach.strategy.Sniffer')
def test_success_error(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed(False)
self.assertTrue(not res)
|
<commit_before><commit_msg>Add strategy error handling tests<commit_after>from mock import patch
from django.utils import timezone
from django.test import TestCase
from breach.strategy import Strategy
from breach.models import Target, Victim, SampleSet
class ErrorHandlingTestCase(TestCase):
def setUp(self):
self.target = Target.objects.create(
endpoint='https://di.uoa.gr/?breach=%s',
prefix='test',
alphabet='01',
secretlength=5,
recordscardinality=1,
samplesize=2
)
self.victim = Victim.objects.create(
target=self.target,
sourceip='192.168.10.140',
snifferendpoint='http://localhost/',
recordscardinality=1
)
def tearDown(self):
for sampleset in SampleSet.objects.all():
sampleset.completed = timezone.now()
sampleset.save()
@patch('breach.strategy.Sniffer')
def test_calibration(self, Sniffer):
capture0 = {'data': 'bigbignextround', 'records': 3}
instance = Sniffer.return_value
instance.read.return_value = capture0
for _ in range(3):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.calibration_wait, 0.1)
@patch('breach.strategy.Sniffer')
def test_cardinality(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
capture = {'data': 'bigbignextround', 'records': 4}
instance = Sniffer.return_value
instance.read.return_value = capture
for _ in range(4):
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed()
self.assertTrue(not res)
self.assertEqual(self.victim.recordscardinality, 2)
self.assertEqual(self.target.recordscardinality, 2)
@patch('breach.strategy.Sniffer')
def test_success_error(self, Sniffer):
capture = {'data': 'bigbignextround', 'records': 2}
instance = Sniffer.return_value
instance.read.return_value = capture
strategy = Strategy(self.victim)
strategy.get_work()
res = strategy.work_completed(False)
self.assertTrue(not res)
|
|
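The tests above follow one mocking recipe throughout: patch the Sniffer class where the strategy looks it up, then pre-program read() on the shared return_value instance. A minimal, self-contained sketch of that recipe using unittest.mock (Sniffer/collect here are stand-ins, not the breach package):

from unittest import TestCase
from unittest.mock import patch

class Sniffer:
    def read(self):
        # pretend this captures live traffic; patched in tests
        raise RuntimeError("patched in tests")

def collect():
    return Sniffer().read()["records"]

class MockPatternTest(TestCase):
    @patch(f"{__name__}.Sniffer")
    def test_read_is_stubbed(self, MockSniffer):
        # Sniffer() inside collect() returns MockSniffer.return_value,
        # so configuring that single instance stubs every construction.
        MockSniffer.return_value.read.return_value = {"records": 4}
        self.assertEqual(collect(), 4)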
4e48b4587d8af6cce0cc083db10287c8a9f933e3
|
libcloud/test/loadbalancer/test_ninefold.py
|
libcloud/test/loadbalancer/test_ninefold.py
|
import sys
import unittest
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.providers import get_driver
class NinefoldLbTestCase(unittest.TestCase):
def test_driver_instantiation(self):
cls = get_driver(Provider.NINEFOLD)
cls('username', 'key')
if __name__ == '__main__':
sys.exit(unittest.main())
|
Add a test case for ninefold loadbalancer driver.
|
Add a test case for ninefold loadbalancer driver.
|
Python
|
apache-2.0
|
mtekel/libcloud,Kami/libcloud,briancurtin/libcloud,mistio/libcloud,briancurtin/libcloud,Jc2k/libcloud,t-tran/libcloud,t-tran/libcloud,vongazman/libcloud,mgogoulos/libcloud,jimbobhickville/libcloud,cryptickp/libcloud,carletes/libcloud,wrigri/libcloud,sahildua2305/libcloud,munkiat/libcloud,thesquelched/libcloud,mtekel/libcloud,lochiiconnectivity/libcloud,atsaki/libcloud,samuelchong/libcloud,techhat/libcloud,SecurityCompass/libcloud,lochiiconnectivity/libcloud,Scalr/libcloud,pantheon-systems/libcloud,Verizon/libcloud,Scalr/libcloud,atsaki/libcloud,Cloud-Elasticity-Services/as-libcloud,StackPointCloud/libcloud,erjohnso/libcloud,ByteInternet/libcloud,kater169/libcloud,mtekel/libcloud,sfriesel/libcloud,kater169/libcloud,kater169/libcloud,NexusIS/libcloud,wido/libcloud,illfelder/libcloud,niteoweb/libcloud,JamesGuthrie/libcloud,ByteInternet/libcloud,NexusIS/libcloud,pantheon-systems/libcloud,Scalr/libcloud,schaubl/libcloud,cryptickp/libcloud,watermelo/libcloud,wuyuewen/libcloud,mathspace/libcloud,jerryblakley/libcloud,StackPointCloud/libcloud,marcinzaremba/libcloud,ByteInternet/libcloud,Kami/libcloud,thesquelched/libcloud,pantheon-systems/libcloud,mbrukman/libcloud,marcinzaremba/libcloud,curoverse/libcloud,mistio/libcloud,wrigri/libcloud,andrewsomething/libcloud,schaubl/libcloud,cloudControl/libcloud,Jc2k/libcloud,wido/libcloud,atsaki/libcloud,MrBasset/libcloud,watermelo/libcloud,iPlantCollaborativeOpenSource/libcloud,mistio/libcloud,sgammon/libcloud,Verizon/libcloud,erjohnso/libcloud,sergiorua/libcloud,jimbobhickville/libcloud,cloudControl/libcloud,wuyuewen/libcloud,illfelder/libcloud,Verizon/libcloud,SecurityCompass/libcloud,sergiorua/libcloud,watermelo/libcloud,pquentin/libcloud,JamesGuthrie/libcloud,MrBasset/libcloud,apache/libcloud,aleGpereira/libcloud,thesquelched/libcloud,JamesGuthrie/libcloud,Itxaka/libcloud,Kami/libcloud,wuyuewen/libcloud,DimensionDataCBUSydney/libcloud,MrBasset/libcloud,techhat/libcloud,ZuluPro/libcloud,mathspace/libcloud,munkiat/libcloud,wrigri/libcloud,jimbobhickville/libcloud,mgogoulos/libcloud,smaffulli/libcloud,ZuluPro/libcloud,StackPointCloud/libcloud,apache/libcloud,vongazman/libcloud,niteoweb/libcloud,sergiorua/libcloud,wido/libcloud,sfriesel/libcloud,sahildua2305/libcloud,marcinzaremba/libcloud,andrewsomething/libcloud,erjohnso/libcloud,samuelchong/libcloud,pquentin/libcloud,t-tran/libcloud,sfriesel/libcloud,aviweit/libcloud,supertom/libcloud,dcorbacho/libcloud,carletes/libcloud,briancurtin/libcloud,mathspace/libcloud,ZuluPro/libcloud,dcorbacho/libcloud,carletes/libcloud,ClusterHQ/libcloud,Itxaka/libcloud,aviweit/libcloud,munkiat/libcloud,jerryblakley/libcloud,illfelder/libcloud,mbrukman/libcloud,mbrukman/libcloud,NexusIS/libcloud,techhat/libcloud,smaffulli/libcloud,iPlantCollaborativeOpenSource/libcloud,pquentin/libcloud,andrewsomething/libcloud,cryptickp/libcloud,cloudControl/libcloud,curoverse/libcloud,supertom/libcloud,sgammon/libcloud,aleGpereira/libcloud,iPlantCollaborativeOpenSource/libcloud,DimensionDataCBUSydney/libcloud,schaubl/libcloud,SecurityCompass/libcloud,mgogoulos/libcloud,jerryblakley/libcloud,sahildua2305/libcloud,niteoweb/libcloud,vongazman/libcloud,DimensionDataCBUSydney/libcloud,aviweit/libcloud,supertom/libcloud,ClusterHQ/libcloud,dcorbacho/libcloud,samuelchong/libcloud,Itxaka/libcloud,apache/libcloud,Cloud-Elasticity-Services/as-libcloud,curoverse/libcloud,smaffulli/libcloud,lochiiconnectivity/libcloud,aleGpereira/libcloud,Cloud-Elasticity-Services/as-libcloud
|
Add a test case for ninefold loadbalancer driver.
|
import sys
import unittest
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.providers import get_driver
class NinefoldLbTestCase(unittest.TestCase):
def test_driver_instantiation(self):
cls = get_driver(Provider.NINEFOLD)
cls('username', 'key')
if __name__ == '__main__':
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add a test case for ninefold loadbalancer driver.<commit_after>
|
import sys
import unittest
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.providers import get_driver
class NinefoldLbTestCase(unittest.TestCase):
def test_driver_instantiation(self):
cls = get_driver(Provider.NINEFOLD)
cls('username', 'key')
if __name__ == '__main__':
sys.exit(unittest.main())
|
Add a test case for ninefold loadbalancer driver.import sys
import unittest
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.providers import get_driver
class NinefoldLbTestCase(unittest.TestCase):
def test_driver_instantiation(self):
cls = get_driver(Provider.NINEFOLD)
cls('username', 'key')
if __name__ == '__main__':
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add a test case for ninefold loadbalancer driver.<commit_after>import sys
import unittest
from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.providers import get_driver
class NinefoldLbTestCase(unittest.TestCase):
def test_driver_instantiation(self):
cls = get_driver(Provider.NINEFOLD)
cls('username', 'key')
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
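The record above is a pure smoke test: resolve the driver class for a provider constant and construct it. A slightly extended sketch along the same lines, assuming a libcloud release that still registers Provider.NINEFOLD:

import unittest

from libcloud.loadbalancer.types import Provider
from libcloud.loadbalancer.providers import get_driver

class DriverSmokeTest(unittest.TestCase):
    def test_ninefold_resolves_and_instantiates(self):
        # get_driver returns the driver class itself, not an instance
        cls = get_driver(Provider.NINEFOLD)
        driver = cls('username', 'key')
        self.assertIsInstance(driver, cls)

if __name__ == '__main__':
    unittest.main()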
bb578d4237ccaf16fe5c38842cc100cdbefc0119
|
senlin/tests/functional/drivers/openstack/__init__.py
|
senlin/tests/functional/drivers/openstack/__init__.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
def compute(params):
return nova_v2.NovaClient(params)
def loadbalancing(params):
return lbaas.LoadBalancerDriver(params)
def network(params):
return neutron_v2.NeutronClient(params)
def orchestration(params):
return heat_v1.HeatClient(params)
def telemetry(params):
return ceilometer_v2.CeilometerClient(params)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import keystone_v3
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
compute = nova_v2.NovaClient
identity = keystone_v3.KeystoneClient
loadbalancing = lbaas.LoadBalancerDriver
network = neutron_v2.NeutronClient
orchestration = heat_v1.HeatClient
telemetry = ceilometer_v2.CeilometerClient
|
Add keystone driver plugin for functional test
|
Add keystone driver plugin for functional test
This patch adds keystone driver plugin for functional test.
Change-Id: Iefa9c1b8956854ae75f672627aa3d2f9f7d22c0e
|
Python
|
apache-2.0
|
openstack/senlin,stackforge/senlin,tengqm/senlin-container,stackforge/senlin,openstack/senlin,tengqm/senlin-container,Alzon/senlin,Alzon/senlin,openstack/senlin
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
def compute(params):
return nova_v2.NovaClient(params)
def loadbalancing(params):
return lbaas.LoadBalancerDriver(params)
def network(params):
return neutron_v2.NeutronClient(params)
def orchestration(params):
return heat_v1.HeatClient(params)
def telemetry(params):
return ceilometer_v2.CeilometerClient(params)
Add keystone driver plugin for functional test
This patch adds keystone driver plugin for functional test.
Change-Id: Iefa9c1b8956854ae75f672627aa3d2f9f7d22c0e
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import keystone_v3
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
compute = nova_v2.NovaClient
identity = keystone_v3.KeystoneClient
loadbalancing = lbaas.LoadBalancerDriver
network = neutron_v2.NeutronClient
orchestration = heat_v1.HeatClient
telemetry = ceilometer_v2.CeilometerClient
|
<commit_before># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
def compute(params):
return nova_v2.NovaClient(params)
def loadbalancing(params):
return lbaas.LoadBalancerDriver(params)
def network(params):
return neutron_v2.NeutronClient(params)
def orchestration(params):
return heat_v1.HeatClient(params)
def telemetry(params):
return ceilometer_v2.CeilometerClient(params)
<commit_msg>Add keystone driver plugin for functional test
This patch adds keystone driver plugin for functional test.
Change-Id: Iefa9c1b8956854ae75f672627aa3d2f9f7d22c0e<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import keystone_v3
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
compute = nova_v2.NovaClient
identity = keystone_v3.KeystoneClient
loadbalancing = lbaas.LoadBalancerDriver
network = neutron_v2.NeutronClient
orchestration = heat_v1.HeatClient
telemetry = ceilometer_v2.CeilometerClient
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
def compute(params):
return nova_v2.NovaClient(params)
def loadbalancing(params):
return lbaas.LoadBalancerDriver(params)
def network(params):
return neutron_v2.NeutronClient(params)
def orchestration(params):
return heat_v1.HeatClient(params)
def telemetry(params):
return ceilometer_v2.CeilometerClient(params)
Add keystone driver plugin for functional test
This patch adds keystone driver plugin for functional test.
Change-Id: Iefa9c1b8956854ae75f672627aa3d2f9f7d22c0e# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import keystone_v3
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
compute = nova_v2.NovaClient
identity = keystone_v3.KeystoneClient
loadbalancing = lbaas.LoadBalancerDriver
network = neutron_v2.NeutronClient
orchestration = heat_v1.HeatClient
telemetry = ceilometer_v2.CeilometerClient
|
<commit_before># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
def compute(params):
return nova_v2.NovaClient(params)
def loadbalancing(params):
return lbaas.LoadBalancerDriver(params)
def network(params):
return neutron_v2.NeutronClient(params)
def orchestration(params):
return heat_v1.HeatClient(params)
def telemetry(params):
return ceilometer_v2.CeilometerClient(params)
<commit_msg>Add keystone driver plugin for functional test
This patch adds keystone driver plugin for functional test.
Change-Id: Iefa9c1b8956854ae75f672627aa3d2f9f7d22c0e<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from senlin.drivers.openstack import ceilometer_v2
from senlin.drivers.openstack import heat_v1
from senlin.drivers.openstack import keystone_v3
from senlin.drivers.openstack import lbaas
from senlin.drivers.openstack import neutron_v2
from senlin.tests.functional.drivers.openstack import nova_v2
# Currently, only fake nova_v2 driver is supported
compute = nova_v2.NovaClient
identity = keystone_v3.KeystoneClient
loadbalancing = lbaas.LoadBalancerDriver
network = neutron_v2.NeutronClient
orchestration = heat_v1.HeatClient
telemetry = ceilometer_v2.CeilometerClient
|
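The diff above swaps one-line factory functions for plain module-level aliases; since a class is already a callable that returns instances, both forms expose the same call signature to callers. A tiny stand-alone illustration of why the two are interchangeable (NovaClient here is a dummy, not the real driver):

class NovaClient:
    def __init__(self, params):
        self.params = params

# before: a one-line factory wrapper
def compute_wrapper(params):
    return NovaClient(params)

# after: the class itself already acts as the factory
compute = NovaClient

assert isinstance(compute_wrapper({'region': 'x'}), NovaClient)
assert isinstance(compute({'region': 'x'}), NovaClient)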
b483840cd7ec275090d41ee9a3b59b6300ea879e
|
glitch/renderer.py
|
glitch/renderer.py
|
from aiohttp import web
import asyncio
app = web.Application()
async def moosic(req):
print("req", type(req))
resp = web.StreamResponse()
resp.content_type = "text/plain" # "audio/mpeg"
await resp.prepare(req)
resp.write(b"Hello, world!")
for i in range(5):
print(i,"...")
await asyncio.sleep(i)
resp.write(b"\n\nWaited %d seconds" % i)
await resp.drain()
resp.write(b"\n\nDone waiting.")
print("Done.")
await resp.drain()
await resp.write_eof()
return resp
app.router.add_get("/all.mp3", moosic)
web.run_app(app, port=8889)
|
Create POC streaming server using asyncio
|
Create POC streaming server using asyncio
|
Python
|
artistic-2.0
|
Rosuav/appension,MikeiLL/appension,Rosuav/appension,Rosuav/appension,MikeiLL/appension,MikeiLL/appension,Rosuav/appension,MikeiLL/appension
|
Create POC streaming server using asyncio
|
from aiohttp import web
import asyncio
app = web.Application()
async def moosic(req):
print("req", type(req))
resp = web.StreamResponse()
resp.content_type = "text/plain" # "audio/mpeg"
await resp.prepare(req)
resp.write(b"Hello, world!")
for i in range(5):
print(i,"...")
await asyncio.sleep(i)
resp.write(b"\n\nWaited %d seconds" % i)
await resp.drain()
resp.write(b"\n\nDone waiting.")
print("Done.")
await resp.drain()
await resp.write_eof()
return resp
app.router.add_get("/all.mp3", moosic)
web.run_app(app, port=8889)
|
<commit_before><commit_msg>Create POC streaming server using asyncio<commit_after>
|
from aiohttp import web
import asyncio
app = web.Application()
async def moosic(req):
print("req", type(req))
resp = web.StreamResponse()
resp.content_type = "text/plain" # "audio/mpeg"
await resp.prepare(req)
resp.write(b"Hello, world!")
for i in range(5):
print(i,"...")
await asyncio.sleep(i)
resp.write(b"\n\nWaited %d seconds" % i)
await resp.drain()
resp.write(b"\n\nDone waiting.")
print("Done.")
await resp.drain()
await resp.write_eof()
return resp
app.router.add_get("/all.mp3", moosic)
web.run_app(app, port=8889)
|
Create POC streaming server using asynciofrom aiohttp import web
import asyncio
app = web.Application()
async def moosic(req):
print("req", type(req))
resp = web.StreamResponse()
resp.content_type = "text/plain" # "audio/mpeg"
await resp.prepare(req)
resp.write(b"Hello, world!")
for i in range(5):
print(i,"...")
await asyncio.sleep(i)
resp.write(b"\n\nWaited %d seconds" % i)
await resp.drain()
resp.write(b"\n\nDone waiting.")
print("Done.")
await resp.drain()
await resp.write_eof()
return resp
app.router.add_get("/all.mp3", moosic)
web.run_app(app, port=8889)
|
<commit_before><commit_msg>Create POC streaming server using asyncio<commit_after>from aiohttp import web
import asyncio
app = web.Application()
async def moosic(req):
print("req", type(req))
resp = web.StreamResponse()
resp.content_type = "text/plain" # "audio/mpeg"
await resp.prepare(req)
resp.write(b"Hello, world!")
for i in range(5):
print(i,"...")
await asyncio.sleep(i)
resp.write(b"\n\nWaited %d seconds" % i)
await resp.drain()
resp.write(b"\n\nDone waiting.")
print("Done.")
await resp.drain()
await resp.write_eof()
return resp
app.router.add_get("/all.mp3", moosic)
web.run_app(app, port=8889)
|
|
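The handler above targets the aiohttp 1.x/2.x API, where StreamResponse.write() was synchronous and drain() flushed the transport. In aiohttp 3.x, write() became a coroutine that drains implicitly and drain() was removed; a sketch of the same proof of concept updated for that API (behavior otherwise unchanged):

from aiohttp import web
import asyncio

async def moosic(req):
    resp = web.StreamResponse()
    resp.content_type = "text/plain"  # "audio/mpeg"
    await resp.prepare(req)
    await resp.write(b"Hello, world!")      # write() is a coroutine in 3.x
    for i in range(5):
        await asyncio.sleep(i)
        await resp.write(b"\n\nWaited %d seconds" % i)  # drains implicitly
    await resp.write(b"\n\nDone waiting.")
    await resp.write_eof()
    return resp

app = web.Application()
app.router.add_get("/all.mp3", moosic)
web.run_app(app, port=8889)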
0485a265c5076402acc482db6789fb46bc313451
|
proc_images.py
|
proc_images.py
|
import numpy as np
import cv
from pytesser import *
from common import *
def proc_ex (ex):
try:
text = (image_file_to_string(img_filename(ex[0])),)
except IOError:
print "Exception thrown"
text = ("",)
return text + tuple(ex)
def write_txt (fname, txt):
with open(fname, 'wb') as w:
w.write(txt)
def all_data (dataset):
"""Gets all data for i'th training set"""
for i in xrange(CV_SEGMENTS):
fname = data_filename(dataset, i)
with open(fname) as f:
f.readline()
lines = map(lambda x: map(int, x.strip().split()), f.readlines())
exs = map(lambda x: proc_ex(x), lines)
yield exs #(image_file_to_string('data/images/txt_msg0.jpg'), exs)
def all_training_data ():
return all_data('train')
def all_testing_data ():
return all_data('test')
def put_img_text ():
for exs in all_training_data():
for ex in exs:
fname = txt_filename(ex[1])
print "Writing %s" % fname
write_txt(fname, ex[0])
if __name__ == '__main__':
#text = image_file_to_string('data/images/txt_msg0.jpg')
#print text
mk_folder_if_ne(TXT_DIR)
put_img_text()
"""
img = cv.imread('data/images/txt_msg0.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(gray, 1, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv.boundingRect(cnt)
"""
|
Add code to lift text out of images
|
Add code to lift text out of images
|
Python
|
mit
|
hausdorff/i-like-you,hausdorff/i-like-you
|
Add code to lift text out of images
|
import numpy as np
import cv
from pytesser import *
from common import *
def proc_ex (ex):
try:
text = (image_file_to_string(img_filename(ex[0])),)
except IOError:
print "Exception thrown"
text = ("",)
return text + tuple(ex)
def write_txt (fname, txt):
with open(fname, 'wb') as w:
w.write(txt)
def all_data (dataset):
"""Gets all data for i'th training set"""
for i in xrange(CV_SEGMENTS):
fname = data_filename(dataset, i)
with open(fname) as f:
f.readline()
lines = map(lambda x: map(int, x.strip().split()), f.readlines())
exs = map(lambda x: proc_ex(x), lines)
yield exs #(image_file_to_string('data/images/txt_msg0.jpg'), exs)
def all_training_data ():
return all_data('train')
def all_testing_data ():
return all_data('test')
def put_img_text ():
for exs in all_training_data():
for ex in exs:
fname = txt_filename(ex[1])
print "Writing %s" % fname
write_txt(fname, ex[0])
if __name__ == '__main__':
#text = image_file_to_string('data/images/txt_msg0.jpg')
#print text
mk_folder_if_ne(TXT_DIR)
put_img_text()
"""
img = cv.imread('data/images/txt_msg0.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(gray, 1, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv.boundingRect(cnt)
"""
|
<commit_before><commit_msg>Add code to lift text out of images<commit_after>
|
import numpy as np
import cv
from pytesser import *
from common import *
def proc_ex (ex):
try:
text = (image_file_to_string(img_filename(ex[0])),)
except IOError:
print "Exception thrown"
text = ("",)
return text + tuple(ex)
def write_txt (fname, txt):
with open(fname, 'wb') as w:
w.write(txt)
def all_data (dataset):
"""Gets all data for i'th training set"""
for i in xrange(CV_SEGMENTS):
fname = data_filename(dataset, i)
with open(fname) as f:
f.readline()
lines = map(lambda x: map(int, x.strip().split()), f.readlines())
exs = map(lambda x: proc_ex(x), lines)
yield exs #(image_file_to_string('data/images/txt_msg0.jpg'), exs)
def all_training_data ():
return all_data('train')
def all_testing_data ():
return all_data('test')
def put_img_text ():
for exs in all_training_data():
for ex in exs:
fname = txt_filename(ex[1])
print "Writing %s" % fname
write_txt(fname, ex[0])
if __name__ == '__main__':
#text = image_file_to_string('data/images/txt_msg0.jpg')
#print text
mk_folder_if_ne(TXT_DIR)
put_img_text()
"""
img = cv.imread('data/images/txt_msg0.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(gray, 1, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv.boundingRect(cnt)
"""
|
Add code to lift text out of imagesimport numpy as np
import cv
from pytesser import *
from common import *
def proc_ex (ex):
try:
text = (image_file_to_string(img_filename(ex[0])),)
except IOError:
print "Exception thrown"
text = ("",)
return text + tuple(ex)
def write_txt (fname, txt):
with open(fname, 'wb') as w:
w.write(txt)
def all_data (dataset):
"""Gets all data for i'th training set"""
for i in xrange(CV_SEGMENTS):
fname = data_filename(dataset, i)
with open(fname) as f:
f.readline()
lines = map(lambda x: map(int, x.strip().split()), f.readlines())
exs = map(lambda x: proc_ex(x), lines)
yield exs #(image_file_to_string('data/images/txt_msg0.jpg'), exs)
def all_training_data ():
return all_data('train')
def all_testing_data ():
return all_data('test')
def put_img_text ():
for exs in all_training_data():
for ex in exs:
fname = txt_filename(ex[1])
print "Writing %s" % fname
write_txt(fname, ex[0])
if __name__ == '__main__':
#text = image_file_to_string('data/images/txt_msg0.jpg')
#print text
mk_folder_if_ne(TXT_DIR)
put_img_text()
"""
img = cv.imread('data/images/txt_msg0.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(gray, 1, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv.boundingRect(cnt)
"""
|
<commit_before><commit_msg>Add code to lift text out of images<commit_after>import numpy as np
import cv
from pytesser import *
from common import *
def proc_ex (ex):
try:
text = (image_file_to_string(img_filename(ex[0])),)
except IOError:
print "Exception thrown"
text = ("",)
return text + tuple(ex)
def write_txt (fname, txt):
with open(fname, 'wb') as w:
w.write(txt)
def all_data (dataset):
"""Gets all data for i'th training set"""
for i in xrange(CV_SEGMENTS):
fname = data_filename(dataset, i)
with open(fname) as f:
f.readline()
lines = map(lambda x: map(int, x.strip().split()), f.readlines())
exs = map(lambda x: proc_ex(x), lines)
yield exs #(image_file_to_string('data/images/txt_msg0.jpg'), exs)
def all_training_data ():
return all_data('train')
def all_testing_data ():
return all_data('test')
def put_img_text ():
for exs in all_training_data():
for ex in exs:
fname = txt_filename(ex[1])
print "Writing %s" % fname
write_txt(fname, ex[0])
if __name__ == '__main__':
#text = image_file_to_string('data/images/txt_msg0.jpg')
#print text
mk_folder_if_ne(TXT_DIR)
put_img_text()
"""
img = cv.imread('data/images/txt_msg0.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
_, thresh = cv.threshold(gray, 1, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv.boundingRect(cnt)
"""
|
|
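The commented-out fragment at the bottom of the file mixes the legacy cv import with the cv2-style API (boundingRect being the intended call). A cleaned-up sketch of the same crop-to-text-region idea, written against OpenCV 4.x where findContours returns two values; the image path is the placeholder from the original:

import cv2

img = cv2.imread('data/images/txt_msg0.jpg')  # placeholder path from above
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(contours[0])  # assumes at least one contour
crop = img[y:y + h, x:x + w]                # candidate text region for OCR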
c27f7651a933fa831f936ca90fd25ede3f79aa50
|
pythran/analyses/ast_matcher.py
|
pythran/analyses/ast_matcher.py
|
""" Module to looks for a specified pattern in a given AST. """
from ast import AST, iter_fields
import ast
class AST_no_cond(object):
""" Class to specify we don't care about a field value in ast. """
def match(node, pattern):
"""
Check matching between an ast.Node and a pattern.
AST_no_cond permit to specify we don't care about a field value
"""
if node.__class__.__name__ != pattern.__class__.__name__:
return False
zipped_nodes = zip(iter_fields(node), iter_fields(pattern))
for (_, value_n), (_, value_p) in zipped_nodes:
if isinstance(value_p, AST_no_cond):
# We don't check this field
continue
elif type(value_n) != type(value_p):
# Type mismatch, so node differ
return False
bad_list_matching = (isinstance(value_n, list) and
not list_matching(value_n, value_p))
bad_ast_matching = (isinstance(value_n, AST) and
not match(value_n, value_p))
bad_matching = value_n != value_p
# If values are not matching list, matching ast node or equal value
# pattern and node differ
if bad_list_matching and bad_ast_matching and bad_matching:
return False
return True
def list_matching(node_list, pattern_list):
""" Check matching between nodes and patterns of ast.Node. """
if len(node_list) != len(pattern_list):
return False
for item_n, item_p in zip(node_list, pattern_list):
if isinstance(item_p, AST_no_cond):
continue
elif isinstance(item_n, AST) and not match(item_n, item_p):
return False
elif item_n != item_p:
return False
return True
class ASTMatcher(ast.NodeVisitor):
"""
Visitor to gather node matching with a given pattern.
Examples
--------
>>> import ast
>>> code = "[(i, j) for i in xrange(a) for j in xrange(b)]"
>>> pattern = ast.Call(func=ast.Name(id='xrange', ctx=ast.Load()),
... args=AST_no_cond(), keywords=[],
... starargs=None, kwargs=None)
>>> len(ASTMatcher(pattern).get(ast.parse(code)))
2
"""
def __init__(self, pattern):
""" Basic initialiser saving pattern and initialising result set. """
self.pattern = pattern
self.result = set()
def visit(self, node):
"""
Visitor looking for a match between the current node and the pattern.
If it matches, save it; either way, keep going.
"""
if match(node, self.pattern):
self.result.add(node)
self.generic_visit(node)
def get(self, node):
""" Facility to get values of the matcher for a given node. """
self.visit(node)
return self.result
|
Add ast matcher for pattern recognition
|
Add ast matcher for pattern recognition
|
Python
|
bsd-3-clause
|
artas360/pythran,pbrunet/pythran,pbrunet/pythran,hainm/pythran,artas360/pythran,artas360/pythran,hainm/pythran,pombredanne/pythran,pombredanne/pythran,serge-sans-paille/pythran,pombredanne/pythran,hainm/pythran,serge-sans-paille/pythran,pbrunet/pythran
|
Add ast matcher for pattern recognition
|
""" Module to looks for a specified pattern in a given AST. """
from ast import AST, iter_fields
import ast
class AST_no_cond(object):
""" Class to specify we don't care about a field value in ast. """
def match(node, pattern):
"""
Check matching between an ast.Node and a pattern.
AST_no_cond lets us specify that we don't care about a field value.
"""
if node.__class__.__name__ != pattern.__class__.__name__:
return False
zipped_nodes = zip(iter_fields(node), iter_fields(pattern))
for (_, value_n), (_, value_p) in zipped_nodes:
if isinstance(value_p, AST_no_cond):
# We don't check this field
continue
elif type(value_n) != type(value_p):
# Type mismatch, so the nodes differ
return False
bad_list_matching = (isinstance(value_n, list) and
not list_matching(value_n, value_p))
bad_ast_matching = (isinstance(value_n, AST) and
not match(value_n, value_p))
bad_plain_matching = (not isinstance(value_n, (list, AST)) and
value_n != value_p)
# The pattern and node differ if the value is a non-matching list,
# a non-matching ast node, or an unequal plain value
if bad_list_matching or bad_ast_matching or bad_plain_matching:
return False
return True
def list_matching(node_list, pattern_list):
""" Check matching between nodes and patterns of ast.Node. """
if len(node_list) != len(pattern_list):
return False
for item_n, item_p in zip(node_list, pattern_list):
if isinstance(item_p, AST_no_cond):
continue
elif isinstance(item_n, AST) and not match(item_n, item_p):
return False
elif item_n != item_p:
return False
return True
class ASTMatcher(ast.NodeVisitor):
"""
Visitor to gather node matching with a given pattern.
Examples
--------
>>> import ast
>>> code = "[(i, j) for i in xrange(a) for j in xrange(b)]"
>>> pattern = ast.Call(func=ast.Name(id='xrange', ctx=ast.Load()),
... args=AST_no_cond(), keywords=[],
... starargs=None, kwargs=None)
>>> len(ASTMatcher(pattern).get(ast.parse(code)))
2
"""
def __init__(self, pattern):
""" Basic initialiser saving pattern and initialising result set. """
self.pattern = pattern
self.result = set()
def visit(self, node):
"""
Visitor looking for a match between the current node and the pattern.
If it matches, save it; either way, keep going.
"""
if match(node, self.pattern):
self.result.add(node)
self.generic_visit(node)
def get(self, node):
""" Facility to get values of the matcher for a given node. """
self.visit(node)
return self.result
|
<commit_before><commit_msg>Add ast matcher for pattern recognition<commit_after>
|
""" Module to looks for a specified pattern in a given AST. """
from ast import AST, iter_fields
import ast
class AST_no_cond(object):
""" Class to specify we don't care about a field value in ast. """
def match(node, pattern):
"""
Check matching between an ast.Node and a pattern.
AST_no_cond lets us specify that we don't care about a field value.
"""
if node.__class__.__name__ != pattern.__class__.__name__:
return False
zipped_nodes = zip(iter_fields(node), iter_fields(pattern))
for (_, value_n), (_, value_p) in zipped_nodes:
if isinstance(value_p, AST_no_cond):
# We don't check this field
continue
elif type(value_n) != type(value_p):
# Type mismatch, so the nodes differ
return False
bad_list_matching = (isinstance(value_n, list) and
not list_matching(value_n, value_p))
bad_ast_matching = (isinstance(value_n, AST) and
not match(value_n, value_p))
bad_plain_matching = (not isinstance(value_n, (list, AST)) and
value_n != value_p)
# The pattern and node differ if the value is a non-matching list,
# a non-matching ast node, or an unequal plain value
if bad_list_matching or bad_ast_matching or bad_plain_matching:
return False
return True
def list_matching(node_list, pattern_list):
""" Check matching between nodes and patterns of ast.Node. """
if len(node_list) != len(pattern_list):
return False
for item_n, item_p in zip(node_list, pattern_list):
if isinstance(item_p, AST_no_cond):
continue
elif isinstance(item_n, AST) and not match(item_n, item_p):
return False
elif item_n != item_p:
return False
return True
class ASTMatcher(ast.NodeVisitor):
"""
Visitor to gather node matching with a given pattern.
Examples
--------
>>> import ast
>>> code = "[(i, j) for i in xrange(a) for j in xrange(b)]"
>>> pattern = ast.Call(func=ast.Name(id='xrange', ctx=ast.Load()),
... args=AST_no_cond(), keywords=[],
... starargs=None, kwargs=None)
>>> len(ASTMatcher(pattern).get(ast.parse(code)))
2
"""
def __init__(self, pattern):
""" Basic initialiser saving pattern and initialising result set. """
self.pattern = pattern
self.result = set()
def visit(self, node):
"""
Visitor looking for a match between the current node and the pattern.
If it matches, save it; either way, keep going.
"""
if match(node, self.pattern):
self.result.add(node)
self.generic_visit(node)
def get(self, node):
""" Facility to get values of the matcher for a given node. """
self.visit(node)
return self.result
|
Add ast matcher for pattern recognition""" Module to looks for a specified pattern in a given AST. """
from ast import AST, iter_fields
import ast
class AST_no_cond(object):
""" Class to specify we don't care about a field value in ast. """
def match(node, pattern):
"""
Check matching between an ast.Node and a pattern.
AST_no_cond lets us specify that we don't care about a field value.
"""
if node.__class__.__name__ != pattern.__class__.__name__:
return False
zipped_nodes = zip(iter_fields(node), iter_fields(pattern))
for (_, value_n), (_, value_p) in zipped_nodes:
if isinstance(value_p, AST_no_cond):
# We don't check this field
continue
elif type(value_n) != type(value_p):
# Type mismatch, so the nodes differ
return False
bad_list_matching = (isinstance(value_n, list) and
not list_matching(value_n, value_p))
bad_ast_matching = (isinstance(value_n, AST) and
not match(value_n, value_p))
bad_plain_matching = (not isinstance(value_n, (list, AST)) and
value_n != value_p)
# The pattern and node differ if the value is a non-matching list,
# a non-matching ast node, or an unequal plain value
if bad_list_matching or bad_ast_matching or bad_plain_matching:
return False
return True
def list_matching(node_list, pattern_list):
""" Check matching between nodes and patterns of ast.Node. """
if len(node_list) != len(pattern_list):
return False
for item_n, item_p in zip(node_list, pattern_list):
if isinstance(item_p, AST_no_cond):
continue
elif isinstance(item_n, AST) and not match(item_n, item_p):
return False
elif item_n != item_p:
return False
return True
class ASTMatcher(ast.NodeVisitor):
"""
Visitor to gather node matching with a given pattern.
Examples
--------
>>> import ast
>>> code = "[(i, j) for i in xrange(a) for j in xrange(b)]"
>>> pattern = ast.Call(func=ast.Name(id='xrange', ctx=ast.Load()),
... args=AST_no_cond(), keywords=[],
... starargs=None, kwargs=None)
>>> len(ASTMatcher(pattern).get(ast.parse(code)))
2
"""
def __init__(self, pattern):
""" Basic initialiser saving pattern and initialising result set. """
self.pattern = pattern
self.result = set()
def visit(self, node):
"""
Visitor looking for a match between the current node and the pattern.
If it matches, save it; either way, keep going.
"""
if match(node, self.pattern):
self.result.add(node)
self.generic_visit(node)
def get(self, node):
""" Facility to get values of the matcher for a given node. """
self.visit(node)
return self.result
|
<commit_before><commit_msg>Add ast matcher for pattern recognition<commit_after>""" Module to look for a specified pattern in a given AST. """
from ast import AST, iter_fields
import ast
class AST_no_cond(object):
""" Class to specify we don't care about a field value in ast. """
def match(node, pattern):
"""
Check matching between an ast.Node and a pattern.
AST_no_cond lets us specify that we don't care about a field value.
"""
if node.__class__.__name__ != pattern.__class__.__name__:
return False
zipped_nodes = zip(iter_fields(node), iter_fields(pattern))
for (_, value_n), (_, value_p) in zipped_nodes:
if isinstance(value_p, AST_no_cond):
# We don't check this field
continue
elif type(value_n) != type(value_p):
# Type mismatch, so the nodes differ
return False
bad_list_matching = (isinstance(value_n, list) and
not list_matching(value_n, value_p))
bad_ast_matching = (isinstance(value_n, AST) and
not match(value_n, value_p))
bad_plain_matching = (not isinstance(value_n, (list, AST)) and
value_n != value_p)
# The pattern and node differ if the value is a non-matching list,
# a non-matching ast node, or an unequal plain value
if bad_list_matching or bad_ast_matching or bad_plain_matching:
return False
return True
def list_matching(node_list, pattern_list):
""" Check matching between nodes and patterns of ast.Node. """
if len(node_list) != len(pattern_list):
return False
for item_n, item_p in zip(node_list, pattern_list):
if isinstance(item_p, AST_no_cond):
continue
elif isinstance(item_n, AST) and not match(item_n, item_p):
return False
elif item_n != item_p:
return False
return True
class ASTMatcher(ast.NodeVisitor):
"""
Visitor to gather node matching with a given pattern.
Examples
--------
>>> import ast
>>> code = "[(i, j) for i in xrange(a) for j in xrange(b)]"
>>> pattern = ast.Call(func=ast.Name(id='xrange', ctx=ast.Load()),
... args=AST_no_cond(), keywords=[],
... starargs=None, kwargs=None)
>>> len(ASTMatcher(pattern).get(ast.parse(code)))
2
"""
def __init__(self, pattern):
""" Basic initialiser saving pattern and initialising result set. """
self.pattern = pattern
self.result = set()
def visit(self, node):
"""
Visitor looking for a match between the current node and the pattern.
If it matches, save it; either way, keep going.
"""
if match(node, self.pattern):
self.result.add(node)
self.generic_visit(node)
def get(self, node):
""" Facility to get values of the matcher for a given node. """
self.visit(node)
return self.result
|
|
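As a second usage sketch of the matcher above (beyond the xrange doctest): collect every attribute access on a particular name, using AST_no_cond for the attribute field we don't care about. The import path is assumed from the file location; the snippet is illustrative only:

import ast

from pythran.analyses.ast_matcher import ASTMatcher, AST_no_cond  # assumed path

code = "foo.bar + foo.baz + other.bar"
pattern = ast.Attribute(value=ast.Name(id='foo', ctx=ast.Load()),
                        attr=AST_no_cond(), ctx=ast.Load())
print(len(ASTMatcher(pattern).get(ast.parse(code))))  # expected: 2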
bb8774152e63ce8a09dbb75d43579ba145bbe08f
|
tests/cli/test_task.py
|
tests/cli/test_task.py
|
"""
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from flexmock import flexmock
from atomic_reactor.cli import task
from atomic_reactor.tasks import orchestrator, worker, sources, common
TASK_ARGS = {
"build_dir": "/build",
"context_dir": "/context",
"config_file": "reactor-config-map.yaml",
"user_params": '{"some_param": "some_value"}',
}
TASK_RESULT = object()
def mock(task_cls):
params = flexmock()
(
# mock the common TaskParams because child classes do not override from_cli_args
flexmock(common.TaskParams)
.should_receive("from_cli_args")
.with_args(TASK_ARGS)
.and_return(params)
)
flexmock(task_cls).should_receive("__init__").with_args(params)
flexmock(task_cls).should_receive("execute").and_return(TASK_RESULT)
def test_orchestrator():
mock(orchestrator.OrchestratorTask)
assert task.orchestrator(TASK_ARGS) == TASK_RESULT
def test_worker():
mock(worker.WorkerTask)
assert task.worker(TASK_ARGS) == TASK_RESULT
def test_source_build():
mock(sources.SourceBuildTask)
assert task.source_build(TASK_ARGS) == TASK_RESULT
|
Add unit tests for cli.task
|
Add unit tests for cli.task
CLOUDBLD-6300
Signed-off-by: Adam Cmiel <1217f9865bd733d1bcad0d0d64be310272c5592c@redhat.com>
|
Python
|
bsd-3-clause
|
fr34k8/atomic-reactor,projectatomic/atomic-reactor,projectatomic/atomic-reactor,fr34k8/atomic-reactor
|
Add unit tests for cli.task
CLOUDBLD-6300
Signed-off-by: Adam Cmiel <1217f9865bd733d1bcad0d0d64be310272c5592c@redhat.com>
|
"""
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from flexmock import flexmock
from atomic_reactor.cli import task
from atomic_reactor.tasks import orchestrator, worker, sources, common
TASK_ARGS = {
"build_dir": "/build",
"context_dir": "/context",
"config_file": "reactor-config-map.yaml",
"user_params": '{"some_param": "some_value"}',
}
TASK_RESULT = object()
def mock(task_cls):
params = flexmock()
(
# mock the common TaskParams because child classes do not override from_cli_args
flexmock(common.TaskParams)
.should_receive("from_cli_args")
.with_args(TASK_ARGS)
.and_return(params)
)
flexmock(task_cls).should_receive("__init__").with_args(params)
flexmock(task_cls).should_receive("execute").and_return(TASK_RESULT)
def test_orchestrator():
mock(orchestrator.OrchestratorTask)
assert task.orchestrator(TASK_ARGS) == TASK_RESULT
def test_worker():
mock(worker.WorkerTask)
assert task.worker(TASK_ARGS) == TASK_RESULT
def test_source_build():
mock(sources.SourceBuildTask)
assert task.source_build(TASK_ARGS) == TASK_RESULT
|
<commit_before><commit_msg>Add unit tests for cli.task
CLOUDBLD-6300
Signed-off-by: Adam Cmiel <1217f9865bd733d1bcad0d0d64be310272c5592c@redhat.com><commit_after>
|
"""
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from flexmock import flexmock
from atomic_reactor.cli import task
from atomic_reactor.tasks import orchestrator, worker, sources, common
TASK_ARGS = {
"build_dir": "/build",
"context_dir": "/context",
"config_file": "reactor-config-map.yaml",
"user_params": '{"some_param": "some_value"}',
}
TASK_RESULT = object()
def mock(task_cls):
params = flexmock()
(
# mock the common TaskParams because child classes do not override from_cli_args
flexmock(common.TaskParams)
.should_receive("from_cli_args")
.with_args(TASK_ARGS)
.and_return(params)
)
flexmock(task_cls).should_receive("__init__").with_args(params)
flexmock(task_cls).should_receive("execute").and_return(TASK_RESULT)
def test_orchestrator():
mock(orchestrator.OrchestratorTask)
assert task.orchestrator(TASK_ARGS) == TASK_RESULT
def test_worker():
mock(worker.WorkerTask)
assert task.worker(TASK_ARGS) == TASK_RESULT
def test_source_build():
mock(sources.SourceBuildTask)
assert task.source_build(TASK_ARGS) == TASK_RESULT
|
Add unit tests for cli.task
CLOUDBLD-6300
Signed-off-by: Adam Cmiel <1217f9865bd733d1bcad0d0d64be310272c5592c@redhat.com>"""
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from flexmock import flexmock
from atomic_reactor.cli import task
from atomic_reactor.tasks import orchestrator, worker, sources, common
TASK_ARGS = {
"build_dir": "/build",
"context_dir": "/context",
"config_file": "reactor-config-map.yaml",
"user_params": '{"some_param": "some_value"}',
}
TASK_RESULT = object()
def mock(task_cls):
params = flexmock()
(
# mock the common TaskParams because child classes do not override from_cli_args
flexmock(common.TaskParams)
.should_receive("from_cli_args")
.with_args(TASK_ARGS)
.and_return(params)
)
flexmock(task_cls).should_receive("__init__").with_args(params)
flexmock(task_cls).should_receive("execute").and_return(TASK_RESULT)
def test_orchestrator():
mock(orchestrator.OrchestratorTask)
assert task.orchestrator(TASK_ARGS) == TASK_RESULT
def test_worker():
mock(worker.WorkerTask)
assert task.worker(TASK_ARGS) == TASK_RESULT
def test_source_build():
mock(sources.SourceBuildTask)
assert task.source_build(TASK_ARGS) == TASK_RESULT
|
<commit_before><commit_msg>Add unit tests for cli.task
CLOUDBLD-6300
Signed-off-by: Adam Cmiel <1217f9865bd733d1bcad0d0d64be310272c5592c@redhat.com><commit_after>"""
Copyright (c) 2021 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from flexmock import flexmock
from atomic_reactor.cli import task
from atomic_reactor.tasks import orchestrator, worker, sources, common
TASK_ARGS = {
"build_dir": "/build",
"context_dir": "/context",
"config_file": "reactor-config-map.yaml",
"user_params": '{"some_param": "some_value"}',
}
TASK_RESULT = object()
def mock(task_cls):
params = flexmock()
(
# mock the common TaskParams because child classes do not override from_cli_args
flexmock(common.TaskParams)
.should_receive("from_cli_args")
.with_args(TASK_ARGS)
.and_return(params)
)
flexmock(task_cls).should_receive("__init__").with_args(params)
flexmock(task_cls).should_receive("execute").and_return(TASK_RESULT)
def test_orchestrator():
mock(orchestrator.OrchestratorTask)
assert task.orchestrator(TASK_ARGS) == TASK_RESULT
def test_worker():
mock(worker.WorkerTask)
assert task.worker(TASK_ARGS) == TASK_RESULT
def test_source_build():
mock(sources.SourceBuildTask)
assert task.source_build(TASK_ARGS) == TASK_RESULT
|
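A side note on the flexmock chain used in the record above: should_receive(...).with_args(...).and_return(...) both stubs the method and records an expectation about how it is called. Below is a minimal, self-contained sketch of the same pattern; the Greeter class is hypothetical and exists only to illustrate the API.
from flexmock import flexmock
class Greeter:
    def greet(self, name):
        return "hello, " + name
def test_greet_is_stubbed():
    # Stub greet() at class level: when called with "world" it returns a canned value.
    flexmock(Greeter).should_receive("greet").with_args("world").and_return("stubbed")
    assert Greeter().greet("world") == "stubbed"
Adding .once() to the chain would additionally require exactly one call, verified at test teardown by flexmock's pytest integration.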
|
45eb018082d035f36d6b5d207f6f44e77a65a895
|
wk1/MeanOfASetOfFITSFiles.py
|
wk1/MeanOfASetOfFITSFiles.py
|
# Write your mean_fits function here:
from astropy.io import fits
import numpy as np
def mean_fits(fitsfiles):
count = 0
for fitsfile in fitsfiles:
hdulist = fits.open(fitsfile)
newdata = hdulist[0].data
if count == 0:
data = newdata
else:
data = data + newdata #adds matrices
count = count + 1
data = data/count #calculate the average for each value
return data
if __name__ == '__main__':
# Test your function with examples from the question
data = mean_fits(['image0.fits', 'image1.fits', 'image2.fits'])
print(data[100, 100])
# You can also plot the result:
#import matplotlib.pyplot as plt
#plt.imshow(data.T, cmap=plt.cm.viridis)
#plt.colorbar()
#plt.show()
|
Read a set of FITS files and calculate the mean (average). Based on code from two previous exercises.
|
Read a set of FITS files and calculate the mean (average). Based on code from two previous exercises.
|
Python
|
mit
|
lokijota/datadrivenastronomymooc
|
Read a set of FITS files and calculate the mean (average). Based on code from two previous exercises.
|
# Write your mean_fits function here:
from astropy.io import fits
import numpy as np
def mean_fits(fitsfiles):
count = 0
for fitsfile in fitsfiles:
hdulist = fits.open(fitsfile)
newdata = hdulist[0].data
if count == 0:
data = newdata
else:
data = data + newdata #adds matrices
count = count + 1
data = data/count #calculate the average for each value
return data
if __name__ == '__main__':
# Test your function with examples from the question
data = mean_fits(['image0.fits', 'image1.fits', 'image2.fits'])
print(data[100, 100])
# You can also plot the result:
#import matplotlib.pyplot as plt
#plt.imshow(data.T, cmap=plt.cm.viridis)
#plt.colorbar()
#plt.show()
|
<commit_before><commit_msg>Read a set of FITS files and calculate the mean (average). Based on code from two previous exercizes.<commit_after>
|
# Write your mean_fits function here:
from astropy.io import fits
import numpy as np
def mean_fits(fitsfiles):
count = 0
for fitsfile in fitsfiles:
hdulist = fits.open(fitsfile)
newdata = hdulist[0].data
if count == 0:
data = newdata
else:
data = data + newdata #adds matrices
count = count + 1
data = data/count #calculate the average for each value
return data
if __name__ == '__main__':
# Test your function with examples from the question
data = mean_fits(['image0.fits', 'image1.fits', 'image2.fits'])
print(data[100, 100])
# You can also plot the result:
#import matplotlib.pyplot as plt
#plt.imshow(data.T, cmap=plt.cm.viridis)
#plt.colorbar()
#plt.show()
|
Read a set of FITS files and calculate the mean (average). Based on code from two previous exercises.# Write your mean_fits function here:
from astropy.io import fits
import numpy as np
def mean_fits(fitsfiles):
count = 0
for fitsfile in fitsfiles:
hdulist = fits.open(fitsfile)
newdata = hdulist[0].data
if count == 0:
data = newdata
else:
data = data + newdata #adds matrices
count = count + 1
data = data/count #calculate the average for each value
return data
if __name__ == '__main__':
# Test your function with examples from the question
data = mean_fits(['image0.fits', 'image1.fits', 'image2.fits'])
print(data[100, 100])
# You can also plot the result:
#import matplotlib.pyplot as plt
#plt.imshow(data.T, cmap=plt.cm.viridis)
#plt.colorbar()
#plt.show()
|
<commit_before><commit_msg>Read a set of FITS files and calculate the mean (average). Based on code from two previous exercises.<commit_after># Write your mean_fits function here:
from astropy.io import fits
import numpy as np
def mean_fits(fitsfiles):
count = 0
for fitsfile in fitsfiles:
hdulist = fits.open(fitsfile)
newdata = hdulist[0].data
if count == 0:
data = newdata
else:
data = data + newdata #adds matrices
count = count + 1
data = data/count #calculate the average for each value
return data
if __name__ == '__main__':
# Test your function with examples from the question
data = mean_fits(['image0.fits', 'image1.fits', 'image2.fits'])
print(data[100, 100])
# You can also plot the result:
#import matplotlib.pyplot as plt
#plt.imshow(data.T, cmap=plt.cm.viridis)
#plt.colorbar()
#plt.show()
|
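One caveat worth noting next to the record above: the accumulator keeps the FITS images' native dtype, so integer data can overflow during the summation, and under Python 2 the final division truncates. A minimal sketch of a float-safe variant (the helper name is hypothetical; assumes astropy and numpy are installed):
from astropy.io import fits
import numpy as np
def mean_fits_float(fitsfiles):
    total = None
    for fitsfile in fitsfiles:
        with fits.open(fitsfile) as hdulist:
            # Promote to float64 before accumulating to avoid overflow and truncating division.
            data = hdulist[0].data.astype(np.float64)
        total = data if total is None else total + data
    return total / len(fitsfiles)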
|
cac3636a2e49e088c2a43b2f21a7aa31af66d215
|
src/tpn/data_io.py
|
src/tpn/data_io.py
|
#!/usr/bin/env python
import zipfile
import cPickle
import numpy as np
"""
track_obj: {
frames: 1 by n numpy array,
anchors: 1 by n numpy array,
features: m by n numpy array,
scores: c by n numpy array,
boxes: 4 by n numpy array,
rois: 4 by n numpy array
}
"""
def save_track_proto_to_zip(track_proto, save_file):
zf = zipfile.ZipFile(save_file, 'w')
print "Writing to zip file {}...".format(save_file)
for track_id, track in enumerate(track_proto['tracks']):
track_obj = {}
track_obj['frames'] = np.asarray([box['frame'] for box in track])
track_obj['anchors'] = np.asarray([box['anchor'] for box in track])
track_obj['scores'] = np.asarray([box['scores'] for box in track])
track_obj['features'] = np.asarray([box['feature'] for box in track])
track_obj['boxes'] = np.asarray([box['bbox'] for box in track])
track_obj['rois'] = np.asarray([box['roi'] for box in track])
zf.writestr('{:06d}.pkl'.format(track_id),
cPickle.dumps(track_obj, cPickle.HIGHEST_PROTOCOL))
if (track_id + 1) % 1000 == 0:
print "\t{} tracks written.".format(track_id + 1)
print "\tTotally {} tracks written.".format(track_id + 1)
zf.close()
|
Add function to save track proto to zip files for efficient storage and usage.
|
Add function to save track proto to zip files for efficient storage and usage.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add function to save track proto to zip files for efficient storage and usage.
|
#!/usr/bin/env python
import zipfile
import cPickle
import numpy as np
"""
track_obj: {
frames: 1 by n numpy array,
anchors: 1 by n numpy array,
features: m by n numpy array,
scores: c by n numpy array,
boxes: 4 by n numpy array,
rois: 4 by n numpy array
}
"""
def save_track_proto_to_zip(track_proto, save_file):
zf = zipfile.ZipFile(save_file, 'w')
print "Writing to zip file {}...".format(save_file)
for track_id, track in enumerate(track_proto['tracks']):
track_obj = {}
track_obj['frames'] = np.asarray([box['frame'] for box in track])
track_obj['anchors'] = np.asarray([box['anchor'] for box in track])
track_obj['scores'] = np.asarray([box['scores'] for box in track])
track_obj['features'] = np.asarray([box['feature'] for box in track])
track_obj['boxes'] = np.asarray([box['bbox'] for box in track])
track_obj['rois'] = np.asarray([box['roi'] for box in track])
zf.writestr('{:06d}.pkl'.format(track_id),
cPickle.dumps(track_obj, cPickle.HIGHEST_PROTOCOL))
if (track_id + 1) % 1000 == 0:
print "\t{} tracks written.".format(track_id + 1)
print "\tTotally {} tracks written.".format(track_id + 1)
zf.close()
|
<commit_before><commit_msg>Add function to save track proto to zip files for efficient storage and usage.<commit_after>
|
#!/usr/bin/env python
import zipfile
import cPickle
import numpy as np
"""
track_obj: {
frames: 1 by n numpy array,
anchors: 1 by n numpy array,
features: m by n numpy array,
scores: c by n numpy array,
boxes: 4 by n numpy array,
rois: 4 by n numpy array
}
"""
def save_track_proto_to_zip(track_proto, save_file):
zf = zipfile.ZipFile(save_file, 'w')
print "Writing to zip file {}...".format(save_file)
for track_id, track in enumerate(track_proto['tracks']):
track_obj = {}
track_obj['frames'] = np.asarray([box['frame'] for box in track])
track_obj['anchors'] = np.asarray([box['anchor'] for box in track])
track_obj['scores'] = np.asarray([box['scores'] for box in track])
track_obj['features'] = np.asarray([box['feature'] for box in track])
track_obj['boxes'] = np.asarray([box['bbox'] for box in track])
track_obj['rois'] = np.asarray([box['roi'] for box in track])
zf.writestr('{:06d}.pkl'.format(track_id),
cPickle.dumps(track_obj, cPickle.HIGHEST_PROTOCOL))
if (track_id + 1) % 1000 == 0:
print "\t{} tracks written.".format(track_id + 1)
print "\tTotally {} tracks written.".format(track_id + 1)
zf.close()
|
Add function to save track proto to zip files for efficient storage and usage.#!/usr/bin/env python
import zipfile
import cPickle
import numpy as np
"""
track_obj: {
frames: 1 by n numpy array,
anchors: 1 by n numpy array,
features: m by n numpy array,
scores: c by n numpy array,
boxes: 4 by n numpy array,
rois: 4 by n numpy array
}
"""
def save_track_proto_to_zip(track_proto, save_file):
zf = zipfile.ZipFile(save_file, 'w')
print "Writing to zip file {}...".format(save_file)
for track_id, track in enumerate(track_proto['tracks']):
track_obj = {}
track_obj['frames'] = np.asarray([box['frame'] for box in track])
track_obj['anchors'] = np.asarray([box['anchor'] for box in track])
track_obj['scores'] = np.asarray([box['scores'] for box in track])
track_obj['features'] = np.asarray([box['feature'] for box in track])
track_obj['boxes'] = np.asarray([box['bbox'] for box in track])
track_obj['rois'] = np.asarray([box['roi'] for box in track])
zf.writestr('{:06d}.pkl'.format(track_id),
cPickle.dumps(track_obj, cPickle.HIGHEST_PROTOCOL))
if (track_id + 1) % 1000 == 0:
print "\t{} tracks written.".format(track_id + 1)
print "\tTotally {} tracks written.".format(track_id + 1)
zf.close()
|
<commit_before><commit_msg>Add function to save track proto to zip files for efficient storage and usage.<commit_after>#!/usr/bin/env python
import zipfile
import cPickle
import numpy as np
"""
track_obj: {
frames: 1 by n numpy array,
anchors: 1 by n numpy array,
features: m by n numpy array,
scores: c by n numpy array,
boxes: 4 by n numpy array,
rois: 4 by n numpy array
}
"""
def save_track_proto_to_zip(track_proto, save_file):
zf = zipfile.ZipFile(save_file, 'w')
print "Writing to zip file {}...".format(save_file)
for track_id, track in enumerate(track_proto['tracks']):
track_obj = {}
track_obj['frames'] = np.asarray([box['frame'] for box in track])
track_obj['anchors'] = np.asarray([box['anchor'] for box in track])
track_obj['scores'] = np.asarray([box['scores'] for box in track])
track_obj['features'] = np.asarray([box['feature'] for box in track])
track_obj['boxes'] = np.asarray([box['bbox'] for box in track])
track_obj['rois'] = np.asarray([box['roi'] for box in track])
zf.writestr('{:06d}.pkl'.format(track_id),
cPickle.dumps(track_obj, cPickle.HIGHEST_PROTOCOL))
if (track_id + 1) % 1000 == 0:
print "\t{} tracks written.".format(track_id + 1)
print "\tTotally {} tracks written.".format(track_id + 1)
zf.close()
|
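The record above only covers the writer; reading a track back out is left to the caller. A minimal sketch of a matching reader (the function name is hypothetical; Python 2 to pair with cPickle, using the same zero-padded member names):
import zipfile
import cPickle
def load_track_from_zip(zip_path, track_id):
    # Tracks are stored one per archive member, named '000000.pkl', '000001.pkl', ...
    with zipfile.ZipFile(zip_path, 'r') as zf:
        return cPickle.loads(zf.read('{:06d}.pkl'.format(track_id)))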
|
2c175426c93f446cad6846bee141cbd9b29c5593
|
src/xml2csv.py
|
src/xml2csv.py
|
from bs4 import BeautifulSoup
import csv
import sys
XML_FILE_PATH = sys.argv[1]
CSV_FILE_PATH = sys.argv[2]
xml_file = open(XML_FILE_PATH, 'rb').read()
xml_soup = BeautifulSoup(xml_file, 'lxml-xml')
variables_file = open('data/datasets_format.html', 'rb').read()
variables_soup = BeautifulSoup(variables_file, 'lxml')
variables = variables_soup.select('.tabela-2 td')
fieldnames = ['ideDocumento'] + \
[var.text.strip() for index, var in enumerate(variables) if index % 3 == 0]
with open(CSV_FILE_PATH, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
records = xml_soup.find_all('DESPESA')
for record in records:
record_attributes = \
dict([(attr.name, attr.string) for attr in record.contents])
writer.writerow(record_attributes)
|
Add script for converting XML datasets to CSV
|
Add script for converting XML datasets to CSV
|
Python
|
mit
|
wisner23/serenata-de-amor,marcusrehm/serenata-de-amor,wisner23/serenata-de-amor,datasciencebr/serenata-de-amor,datasciencebr/serenata-de-amor,marcusrehm/serenata-de-amor,marcusrehm/serenata-de-amor,marcusrehm/serenata-de-amor
|
Add script for converting XML datasets to CSV
|
from bs4 import BeautifulSoup
import csv
import sys
XML_FILE_PATH = sys.argv[1]
CSV_FILE_PATH = sys.argv[2]
xml_file = open(XML_FILE_PATH, 'rb').read()
xml_soup = BeautifulSoup(xml_file, 'lxml-xml')
variables_file = open('data/datasets_format.html', 'rb').read()
variables_soup = BeautifulSoup(variables_file, 'lxml')
variables = variables_soup.select('.tabela-2 td')
fieldnames = ['ideDocumento'] + \
[var.text.strip() for index, var in enumerate(variables) if index % 3 == 0]
with open(CSV_FILE_PATH, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
records = xml_soup.find_all('DESPESA')
for record in records:
record_attributes = \
dict([(attr.name, attr.string) for attr in record.contents])
writer.writerow(record_attributes)
|
<commit_before><commit_msg>Add script for converting XML datasets to CSV<commit_after>
|
from bs4 import BeautifulSoup
import csv
import sys
XML_FILE_PATH = sys.argv[1]
CSV_FILE_PATH = sys.argv[2]
xml_file = open(XML_FILE_PATH, 'rb').read()
xml_soup = BeautifulSoup(xml_file, 'lxml-xml')
variables_file = open('data/datasets_format.html', 'rb').read()
variables_soup = BeautifulSoup(variables_file, 'lxml')
variables = variables_soup.select('.tabela-2 td')
fieldnames = ['ideDocumento'] + \
[var.text.strip() for index, var in enumerate(variables) if index % 3 == 0]
with open(CSV_FILE_PATH, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
records = xml_soup.find_all('DESPESA')
for record in records:
record_attributes = \
dict([(attr.name, attr.string) for attr in record.contents])
writer.writerow(record_attributes)
|
Add script for converting XML datasets to CSVfrom bs4 import BeautifulSoup
import csv
import sys
XML_FILE_PATH = sys.argv[1]
CSV_FILE_PATH = sys.argv[2]
xml_file = open(XML_FILE_PATH, 'rb').read()
xml_soup = BeautifulSoup(xml_file, 'lxml-xml')
variables_file = open('data/datasets_format.html', 'rb').read()
variables_soup = BeautifulSoup(variables_file, 'lxml')
variables = variables_soup.select('.tabela-2 td')
fieldnames = ['ideDocumento'] + \
[var.text.strip() for index, var in enumerate(variables) if index % 3 == 0]
with open(CSV_FILE_PATH, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
records = xml_soup.find_all('DESPESA')
for record in records:
record_attributes = \
dict([(attr.name, attr.string) for attr in record.contents])
writer.writerow(record_attributes)
|
<commit_before><commit_msg>Add script for converting XML datasets to CSV<commit_after>from bs4 import BeautifulSoup
import csv
import sys
XML_FILE_PATH = sys.argv[1]
CSV_FILE_PATH = sys.argv[2]
xml_file = open(XML_FILE_PATH, 'rb').read()
xml_soup = BeautifulSoup(xml_file, 'lxml-xml')
variables_file = open('data/datasets_format.html', 'rb').read()
variables_soup = BeautifulSoup(variables_file, 'lxml')
variables = variables_soup.select('.tabela-2 td')
fieldnames = ['ideDocumento'] + \
[var.text.strip() for index, var in enumerate(variables) if index % 3 == 0]
with open(CSV_FILE_PATH, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
records = xml_soup.find_all('DESPESA')
for record in records:
record_attributes = \
dict([(attr.name, attr.string) for attr in record.contents])
writer.writerow(record_attributes)
|
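For context on the extraction step in the record above: every child tag of a DESPESA element becomes one CSV cell keyed by tag name. A tiny self-contained sketch of that mapping (inline XML; the field names are illustrative only):
from bs4 import BeautifulSoup
xml = ('<DESPESAS><DESPESA><ideDocumento>1</ideDocumento>'
       '<txtFornecedor>ACME</txtFornecedor></DESPESA></DESPESAS>')
soup = BeautifulSoup(xml, 'lxml-xml')
for record in soup.find_all('DESPESA'):
    # Map each child tag to its text, mirroring the dict built in the script.
    print({attr.name: attr.string for attr in record.contents})
    # -> {'ideDocumento': '1', 'txtFornecedor': 'ACME'}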
|
50c21151b54759d297fdbef222aaa45c17f70027
|
horizon/conf/__init__.py
|
horizon/conf/__init__.py
|
import copy
from django.utils.functional import LazyObject, empty
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
|
import copy
from django.utils.functional import LazyObject, empty
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
|
Fix circular dependencies in dashboard settings
|
Fix circular dependencies in dashboard settings
Importing horizon.utils from dashboard local_settings.py to generate
SECRET_KEY results in a sequence of imports, and horizon.conf.default
module gets imported at some point. During initialization of default
HORIZON_CONFIG this module uses settings.LOGIN_REDIRECT_URL and
ugettext() call. Both of them need django settings to be ready to use,
therefore settings initialization starts again before it could finish.
Since Python processes a module only the first time it is imported,
this process stops, but the 'inner' settings object contains only
parameters that were set above the point of import of local_settings.
Therefore Django complains about missing SECRET_KEY when it processes
'inner' settings.
The fix moves the import of horizon.conf.default to
LazySettings._setup(). If keys of HORIZON_CONFIG obtained from
horizon.conf are not used within openstack_dashboard settings.py
or local_settings.py, the circular import won't happen.
Fixes bug #1154564
Change-Id: If63ab1920ecc8e646fd5b6cc52c106ae0876fa2d
|
Python
|
apache-2.0
|
dan1/horizon-x509,rickerc/horizon_audit,Tesora/tesora-horizon,tuskar/tuskar-ui,maestro-hybrid-cloud/horizon,orbitfp7/horizon,VaneCloud/horizon,CiscoSystems/horizon,nvoron23/avos,openstack-ja/horizon,tqtran7/horizon,JioCloud/horizon,promptworks/horizon,Mirantis/mos-horizon,FNST-OpenStack/horizon,Tesora/tesora-horizon,Frostman/eho-horizon,mandeepdhami/horizon,FNST-OpenStack/horizon,redhat-openstack/horizon,CiscoSystems/avos,izadorozhna/dashboard_integration_tests,pranavtendolkr/horizon,takeshineshiro/horizon,Dark-Hacker/horizon,JioCloud/horizon,kaiweifan/horizon,Metaswitch/horizon,zouyapeng/horizon,endorphinl/horizon-fork,CiscoSystems/avos,wangxiangyu/horizon,philoniare/horizon,rickerc/horizon_audit,ChameleonCloud/horizon,mrunge/horizon,bac/horizon,mdavid/horizon,henaras/horizon,agileblaze/OpenStackTwoFactorAuthentication,newrocknj/horizon,endorphinl/horizon-fork,aaronorosen/horizon-congress,CiscoSystems/avos,watonyweng/horizon,kaiweifan/horizon,maestro-hybrid-cloud/horizon,tuskar/tuskar-ui,j4/horizon,coreycb/horizon,froyobin/horizon,Solinea/horizon,j4/horizon,zouyapeng/horizon,Solinea/horizon,eayunstack/horizon,noironetworks/horizon,netscaler/horizon,dan1/horizon-proto,netscaler/horizon,xinwu/horizon,yanheven/console,liyitest/rr,CiscoSystems/horizon,davidcusatis/horizon,NeCTAR-RC/horizon,gochist/horizon,takeshineshiro/horizon,yeming233/horizon,flochaz/horizon,rdo-management/tuskar-ui,promptworks/horizon,FNST-OpenStack/horizon,aaronorosen/horizon-congress,openstack-ja/horizon,openstack/horizon,henaras/horizon,orbitfp7/horizon,Dark-Hacker/horizon,flochaz/horizon,Hodorable/0602,coreycb/horizon,tellesnobrega/horizon,Mirantis/mos-horizon,mandeepdhami/horizon,Dark-Hacker/horizon,endorphinl/horizon,gerrive/horizon,NeCTAR-RC/horizon,mandeepdhami/horizon,Tesora/tesora-horizon,Metaswitch/horizon,yanheven/console,agileblaze/OpenStackTwoFactorAuthentication,takeshineshiro/horizon,Mirantis/mos-horizon,gochist/horizon,Daniex/horizon,yanheven/console,RudoCris/horizon,RudoCris/horizon,sandvine/horizon,icloudrnd/automation_tools,aaronorosen/horizon-congress,ChameleonCloud/horizon,watonyweng/horizon,pranavtendolkr/horizon,doug-fish/horizon,mdavid/horizon,BiznetGIO/horizon,bac/horizon,bac/horizon,doug-fish/horizon,saydulk/horizon,karthik-suresh/horizon,NeCTAR-RC/horizon,saydulk/horizon,redhat-openstack/horizon,endorphinl/horizon,flochaz/horizon,mrunge/horizon,froyobin/horizon,philoniare/horizon,Daniex/horizon,netscaler/horizon,django-leonardo/horizon,kfox1111/horizon,rdo-management/tuskar-ui,Hodorable/0602,xme1226/horizon,Frostman/eho-horizon,mdavid/horizon,mrunge/openstack_horizon,doug-fish/horizon,henaras/horizon,VaneCloud/horizon,noironetworks/horizon,VaneCloud/horizon,icloudrnd/automation_tools,tsufiev/horizon,liyitest/rr,Solinea/horizon,ChameleonCloud/horizon,VaneCloud/horizon,endorphinl/horizon,doug-fish/horizon,wolverineav/horizon,Tesora/tesora-horizon,idjaw/horizon,icloudrnd/automation_tools,bigswitch/horizon,vladryk/horizon,mrunge/horizon_lib,ikargis/horizon_fod,mrunge/openstack_horizon,blueboxgroup/horizon,eayunstack/horizon,wangxiangyu/horizon,coreycb/horizon,RudoCris/horizon,promptworks/horizon,Metaswitch/horizon,redhat-cip/horizon,orbitfp7/horizon,blueboxgroup/horizon,henaras/horizon,NCI-Cloud/horizon,BiznetGIO/horizon,dan1/horizon-x509,anthonydillon/horizon,gerrive/horizon,yjxtogo/horizon,luhanhan/horizon,yjxtogo/horizon,CiscoSystems/horizon,JioCloud/horizon,RudoCris/horizon,pranavtendolkr/horizon,dan1/horizon-x509,anthonydillon/horizon,yjxtogo/horizon,rdo-management/tuskar-ui,damien-dg/horizon,o
penstack/horizon,CiscoSystems/horizon,kfox1111/horizon,bigswitch/horizon,rdo-management/tuskar-ui,zouyapeng/horizon,r-icarus/openstack_microserver,xinwu/horizon,Hodorable/0602,endorphinl/horizon-fork,nvoron23/avos,redhat-openstack/horizon,django-leonardo/horizon,philoniare/horizon,yeming233/horizon,tellesnobrega/horizon,rickerc/horizon_audit,newrocknj/horizon,gerrive/horizon,xinwu/horizon,dan1/horizon-proto,newrocknj/horizon,davidcusatis/horizon,karthik-suresh/horizon,yjxtogo/horizon,tqtran7/horizon,kfox1111/horizon,karthik-suresh/horizon,Daniex/horizon,xme1226/horizon,spring-week-topos/horizon-week,tqtran7/horizon,tanglei528/horizon,Frostman/eho-horizon,damien-dg/horizon,bigswitch/horizon,watonyweng/horizon,anthonydillon/horizon,openstack/horizon,CiscoSystems/avos,gochist/horizon,django-leonardo/horizon,eayunstack/horizon,ikargis/horizon_fod,tsufiev/horizon,dan1/horizon-proto,spring-week-topos/horizon-week,j4/horizon,wolverineav/horizon,mandeepdhami/horizon,dan1/horizon-proto,takeshineshiro/horizon,tellesnobrega/horizon,BiznetGIO/horizon,sandvine/horizon,BiznetGIO/horizon,newrocknj/horizon,davidcusatis/horizon,sandvine/horizon,bigswitch/horizon,ging/horizon,vladryk/horizon,redhat-openstack/horizon,endorphinl/horizon-fork,redhat-cip/horizon,philoniare/horizon,tqtran7/horizon,ging/horizon,tsufiev/horizon,watonyweng/horizon,dan1/horizon-x509,mrunge/openstack_horizon,r-icarus/openstack_microserver,blueboxgroup/horizon,xinwu/horizon,NeCTAR-RC/horizon,kaiweifan/horizon,zouyapeng/horizon,redhat-cip/horizon,agileblaze/OpenStackTwoFactorAuthentication,maestro-hybrid-cloud/horizon,blueboxgroup/horizon,noironetworks/horizon,NCI-Cloud/horizon,ChameleonCloud/horizon,nvoron23/avos,promptworks/horizon,FNST-OpenStack/horizon,anthonydillon/horizon,tsufiev/horizon,davidcusatis/horizon,wolverineav/horizon,Mirantis/mos-horizon,mrunge/horizon,saydulk/horizon,openstack/horizon,citrix-openstack-build/horizon,tanglei528/horizon,liyitest/rr,endorphinl/horizon,mrunge/horizon_lib,liyitest/rr,idjaw/horizon,citrix-openstack-build/horizon,vladryk/horizon,noironetworks/horizon,damien-dg/horizon,ikargis/horizon_fod,NCI-Cloud/horizon,flochaz/horizon,gerrive/horizon,wolverineav/horizon,r-icarus/openstack_microserver,Metaswitch/horizon,j4/horizon,redhat-cip/horizon,Daniex/horizon,luhanhan/horizon,mrunge/horizon_lib,izadorozhna/dashboard_integration_tests,idjaw/horizon,luhanhan/horizon,ging/horizon,pranavtendolkr/horizon,Dark-Hacker/horizon,karthik-suresh/horizon,django-leonardo/horizon,bac/horizon,Solinea/horizon,spring-week-topos/horizon-week,tanglei528/horizon,wangxiangyu/horizon,coreycb/horizon,idjaw/horizon,icloudrnd/automation_tools,orbitfp7/horizon,nvoron23/avos,agileblaze/OpenStackTwoFactorAuthentication,tuskar/tuskar-ui,ging/horizon,mdavid/horizon,yeming233/horizon,sandvine/horizon,luhanhan/horizon,vladryk/horizon,saydulk/horizon,yeming233/horizon,xme1226/horizon,kfox1111/horizon,openstack-ja/horizon,damien-dg/horizon,NCI-Cloud/horizon,tellesnobrega/horizon,froyobin/horizon,Hodorable/0602,wangxiangyu/horizon,citrix-openstack-build/horizon,maestro-hybrid-cloud/horizon
|
import copy
from django.utils.functional import LazyObject, empty
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
Fix circular dependencies in dashboard settings
Importing horizon.utils from dashboard local_settings.py to generate
SECRET_KEY results in a sequence of imports, and horizon.conf.default
module gets imported at some point. During initialization of default
HORIZON_CONFIG this module uses settings.LOGIN_REDIRECT_URL and
ugettext() call. Both of them need django settings to be ready to use,
therefore settings initialization starts again before it could finish.
Since Python processes a module only the first time it is imported,
this process stops, but the 'inner' settings object contains only
parameters that were set above the point of import of local_settings.
Therefore Django complains about missing SECRET_KEY when it processes
'inner' settings.
The fix moves the import of horizon.conf.default to
LazySettings._setup(). If keys of HORIZON_CONFIG obtained from
horizon.conf are not used within openstack_dashboard settings.py
or local_settings.py, the circular import won't happen.
Fixes bug #1154564
Change-Id: If63ab1920ecc8e646fd5b6cc52c106ae0876fa2d
|
import copy
from django.utils.functional import LazyObject, empty
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
|
<commit_before>import copy
from django.utils.functional import LazyObject, empty
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
<commit_msg>Fix circular dependencies in dashboard settings
Importing horizon.utils from dashboard local_settings.py to generate
SECRET_KEY results in a sequence of imports, and horizon.conf.default
module gets imported at some point. During initialization of default
HORIZON_CONFIG this module uses settings.LOGIN_REDIRECT_URL and
ugettext() call. Both of them need django settings to be ready to use,
therefore settings initialization starts again before it could finish.
Since Python processes a module only the first time it is imported,
this process stops, but the 'inner' settings object contains only
parameters that were set above the point of import of local_settings.
Therefore Django complains about missing SECRET_KEY when it processes
'inner' settings.
The fix moves the import of horizon.conf.default to
LazySettings._setup(). If keys of HORIZON_CONFIG obtained from
horizon.conf are not used within openstack_dashboard settings.py
or local_settings.py, the circular import won't happen.
Fixes bug #1154564
Change-Id: If63ab1920ecc8e646fd5b6cc52c106ae0876fa2d<commit_after>
|
import copy
from django.utils.functional import LazyObject, empty
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
|
import copy
from django.utils.functional import LazyObject, empty
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
Fix circular dependencies in dashboard settings
Importing horizon.utils from dashboard local_settings.py to generate
SECRET_KEY results in a sequence of imports, and horizon.conf.default
module gets imported at some point. During initialization of default
HORIZON_CONFIG this module uses settings.LOGIN_REDIRECT_URL and
ugettext() call. Both of them need django settings to be ready to use,
therefore settings initialization starts again before it could finish.
Since Python processes a module only the first time it is imported,
this process stops, but the 'inner' settings object contains only
parameters that were set above the point of import of local_settings.
Therefore Django complains about missing SECRET_KEY when it processes
'inner' settings.
The fix moves the import of horizon.conf.default to
LazySettings._setup(). If keys of HORIZON_CONFIG obtained from
horizon.conf are not used within openstack_dashboard settings.py
or local_settings.py, the circular import won't happen.
Fixes bug #1154564
Change-Id: If63ab1920ecc8e646fd5b6cc52c106ae0876fa2dimport copy
from django.utils.functional import LazyObject, empty
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
|
<commit_before>import copy
from django.utils.functional import LazyObject, empty
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
<commit_msg>Fix circular dependencies in dashboard settings
Importing horizon.utils from dashboard local_settings.py to generate
SECRET_KEY results in a sequence of imports, and horizon.conf.default
module gets imported at some point. During initialization of default
HORIZON_CONFIG this module uses settings.LOGIN_REDIRECT_URL and
ugettext() call. Both of them need django settings to be ready to use,
therefore settings initialization starts again before it could finish.
Since Python processes a module only the first time it is imported,
this process stops, but the 'inner' settings object contains only
parameters that were set above the point of import of local_settings.
Therefore Django complains about missing SECRET_KEY when it processes
'inner' settings.
The fix moves the import of horizon.conf.default to
LazySettings._setup(). If keys of HORIZON_CONFIG obtained from
horizon.conf are not used within openstack_dashboard settings.py
or local_settings.py, the circular import won't happen.
Fixes bug #1154564
Change-Id: If63ab1920ecc8e646fd5b6cc52c106ae0876fa2d<commit_after>import copy
from django.utils.functional import LazyObject, empty
class LazySettings(LazyObject):
def _setup(self, name=None):
from django.conf import settings
from .default import HORIZON_CONFIG as DEFAULT_CONFIG
HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG)
HORIZON_CONFIG.update(settings.HORIZON_CONFIG)
# Ensure we always have our exception configuration...
for exc_category in ['unauthorized', 'not_found', 'recoverable']:
if exc_category not in HORIZON_CONFIG['exceptions']:
default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category]
HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config
# Ensure our password validator always exists...
if 'regex' not in HORIZON_CONFIG['password_validator']:
default_pw_regex = DEFAULT_CONFIG['password_validator']['regex']
HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex
if 'help_text' not in HORIZON_CONFIG['password_validator']:
default_pw_help = DEFAULT_CONFIG['password_validator']['help_text']
HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help
self._wrapped = HORIZON_CONFIG
def __getitem__(self, name, fallback=None):
if self._wrapped is empty:
self._setup(name)
return self._wrapped.get(name, fallback)
HORIZON_CONFIG = LazySettings()
|
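The fix in the record above is an instance of a general pattern: moving an import from module level into the function that needs it defers the import to first call, which breaks load-time cycles. A minimal sketch with two hypothetical modules:
# a.py
def get_config():
    # Deferred import: b is loaded on the first call, not when a.py is imported.
    from b import DEFAULTS
    return dict(DEFAULTS, source='a')
# b.py
import a  # safe: importing a no longer pulls in b while b is still initializing
DEFAULTS = {'debug': False}
With a module-level `from b import DEFAULTS` in a.py instead, importing b would run a midway through b's initialization and fail because DEFAULTS is not bound yet — the same failure mode the commit message describes for local_settings.py.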
ca6de0795babfd911c49a0f66ca27cd063faf1f1
|
tests/test_tool.py
|
tests/test_tool.py
|
import pytest
import sys
import binascii
import base64
from unittest import mock
from io import StringIO, BytesIO, TextIOWrapper
import cbor2.tool
def test_stdin(monkeypatch, tmpdir):
f = tmpdir.join('outfile')
argv = ['-o', str(f)]
inbuf = TextIOWrapper(BytesIO(binascii.unhexlify('02')))
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
m.setattr('sys.stdin', inbuf)
cbor2.tool.main()
assert f.read() == '2\n'
def test_readfrom(monkeypatch, tmpdir):
f = tmpdir.join('infile')
outfile = tmpdir.join('outfile')
f.write_binary(binascii.unhexlify('02'))
argv = ['-o', str(outfile), str(f)]
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
cbor2.tool.main()
assert outfile.read() == '2\n'
|
Write a test for the command line tool
|
Write a test for the command line tool
|
Python
|
mit
|
agronholm/cbor2,agronholm/cbor2,agronholm/cbor2
|
Write a test for the command line tool
|
import pytest
import sys
import binascii
import base64
from unittest import mock
from io import StringIO, BytesIO, TextIOWrapper
import cbor2.tool
def test_stdin(monkeypatch, tmpdir):
f = tmpdir.join('outfile')
argv = ['-o', str(f)]
inbuf = TextIOWrapper(BytesIO(binascii.unhexlify('02')))
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
m.setattr('sys.stdin', inbuf)
cbor2.tool.main()
assert f.read() == '2\n'
def test_readfrom(monkeypatch, tmpdir):
f = tmpdir.join('infile')
outfile = tmpdir.join('outfile')
f.write_binary(binascii.unhexlify('02'))
argv = ['-o', str(outfile), str(f)]
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
cbor2.tool.main()
assert outfile.read() == '2\n'
|
<commit_before><commit_msg>Write a test for the command line tool<commit_after>
|
import pytest
import sys
import binascii
import base64
from unittest import mock
from io import StringIO, BytesIO, TextIOWrapper
import cbor2.tool
def test_stdin(monkeypatch, tmpdir):
f = tmpdir.join('outfile')
argv = ['-o', str(f)]
inbuf = TextIOWrapper(BytesIO(binascii.unhexlify('02')))
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
m.setattr('sys.stdin', inbuf)
cbor2.tool.main()
assert f.read() == '2\n'
def test_readfrom(monkeypatch, tmpdir):
f = tmpdir.join('infile')
outfile = tmpdir.join('outfile')
f.write_binary(binascii.unhexlify('02'))
argv = ['-o', str(outfile), str(f)]
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
cbor2.tool.main()
assert outfile.read() == '2\n'
|
Write a test for the command line toolimport pytest
import sys
import binascii
import base64
from unittest import mock
from io import StringIO, BytesIO, TextIOWrapper
import cbor2.tool
def test_stdin(monkeypatch, tmpdir):
f = tmpdir.join('outfile')
argv = ['-o', str(f)]
inbuf = TextIOWrapper(BytesIO(binascii.unhexlify('02')))
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
m.setattr('sys.stdin', inbuf)
cbor2.tool.main()
assert f.read() == '2\n'
def test_readfrom(monkeypatch, tmpdir):
f = tmpdir.join('infile')
outfile = tmpdir.join('outfile')
f.write_binary(binascii.unhexlify('02'))
argv = ['-o', str(outfile), str(f)]
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
cbor2.tool.main()
assert outfile.read() == '2\n'
|
<commit_before><commit_msg>Write a test for the command line tool<commit_after>import pytest
import sys
import binascii
import base64
from unittest import mock
from io import StringIO, BytesIO, TextIOWrapper
import cbor2.tool
def test_stdin(monkeypatch, tmpdir):
f = tmpdir.join('outfile')
argv = ['-o', str(f)]
inbuf = TextIOWrapper(BytesIO(binascii.unhexlify('02')))
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
m.setattr('sys.stdin', inbuf)
cbor2.tool.main()
assert f.read() == '2\n'
def test_readfrom(monkeypatch, tmpdir):
f = tmpdir.join('infile')
outfile = tmpdir.join('outfile')
f.write_binary(binascii.unhexlify('02'))
argv = ['-o', str(outfile), str(f)]
with monkeypatch.context() as m:
m.setattr('sys.argv', [''] + argv)
cbor2.tool.main()
assert outfile.read() == '2\n'
|
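A quick sanity check on the fixture used in those tests: the hex byte 02 is the CBOR encoding of the integer 2, which is why both tests expect the tool to print '2'. The round trip below uses only the public cbor2 API:
import binascii
import cbor2
payload = binascii.unhexlify('02')                # CBOR encoding of the integer 2
assert cbor2.loads(payload) == 2                  # decodes back to the integer 2
assert binascii.hexlify(cbor2.dumps(2)) == b'02'  # and 2 encodes back to 0x02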
|
a6db84b191f5df54bab66615d48d9d0ade903229
|
website/addons/dropbox/tests/webtest_tests.py
|
website/addons/dropbox/tests/webtest_tests.py
|
# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # PEP8 asserts
from webtest_plus import TestApp
from website.app import init_app
from website.util import web_url_for, api_url_for
from website.project.model import ensure_schemas
from tests.base import DbTestCase
from tests.factories import AuthUserFactory
app = init_app(set_backends=False, routes=True)
class TestDropboxIntegration(DbTestCase):
def setUp(self):
ensure_schemas()
self.app = TestApp(app)
self.user = AuthUserFactory()
# User is logged in
self.app.authenticate(*self.user.auth)
def test_user_can_create_access_token_on_settings_page(self):
with app.test_request_context():
url = web_url_for('profile_settings')
res = self.app.get(url)
assert_not_in('Create Access Token', res)
form = res.forms['selectAddonsForm']
form['dropbox'] = True
res = form.submit()
assert_equal(res.status_code, 200)
assert_in('Create Access Token', res)
|
Add failing webtest for user settings page
|
Add failing webtest for user settings page
|
Python
|
apache-2.0
|
amyshi188/osf.io,asanfilippo7/osf.io,samchrisinger/osf.io,jnayak1/osf.io,brandonPurvis/osf.io,mluo613/osf.io,felliott/osf.io,kch8qx/osf.io,mfraezz/osf.io,caseyrollins/osf.io,dplorimer/osf,jnayak1/osf.io,asanfilippo7/osf.io,dplorimer/osf,binoculars/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,doublebits/osf.io,billyhunt/osf.io,ckc6cz/osf.io,alexschiller/osf.io,zkraime/osf.io,Nesiehr/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,acshi/osf.io,MerlinZhang/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,acshi/osf.io,TomBaxter/osf.io,rdhyee/osf.io,caseyrygt/osf.io,Ghalko/osf.io,wearpants/osf.io,emetsger/osf.io,alexschiller/osf.io,bdyetton/prettychart,cosenal/osf.io,kch8qx/osf.io,cldershem/osf.io,ticklemepierce/osf.io,SSJohns/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,mattclark/osf.io,laurenrevere/osf.io,brianjgeiger/osf.io,revanthkolli/osf.io,fabianvf/osf.io,cslzchen/osf.io,caneruguz/osf.io,jeffreyliu3230/osf.io,erinspace/osf.io,doublebits/osf.io,jeffreyliu3230/osf.io,samanehsan/osf.io,KAsante95/osf.io,GaryKriebel/osf.io,kwierman/osf.io,jmcarp/osf.io,leb2dg/osf.io,kwierman/osf.io,samanehsan/osf.io,cwisecarver/osf.io,asanfilippo7/osf.io,kushG/osf.io,amyshi188/osf.io,jinluyuan/osf.io,crcresearch/osf.io,caseyrygt/osf.io,jmcarp/osf.io,abought/osf.io,MerlinZhang/osf.io,cldershem/osf.io,mluke93/osf.io,sbt9uc/osf.io,fabianvf/osf.io,CenterForOpenScience/osf.io,barbour-em/osf.io,sbt9uc/osf.io,SSJohns/osf.io,haoyuchen1992/osf.io,TomHeatwole/osf.io,lamdnhan/osf.io,ckc6cz/osf.io,crcresearch/osf.io,lyndsysimon/osf.io,njantrania/osf.io,mfraezz/osf.io,emetsger/osf.io,acshi/osf.io,reinaH/osf.io,DanielSBrown/osf.io,KAsante95/osf.io,doublebits/osf.io,laurenrevere/osf.io,adlius/osf.io,TomHeatwole/osf.io,abought/osf.io,caseyrollins/osf.io,petermalcolm/osf.io,pattisdr/osf.io,cslzchen/osf.io,haoyuchen1992/osf.io,billyhunt/osf.io,sloria/osf.io,aaxelb/osf.io,danielneis/osf.io,hmoco/osf.io,mluke93/osf.io,sloria/osf.io,ckc6cz/osf.io,DanielSBrown/osf.io,samchrisinger/osf.io,lyndsysimon/osf.io,bdyetton/prettychart,Johnetordoff/osf.io,TomHeatwole/osf.io,billyhunt/osf.io,kushG/osf.io,amyshi188/osf.io,AndrewSallans/osf.io,rdhyee/osf.io,jolene-esposito/osf.io,TomHeatwole/osf.io,zamattiac/osf.io,GaryKriebel/osf.io,HarryRybacki/osf.io,saradbowman/osf.io,jinluyuan/osf.io,petermalcolm/osf.io,DanielSBrown/osf.io,ZobairAlijan/osf.io,samchrisinger/osf.io,zkraime/osf.io,brandonPurvis/osf.io,adlius/osf.io,acshi/osf.io,chrisseto/osf.io,ZobairAlijan/osf.io,chrisseto/osf.io,caneruguz/osf.io,ticklemepierce/osf.io,zamattiac/osf.io,CenterForOpenScience/osf.io,Ghalko/osf.io,wearpants/osf.io,jinluyuan/osf.io,njantrania/osf.io,reinaH/osf.io,billyhunt/osf.io,sloria/osf.io,binoculars/osf.io,zachjanicki/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,SSJohns/osf.io,cslzchen/osf.io,bdyetton/prettychart,mluo613/osf.io,cslzchen/osf.io,caseyrollins/osf.io,mattclark/osf.io,icereval/osf.io,kushG/osf.io,himanshuo/osf.io,fabianvf/osf.io,pattisdr/osf.io,reinaH/osf.io,RomanZWang/osf.io,ticklemepierce/osf.io,caseyrygt/osf.io,monikagrabowska/osf.io,jeffreyliu3230/osf.io,arpitar/osf.io,kch8qx/osf.io,Nesiehr/osf.io,arpitar/osf.io,jnayak1/osf.io,felliott/osf.io,hmoco/osf.io,doublebits/osf.io,wearpants/osf.io,sbt9uc/osf.io,rdhyee/osf.io,aaxelb/osf.io,jolene-esposito/osf.io,saradbowman/osf.io,baylee-d/osf.io,laurenrevere/osf.io,alexschiller/osf.io,CenterForOpenScience/osf.io,lamdnhan/osf.io,samanehsan/osf.io,monikagrabowska/osf.io,HarryRybacki/osf.io,RomanZWang/osf.io,danielneis/osf.io,lamdnhan/osf.io,acshi/osf.io,doublebits/osf.io,ZobairAlija
n/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,haoyuchen1992/osf.io,kwierman/osf.io,cosenal/osf.io,mluo613/osf.io,arpitar/osf.io,baylee-d/osf.io,caseyrygt/osf.io,monikagrabowska/osf.io,samchrisinger/osf.io,hmoco/osf.io,HarryRybacki/osf.io,zachjanicki/osf.io,GageGaskins/osf.io,KAsante95/osf.io,adlius/osf.io,jnayak1/osf.io,GageGaskins/osf.io,brianjgeiger/osf.io,danielneis/osf.io,adlius/osf.io,aaxelb/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,mluke93/osf.io,MerlinZhang/osf.io,Ghalko/osf.io,chrisseto/osf.io,brandonPurvis/osf.io,revanthkolli/osf.io,samanehsan/osf.io,zachjanicki/osf.io,cosenal/osf.io,jmcarp/osf.io,cldershem/osf.io,erinspace/osf.io,lyndsysimon/osf.io,aaxelb/osf.io,RomanZWang/osf.io,petermalcolm/osf.io,erinspace/osf.io,MerlinZhang/osf.io,icereval/osf.io,reinaH/osf.io,danielneis/osf.io,zamattiac/osf.io,revanthkolli/osf.io,GageGaskins/osf.io,zachjanicki/osf.io,binoculars/osf.io,HarryRybacki/osf.io,rdhyee/osf.io,petermalcolm/osf.io,wearpants/osf.io,alexschiller/osf.io,ckc6cz/osf.io,felliott/osf.io,crcresearch/osf.io,GageGaskins/osf.io,dplorimer/osf,billyhunt/osf.io,Johnetordoff/osf.io,revanthkolli/osf.io,jolene-esposito/osf.io,ZobairAlijan/osf.io,AndrewSallans/osf.io,cwisecarver/osf.io,chennan47/osf.io,himanshuo/osf.io,sbt9uc/osf.io,GaryKriebel/osf.io,himanshuo/osf.io,kushG/osf.io,RomanZWang/osf.io,GaryKriebel/osf.io,jolene-esposito/osf.io,DanielSBrown/osf.io,lamdnhan/osf.io,jinluyuan/osf.io,monikagrabowska/osf.io,abought/osf.io,ticklemepierce/osf.io,himanshuo/osf.io,jeffreyliu3230/osf.io,KAsante95/osf.io,chennan47/osf.io,zamattiac/osf.io,cldershem/osf.io,icereval/osf.io,bdyetton/prettychart,KAsante95/osf.io,mluo613/osf.io,Johnetordoff/osf.io,njantrania/osf.io,chrisseto/osf.io,njantrania/osf.io,felliott/osf.io,kch8qx/osf.io,mattclark/osf.io,brandonPurvis/osf.io,haoyuchen1992/osf.io,brandonPurvis/osf.io,GageGaskins/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,kwierman/osf.io,RomanZWang/osf.io,barbour-em/osf.io,mluke93/osf.io,caneruguz/osf.io,jmcarp/osf.io,pattisdr/osf.io,mluo613/osf.io,Ghalko/osf.io,SSJohns/osf.io,arpitar/osf.io,zkraime/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,dplorimer/osf,baylee-d/osf.io,cosenal/osf.io,emetsger/osf.io,barbour-em/osf.io,Johnetordoff/osf.io,zkraime/osf.io,abought/osf.io,leb2dg/osf.io,asanfilippo7/osf.io,lyndsysimon/osf.io,barbour-em/osf.io,cwisecarver/osf.io,amyshi188/osf.io,alexschiller/osf.io,fabianvf/osf.io,emetsger/osf.io,kch8qx/osf.io
|
Add failing webtest for user settings page
|
# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # PEP8 asserts
from webtest_plus import TestApp
from website.app import init_app
from website.util import web_url_for, api_url_for
from website.project.model import ensure_schemas
from tests.base import DbTestCase
from tests.factories import AuthUserFactory
app = init_app(set_backends=False, routes=True)
class TestDropboxIntegration(DbTestCase):
def setUp(self):
ensure_schemas()
self.app = TestApp(app)
self.user = AuthUserFactory()
# User is logged in
self.app.authenticate(*self.user.auth)
def test_user_can_create_access_token_on_settings_page(self):
with app.test_request_context():
url = web_url_for('profile_settings')
res = self.app.get(url)
assert_not_in('Create Access Token', res)
form = res.forms['selectAddonsForm']
form['dropbox'] = True
res = form.submit()
assert_equal(res.status_code, 200)
assert_in('Create Access Token', res)
|
<commit_before><commit_msg>Add failing webtest for user settings page<commit_after>
|
# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # PEP8 asserts
from webtest_plus import TestApp
from website.app import init_app
from website.util import web_url_for, api_url_for
from website.project.model import ensure_schemas
from tests.base import DbTestCase
from tests.factories import AuthUserFactory
app = init_app(set_backends=False, routes=True)
class TestDropboxIntegration(DbTestCase):
def setUp(self):
ensure_schemas()
self.app = TestApp(app)
self.user = AuthUserFactory()
# User is logged in
self.app.authenticate(*self.user.auth)
def test_user_can_create_access_token_on_settings_page(self):
with app.test_request_context():
url = web_url_for('profile_settings')
res = self.app.get(url)
assert_not_in('Create Access Token', res)
form = res.forms['selectAddonsForm']
form['dropbox'] = True
res = form.submit()
assert_equal(res.status_code, 200)
assert_in('Create Access Token', res)
|
Add failing webtest for user settings page# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # PEP8 asserts
from webtest_plus import TestApp
from website.app import init_app
from website.util import web_url_for, api_url_for
from website.project.model import ensure_schemas
from tests.base import DbTestCase
from tests.factories import AuthUserFactory
app = init_app(set_backends=False, routes=True)
class TestDropboxIntegration(DbTestCase):
def setUp(self):
ensure_schemas()
self.app = TestApp(app)
self.user = AuthUserFactory()
# User is logged in
self.app.authenticate(*self.user.auth)
def test_user_can_create_access_token_on_settings_page(self):
with app.test_request_context():
url = web_url_for('profile_settings')
res = self.app.get(url)
assert_not_in('Create Access Token', res)
form = res.forms['selectAddonsForm']
form['dropbox'] = True
res = form.submit()
assert_equal(res.status_code, 200)
assert_in('Create Access Token', res)
|
<commit_before><commit_msg>Add failing webtest for user settings page<commit_after># -*- coding: utf-8 -*-
import unittest
from nose.tools import * # PEP8 asserts
from webtest_plus import TestApp
from website.app import init_app
from website.util import web_url_for, api_url_for
from website.project.model import ensure_schemas
from tests.base import DbTestCase
from tests.factories import AuthUserFactory
app = init_app(set_backends=False, routes=True)
class TestDropboxIntegration(DbTestCase):
def setUp(self):
ensure_schemas()
self.app = TestApp(app)
self.user = AuthUserFactory()
# User is logged in
self.app.authenticate(*self.user.auth)
def test_user_can_create_access_token_on_settings_page(self):
with app.test_request_context():
url = web_url_for('profile_settings')
res = self.app.get(url)
assert_not_in('Create Access Token', res)
form = res.forms['selectAddonsForm']
form['dropbox'] = True
res = form.submit()
assert_equal(res.status_code, 200)
assert_in('Create Access Token', res)
|
|
81cba73a64e72fc10170cd8a5082a109037ad27e
|
examples/service/test_hello_service.py
|
examples/service/test_hello_service.py
|
from pyon.util.int_test import IonIntegrationTestCase
from interface.services.examples.hello.ihello_service import HelloServiceClient
class TestHelloService(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/examples/hello.yml')
self.hsc = HelloServiceClient()
def test_hello(self):
ret = self.hsc.hello("emm")
self.assertEquals(ret, "BACK:emm")
|
Add simple examples test for HelloService RPC
|
Add simple examples test for HelloService RPC
|
Python
|
bsd-2-clause
|
mkl-/scioncc,scionrep/scioncc,scionrep/scioncc,ooici/pyon,crchemist/scioncc,crchemist/scioncc,mkl-/scioncc,crchemist/scioncc,scionrep/scioncc,mkl-/scioncc,ooici/pyon
|
Add simple examples test for HelloService RPC
|
from pyon.util.int_test import IonIntegrationTestCase
from interface.services.examples.hello.ihello_service import HelloServiceClient
class TestHelloService(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/examples/hello.yml')
self.hsc = HelloServiceClient()
def test_hello(self):
ret = self.hsc.hello("emm")
self.assertEquals(ret, "BACK:emm")
|
<commit_before><commit_msg>Add simple examples test for HelloService RPC<commit_after>
|
from pyon.util.int_test import IonIntegrationTestCase
from interface.services.examples.hello.ihello_service import HelloServiceClient
class TestHelloService(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/examples/hello.yml')
self.hsc = HelloServiceClient()
def test_hello(self):
ret = self.hsc.hello("emm")
self.assertEquals(ret, "BACK:emm")
|
Add simple examples test for HelloService RPCfrom pyon.util.int_test import IonIntegrationTestCase
from interface.services.examples.hello.ihello_service import HelloServiceClient
class TestHelloService(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/examples/hello.yml')
self.hsc = HelloServiceClient()
def test_hello(self):
ret = self.hsc.hello("emm")
self.assertEquals(ret, "BACK:emm")
|
<commit_before><commit_msg>Add simple examples test for HelloService RPC<commit_after>from pyon.util.int_test import IonIntegrationTestCase
from interface.services.examples.hello.ihello_service import HelloServiceClient
class TestHelloService(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/examples/hello.yml')
self.hsc = HelloServiceClient()
def test_hello(self):
ret = self.hsc.hello("emm")
self.assertEquals(ret, "BACK:emm")
|
|
d99dd117c41e850781056371043ef7de546f0c18
|
test_vpoker.py
|
test_vpoker.py
|
# Copyright (c) 2016 Kirill 'Kolyat' Kiselnikov
# This file is part of vpoker, released under modified MIT license
# See the file LICENSE.txt included in this distribution
"""
Unit tests for main module
"""
import unittest
from vpoker import CARD_BACKGROUND_HEIGHT
from vpoker import SCREEN_WIDTH
from vpoker import Card
class TestCard(unittest.TestCase):
"""Tests for Card class"""
def test_init(self):
"""Tests for main class constructor"""
# Positive test
card = Card(1)
self.assertEqual(card.centerx, 1)
self.assertEqual(card.centery, int(CARD_BACKGROUND_HEIGHT/2))
self.assertEqual(card.active, False)
self.assertEqual(card.back, True)
self.assertEqual(card.suit, '')
self.assertEqual(card.rank, '')
self.assertEqual(card.held, False)
# Negative scenarios
card = Card(-1)
self.assertEqual(card.centerx, 0)
card = Card(0)
self.assertEqual(card.centerx, 0)
card = Card(10.5)
self.assertEqual(card.centerx, int(10.5))
card = Card(SCREEN_WIDTH-1)
self.assertEqual(card.centerx, SCREEN_WIDTH-1)
card = Card(SCREEN_WIDTH)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card(SCREEN_WIDTH+1)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card()
self.assertEqual(card.centerx, 0)
card = Card('123')
self.assertEqual(card.centerx, 0)
def test_set_card(self):
"""Test for set_card() method"""
card = Card()
# Positive test
card.set_card(('S', '10'))
self.assertEqual(card.suit, 'S')
self.assertEqual(card.rank, '10')
self.assertEqual(card.back, False)
# Negative tests
self.assertRaises(TypeError, card.set_card, {'2', '3'})
self.assertRaises(ValueError, card.set_card, ('S', '1', '0'))
self.assertRaises(KeyError, card.set_card, ('A', '10'))
self.assertRaises(KeyError, card.set_card, ('S', '1'))
self.assertRaises(KeyError, card.set_card, ('X', 'V'))
if __name__ == '__main__':
unittest.main()
|
Add file with tests for main module
|
Add file with tests for main module
|
Python
|
mit
|
kolyat/vpoker
|
Add file with tests for main module
|
# Copyright (c) 2016 Kirill 'Kolyat' Kiselnikov
# This file is part of vpoker, released under modified MIT license
# See the file LICENSE.txt included in this distribution
"""
Unit tests for main module
"""
import unittest
from vpoker import CARD_BACKGROUND_HEIGHT
from vpoker import SCREEN_WIDTH
from vpoker import Card
class TestCard(unittest.TestCase):
"""Tests for Card class"""
def test_init(self):
"""Tests for main class constructor"""
# Positive test
card = Card(1)
self.assertEqual(card.centerx, 1)
self.assertEqual(card.centery, int(CARD_BACKGROUND_HEIGHT/2))
self.assertEqual(card.active, False)
self.assertEqual(card.back, True)
self.assertEqual(card.suit, '')
self.assertEqual(card.rank, '')
self.assertEqual(card.held, False)
# Negative scenarios
card = Card(-1)
self.assertEqual(card.centerx, 0)
card = Card(0)
self.assertEqual(card.centerx, 0)
card = Card(10.5)
self.assertEqual(card.centerx, int(10.5))
card = Card(SCREEN_WIDTH-1)
self.assertEqual(card.centerx, SCREEN_WIDTH-1)
card = Card(SCREEN_WIDTH)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card(SCREEN_WIDTH+1)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card()
self.assertEqual(card.centerx, 0)
card = Card('123')
self.assertEqual(card.centerx, 0)
def test_set_card(self):
"""Test for set_card() method"""
card = Card()
# Positive test
card.set_card(('S', '10'))
self.assertEqual(card.suit, 'S')
self.assertEqual(card.rank, '10')
self.assertEqual(card.back, False)
# Negative tests
self.assertRaises(TypeError, card.set_card, {'2', '3'})
self.assertRaises(ValueError, card.set_card, ('S', '1', '0'))
self.assertRaises(KeyError, card.set_card, ('A', '10'))
self.assertRaises(KeyError, card.set_card, ('S', '1'))
self.assertRaises(KeyError, card.set_card, ('X', 'V'))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add file with tests for main module<commit_after>
|
# Copyright (c) 2016 Kirill 'Kolyat' Kiselnikov
# This file is part of vpoker, released under modified MIT license
# See the file LICENSE.txt included in this distribution
"""
Unit tests for main module
"""
import unittest
from vpoker import CARD_BACKGROUND_HEIGHT
from vpoker import SCREEN_WIDTH
from vpoker import Card
class TestCard(unittest.TestCase):
"""Tests for Card class"""
def test_init(self):
"""Tests for main class constructor"""
# Positive test
card = Card(1)
self.assertEqual(card.centerx, 1)
self.assertEqual(card.centery, int(CARD_BACKGROUND_HEIGHT/2))
self.assertEqual(card.active, False)
self.assertEqual(card.back, True)
self.assertEqual(card.suit, '')
self.assertEqual(card.rank, '')
self.assertEqual(card.held, False)
# Negative scenarios
card = Card(-1)
self.assertEqual(card.centerx, 0)
card = Card(0)
self.assertEqual(card.centerx, 0)
card = Card(10.5)
self.assertEqual(card.centerx, int(10.5))
card = Card(SCREEN_WIDTH-1)
self.assertEqual(card.centerx, SCREEN_WIDTH-1)
card = Card(SCREEN_WIDTH)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card(SCREEN_WIDTH+1)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card()
self.assertEqual(card.centerx, 0)
card = Card('123')
self.assertEqual(card.centerx, 0)
def test_set_card(self):
"""Test for set_card() method"""
card = Card()
# Positive test
card.set_card(('S', '10'))
self.assertEqual(card.suit, 'S')
self.assertEqual(card.rank, '10')
self.assertEqual(card.back, False)
# Negative tests
self.assertRaises(TypeError, card.set_card, {'2', '3'})
self.assertRaises(ValueError, card.set_card, ('S', '1', '0'))
self.assertRaises(KeyError, card.set_card, ('A', '10'))
self.assertRaises(KeyError, card.set_card, ('S', '1'))
self.assertRaises(KeyError, card.set_card, ('X', 'V'))
if __name__ == '__main__':
unittest.main()
|
Add file with tests for main module# Copyright (c) 2016 Kirill 'Kolyat' Kiselnikov
# This file is part of vpoker, released under modified MIT license
# See the file LICENSE.txt included in this distribution
"""
Unit tests for main module
"""
import unittest
from vpoker import CARD_BACKGROUND_HEIGHT
from vpoker import SCREEN_WIDTH
from vpoker import Card
class TestCard(unittest.TestCase):
"""Tests for Card class"""
def test_init(self):
"""Tests for main class constructor"""
# Positive test
card = Card(1)
self.assertEqual(card.centerx, 1)
self.assertEqual(card.centery, int(CARD_BACKGROUND_HEIGHT/2))
self.assertEqual(card.active, False)
self.assertEqual(card.back, True)
self.assertEqual(card.suit, '')
self.assertEqual(card.rank, '')
self.assertEqual(card.held, False)
# Negative scenarios
card = Card(-1)
self.assertEqual(card.centerx, 0)
card = Card(0)
self.assertEqual(card.centerx, 0)
card = Card(10.5)
self.assertEqual(card.centerx, int(10.5))
card = Card(SCREEN_WIDTH-1)
self.assertEqual(card.centerx, SCREEN_WIDTH-1)
card = Card(SCREEN_WIDTH)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card(SCREEN_WIDTH+1)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card()
self.assertEqual(card.centerx, 0)
card = Card('123')
self.assertEqual(card.centerx, 0)
def test_set_card(self):
"""Test for set_card() method"""
card = Card()
# Positive test
card.set_card(('S', '10'))
self.assertEqual(card.suit, 'S')
self.assertEqual(card.rank, '10')
self.assertEqual(card.back, False)
# Negative tests
self.assertRaises(TypeError, card.set_card, {'2', '3'})
self.assertRaises(ValueError, card.set_card, ('S', '1', '0'))
self.assertRaises(KeyError, card.set_card, ('A', '10'))
self.assertRaises(KeyError, card.set_card, ('S', '1'))
self.assertRaises(KeyError, card.set_card, ('X', 'V'))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add file with tests for main module<commit_after># Copyright (c) 2016 Kirill 'Kolyat' Kiselnikov
# This file is part of vpoker, released under modified MIT license
# See the file LICENSE.txt included in this distribution
"""
Unit tests for main module
"""
import unittest
from vpoker import CARD_BACKGROUND_HEIGHT
from vpoker import SCREEN_WIDTH
from vpoker import Card
class TestCard(unittest.TestCase):
"""Tests for Card class"""
def test_init(self):
"""Tests for main class constructor"""
# Positive test
card = Card(1)
self.assertEqual(card.centerx, 1)
self.assertEqual(card.centery, int(CARD_BACKGROUND_HEIGHT/2))
self.assertEqual(card.active, False)
self.assertEqual(card.back, True)
self.assertEqual(card.suit, '')
self.assertEqual(card.rank, '')
self.assertEqual(card.held, False)
# Negative scenarios
card = Card(-1)
self.assertEqual(card.centerx, 0)
card = Card(0)
self.assertEqual(card.centerx, 0)
card = Card(10.5)
self.assertEqual(card.centerx, int(10.5))
card = Card(SCREEN_WIDTH-1)
self.assertEqual(card.centerx, SCREEN_WIDTH-1)
card = Card(SCREEN_WIDTH)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card(SCREEN_WIDTH+1)
self.assertEqual(card.centerx, SCREEN_WIDTH)
card = Card()
self.assertEqual(card.centerx, 0)
card = Card('123')
self.assertEqual(card.centerx, 0)
def test_set_card(self):
"""Test for set_card() method"""
card = Card()
# Positive test
card.set_card(('S', '10'))
self.assertEqual(card.suit, 'S')
self.assertEqual(card.rank, '10')
self.assertEqual(card.back, False)
# Negative tests
self.assertRaises(TypeError, card.set_card, {'2', '3'})
self.assertRaises(ValueError, card.set_card, ('S', '1', '0'))
self.assertRaises(KeyError, card.set_card, ('A', '10'))
self.assertRaises(KeyError, card.set_card, ('S', '1'))
self.assertRaises(KeyError, card.set_card, ('X', 'V'))
if __name__ == '__main__':
unittest.main()
|
|
444a6d3a53e2e373e5abe2156b875f5342f371ae
|
pdc/apps/package/migrations/0014_auto_20170412_1331.py
|
pdc/apps/package/migrations/0014_auto_20170412_1331.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package', '0013_set_default_subvariant_to_empty_string'),
]
operations = [
migrations.AddField(
model_name='rpm',
name='srpm_commit_branch',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='rpm',
name='srpm_commit_hash',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
]
|
Add missing database migration script for srpm_commit_hash and srpm_commit_branch.
|
Add missing database migration script for srpm_commit_hash and srpm_commit_branch.
Signed-off-by: Jan Kaluza <24a219eb5881fc77986f139dd507482b6c2e7698@redhat.com>
|
Python
|
mit
|
release-engineering/product-definition-center,product-definition-center/product-definition-center,product-definition-center/product-definition-center,release-engineering/product-definition-center,product-definition-center/product-definition-center,release-engineering/product-definition-center,release-engineering/product-definition-center,product-definition-center/product-definition-center
|
Add missing database migration script for srpm_commit_hash and srpm_commit_branch.
Signed-off-by: Jan Kaluza <24a219eb5881fc77986f139dd507482b6c2e7698@redhat.com>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package', '0013_set_default_subvariant_to_empty_string'),
]
operations = [
migrations.AddField(
model_name='rpm',
name='srpm_commit_branch',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='rpm',
name='srpm_commit_hash',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add missing database migration script for srpm_commit_hash and srpm_commit_branch.
Signed-off-by: Jan Kaluza <24a219eb5881fc77986f139dd507482b6c2e7698@redhat.com><commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package', '0013_set_default_subvariant_to_empty_string'),
]
operations = [
migrations.AddField(
model_name='rpm',
name='srpm_commit_branch',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='rpm',
name='srpm_commit_hash',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
]
|
Add missing database migration script for srpm_commit_hash and srpm_commit_branch.
Signed-off-by: Jan Kaluza <24a219eb5881fc77986f139dd507482b6c2e7698@redhat.com># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package', '0013_set_default_subvariant_to_empty_string'),
]
operations = [
migrations.AddField(
model_name='rpm',
name='srpm_commit_branch',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='rpm',
name='srpm_commit_hash',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
]
|
<commit_before><commit_msg>Add missing database migration script for srpm_commit_hash and srpm_commit_branch.
Signed-off-by: Jan Kaluza <24a219eb5881fc77986f139dd507482b6c2e7698@redhat.com><commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('package', '0013_set_default_subvariant_to_empty_string'),
]
operations = [
migrations.AddField(
model_name='rpm',
name='srpm_commit_branch',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='rpm',
name='srpm_commit_hash',
field=models.CharField(db_index=True, max_length=200, null=True, blank=True),
),
]
|
|
e7a46e772aa09e760b9c817afb618860eeb7e33c
|
helenae/gui/widgets/CompleteRegCtrl.py
|
helenae/gui/widgets/CompleteRegCtrl.py
|
# -*- coding: utf-8 -*-
import wx
ID_BUTTON_CLOSE_MSG = 1000
ID_LABLE_TEXT_INFO = 1001
ID_ICON_INFO = 1002
class CompleteRegCtrl(wx.Frame):
def __init__(self, parent, id, title, ico_folder):
wx.Frame.__init__(self, parent, -1, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
# labels, which contain some text
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="После перезапуска приложения Вы", pos=(60, 25))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="можете авторизироваться", pos=(85, 40))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
# info icon
bitmap = wx.Bitmap(ico_folder + '/icons/ui/info.png', type=wx.BITMAP_TYPE_PNG)
self.info_icon = wx.StaticBitmap(self, id=ID_ICON_INFO, bitmap=bitmap, pos=(15, 25))
# buttons
self.cancel_button = wx.Button(self, id=ID_BUTTON_CLOSE_MSG, label='Закрыть', pos=(225, 115))
# form settings
size = (320, 150)
self.SetSize(size)
self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
if __name__ =='__main__':
app = wx.App(0)
ico_folder = '..'
frame = CompleteRegCtrl(None, -1, 'Сообщение', ico_folder)
frame.Show()
app.MainLoop()
|
Complete register window for widget
|
Complete register window for widget
|
Python
|
mit
|
Relrin/Helenae,Relrin/Helenae,Relrin/Helenae
|
Complete register window for widget
|
# -*- coding: utf-8 -*-
import wx
ID_BUTTON_CLOSE_MSG = 1000
ID_LABLE_TEXT_INFO = 1001
ID_ICON_INFO = 1002
class CompleteRegCtrl(wx.Frame):
def __init__(self, parent, id, title, ico_folder):
wx.Frame.__init__(self, parent, -1, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
# labels, which contain some text
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="После перезапуска приложения Вы", pos=(60, 25))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="можете авторизироваться", pos=(85, 40))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
# info icon
bitmap = wx.Bitmap(ico_folder + '/icons/ui/info.png', type=wx.BITMAP_TYPE_PNG)
self.info_icon = wx.StaticBitmap(self, id=ID_ICON_INFO, bitmap=bitmap, pos=(15, 25))
# buttons
self.cancel_button = wx.Button(self, id=ID_BUTTON_CLOSE_MSG, label='Закрыть', pos=(225, 115))
# form settings
size = (320, 150)
self.SetSize(size)
self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
if __name__ =='__main__':
app = wx.App(0)
ico_folder = '..'
frame = CompleteRegCtrl(None, -1, 'Сообщение', ico_folder)
frame.Show()
app.MainLoop()
|
<commit_before><commit_msg>Complete register window for widget<commit_after>
|
# -*- coding: utf-8 -*-
import wx
ID_BUTTON_CLOSE_MSG = 1000
ID_LABLE_TEXT_INFO = 1001
ID_ICON_INFO = 1002
class CompleteRegCtrl(wx.Frame):
def __init__(self, parent, id, title, ico_folder):
wx.Frame.__init__(self, parent, -1, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
# labels, which contain some text
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="После перезапуска приложения Вы", pos=(60, 25))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="можете авторизироваться", pos=(85, 40))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
# info icon
bitmap = wx.Bitmap(ico_folder + '/icons/ui/info.png', type=wx.BITMAP_TYPE_PNG)
self.info_icon = wx.StaticBitmap(self, id=ID_ICON_INFO, bitmap=bitmap, pos=(15, 25))
# buttons
self.cancel_button = wx.Button(self, id=ID_BUTTON_CLOSE_MSG, label='Закрыть', pos=(225, 115))
# form settings
size = (320, 150)
self.SetSize(size)
self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
if __name__ =='__main__':
app = wx.App(0)
ico_folder = '..'
frame = CompleteRegCtrl(None, -1, 'Сообщение', ico_folder)
frame.Show()
app.MainLoop()
|
Complete register window for widget# -*- coding: utf-8 -*-
import wx
ID_BUTTON_CLOSE_MSG = 1000
ID_LABLE_TEXT_INFO = 1001
ID_ICON_INFO = 1002
class CompleteRegCtrl(wx.Frame):
def __init__(self, parent, id, title, ico_folder):
wx.Frame.__init__(self, parent, -1, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
# labels, which contain some text
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="После перезапуска приложения Вы", pos=(60, 25))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="можете авторизироваться", pos=(85, 40))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
# info icon
bitmap = wx.Bitmap(ico_folder + '/icons/ui/info.png', type=wx.BITMAP_TYPE_PNG)
self.info_icon = wx.StaticBitmap(self, id=ID_ICON_INFO, bitmap=bitmap, pos=(15, 25))
# buttons
self.cancel_button = wx.Button(self, id=ID_BUTTON_CLOSE_MSG, label='Закрыть', pos=(225, 115))
# form settings
size = (320, 150)
self.SetSize(size)
self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
if __name__ =='__main__':
app = wx.App(0)
ico_folder = '..'
frame = CompleteRegCtrl(None, -1, 'Сообщение', ico_folder)
frame.Show()
app.MainLoop()
|
<commit_before><commit_msg>Complete register window for widget<commit_after># -*- coding: utf-8 -*-
import wx
ID_BUTTON_CLOSE_MSG = 1000
ID_LABLE_TEXT_INFO = 1001
ID_ICON_INFO = 1002
class CompleteRegCtrl(wx.Frame):
def __init__(self, parent, id, title, ico_folder):
wx.Frame.__init__(self, parent, -1, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
# labels, which contain some text
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="После перезапуска приложения Вы", pos=(60, 25))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
self.txt = wx.StaticText(self, id=ID_LABLE_TEXT_INFO, label="можете авторизироваться", pos=(85, 40))
self.txt.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0))
# info icon
bitmap = wx.Bitmap(ico_folder + '/icons/ui/info.png', type=wx.BITMAP_TYPE_PNG)
self.info_icon = wx.StaticBitmap(self, id=ID_ICON_INFO, bitmap=bitmap, pos=(15, 25))
# buttons
self.cancel_button = wx.Button(self, id=ID_BUTTON_CLOSE_MSG, label='Закрыть', pos=(225, 115))
# form settings
size = (320, 150)
self.SetSize(size)
self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(self.icon)
if __name__ =='__main__':
app = wx.App(0)
ico_folder = '..'
frame = CompleteRegCtrl(None, -1, 'Сообщение', ico_folder)
frame.Show()
app.MainLoop()
|
|
2acfc552c42846628304e54a3b87e2bf3a59af07
|
conf/ci/appveyor/get-artifacts.py
|
conf/ci/appveyor/get-artifacts.py
|
#!/usr/bin/env python
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
import os
import requests
APIURL = 'https://ci.appveyor.com/api'
ACCOUNT = 'mpi4py/mpi4py'
BRANCH = 'master'
BRANCH = 'maint'
branch_url = APIURL + '/projects/' + ACCOUNT + "/branch/" + BRANCH
branch = requests.get(branch_url).json()
jobs = branch['build']['jobs']
jobids = [job['jobId'] for job in jobs]
for jobid in jobids:
artifacts_url = APIURL + '/buildjobs/' + jobid + '/artifacts'
artifacts = requests.get(artifacts_url).json()
filenames = [a['fileName'] for a in artifacts]
for filename in filenames:
download_url = artifacts_url + '/' + filename
print("GET " + download_url)
continue
data = requests.get(download_url).content
with open(os.path.basename(filename), "wb") as f:
f.write(data)
|
Add helper script to download build artifacts
|
AppVeyor: Add helper script to download build artifacts
|
Python
|
bsd-2-clause
|
mpi4py/mpi4py,mpi4py/mpi4py,mpi4py/mpi4py
|
AppVeyor: Add helper script to download build artifacts
|
#!/usr/bin/env python
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
import os
import requests
APIURL = 'https://ci.appveyor.com/api'
ACCOUNT = 'mpi4py/mpi4py'
BRANCH = 'master'
BRANCH = 'maint'
branch_url = APIURL + '/projects/' + ACCOUNT + "/branch/" + BRANCH
branch = requests.get(branch_url).json()
jobs = branch['build']['jobs']
jobids = [job['jobId'] for job in jobs]
for jobid in jobids:
artifacts_url = APIURL + '/buildjobs/' + jobid + '/artifacts'
artifacts = requests.get(artifacts_url).json()
filenames = [a['fileName'] for a in artifacts]
for filename in filenames:
download_url = artifacts_url + '/' + filename
print("GET " + download_url)
continue
data = requests.get(download_url).content
with open(os.path.basename(filename), "wb") as f:
f.write(data)
|
<commit_before><commit_msg>AppVeyor: Add helper script to download build artifacts<commit_after>
|
#!/usr/bin/env python
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
import os
import requests
APIURL = 'https://ci.appveyor.com/api'
ACCOUNT = 'mpi4py/mpi4py'
BRANCH = 'master'
BRANCH = 'maint'
branch_url = APIURL + '/projects/' + ACCOUNT + "/branch/" + BRANCH
branch = requests.get(branch_url).json()
jobs = branch['build']['jobs']
jobids = [job['jobId'] for job in jobs]
for jobid in jobids:
artifacts_url = APIURL + '/buildjobs/' + jobid + '/artifacts'
artifacts = requests.get(artifacts_url).json()
filenames = [a['fileName'] for a in artifacts]
for filename in filenames:
download_url = artifacts_url + '/' + filename
print("GET " + download_url)
continue
data = requests.get(download_url).content
with open(os.path.basename(filename), "wb") as f:
f.write(data)
|
AppVeyor: Add helper script to download build artifacts#!/usr/bin/env python
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
import os
import requests
APIURL = 'https://ci.appveyor.com/api'
ACCOUNT = 'mpi4py/mpi4py'
BRANCH = 'master'
BRANCH = 'maint'
branch_url = APIURL + '/projects/' + ACCOUNT + "/branch/" + BRANCH
branch = requests.get(branch_url).json()
jobs = branch['build']['jobs']
jobids = [job['jobId'] for job in jobs]
for jobid in jobids:
artifacts_url = APIURL + '/buildjobs/' + jobid + '/artifacts'
artifacts = requests.get(artifacts_url).json()
filenames = [a['fileName'] for a in artifacts]
for filename in filenames:
download_url = artifacts_url + '/' + filename
print("GET " + download_url)
continue
data = requests.get(download_url).content
with open(os.path.basename(filename), "wb") as f:
f.write(data)
|
<commit_before><commit_msg>AppVeyor: Add helper script to download build artifacts<commit_after>#!/usr/bin/env python
# Author: Lisandro Dalcin
# Contact: dalcinl@gmail.com
import os
import requests
APIURL = 'https://ci.appveyor.com/api'
ACCOUNT = 'mpi4py/mpi4py'
BRANCH = 'master'
BRANCH = 'maint'
branch_url = APIURL + '/projects/' + ACCOUNT + "/branch/" + BRANCH
branch = requests.get(branch_url).json()
jobs = branch['build']['jobs']
jobids = [job['jobId'] for job in jobs]
for jobid in jobids:
artifacts_url = APIURL + '/buildjobs/' + jobid + '/artifacts'
artifacts = requests.get(artifacts_url).json()
filenames = [a['fileName'] for a in artifacts]
for filename in filenames:
download_url = artifacts_url + '/' + filename
print("GET " + download_url)
continue
data = requests.get(download_url).content
with open(os.path.basename(filename), "wb") as f:
f.write(data)
|
|
4fe9d2ceb7b1d8be1b89167944c06c6c2c92d1c4
|
csv2ofx/mappings/ingdirect.py
|
csv2ofx/mappings/ingdirect.py
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=invalid-name
"""
csv2ofx.mappings.ingdirect
~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mapping for transactions obtained via ING Direct
(Australian bank)
"""
from __future__ import absolute_import
from operator import itemgetter
mapping = {
'is_split': False,
'has_header': True,
'account': itemgetter('Account'),
'date': itemgetter('Date'),
'amount': lambda tr: tr['Credit'] + tr['Debit'],
'desc': itemgetter('Description'),
}
|
Create mapping for ING Direct
|
Create mapping for ING Direct
|
Python
|
mit
|
reubano/csv2ofx,reubano/csv2ofx
|
Create mapping for ING Direct
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=invalid-name
"""
csv2ofx.mappings.ingdirect
~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mapping for transactions obtained via ING Direct
(Australian bank)
"""
from __future__ import absolute_import
from operator import itemgetter
mapping = {
'is_split': False,
'has_header': True,
'account': itemgetter('Account'),
'date': itemgetter('Date'),
'amount': lambda tr: tr['Credit'] + tr['Debit'],
'desc': itemgetter('Description'),
}
|
<commit_before><commit_msg>Create mapping for ING Direct<commit_after>
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=invalid-name
"""
csv2ofx.mappings.ingdirect
~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mapping for transactions obtained via ING Direct
(Australian bank)
"""
from __future__ import absolute_import
from operator import itemgetter
mapping = {
'is_split': False,
'has_header': True,
'account': itemgetter('Account'),
'date': itemgetter('Date'),
'amount': lambda tr: tr['Credit'] + tr['Debit'],
'desc': itemgetter('Description'),
}
|
Create mapping for ING Direct# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=invalid-name
"""
csv2ofx.mappings.ingdirect
~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mapping for transactions obtained via ING Direct
(Australian bank)
"""
from __future__ import absolute_import
from operator import itemgetter
mapping = {
'is_split': False,
'has_header': True,
'account': itemgetter('Account'),
'date': itemgetter('Date'),
'amount': lambda tr: tr['Credit'] + tr['Debit'],
'desc': itemgetter('Description'),
}
|
<commit_before><commit_msg>Create mapping for ING Direct<commit_after># -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
# pylint: disable=invalid-name
"""
csv2ofx.mappings.ingdirect
~~~~~~~~~~~~~~~~~~~~~~~~
Provides a mapping for transactions obtained via ING Direct
(Australian bank)
"""
from __future__ import absolute_import
from operator import itemgetter
mapping = {
'is_split': False,
'has_header': True,
'account': itemgetter('Account'),
'date': itemgetter('Date'),
'amount': lambda tr: tr['Credit'] + tr['Debit'],
'desc': itemgetter('Description'),
}
|
|
a0844c1dd37e0d487350187874804e973c106445
|
TH02.py
|
TH02.py
|
#TH02 by Trent Monahan, 2015
#ported from Intel's UPM TH02 in C
import mraa
TH02_ADDR = 0x40 # device address
TH02_REG_STATUS = 0x00
TH02_REG_DATA_H = 0x01
TH02_REG_DATA_L = 0x02
TH02_REG_CONFIG = 0x03
TH02_REG_ID = 0x11
TH02_STATUS_RDY_MASK = 0x01
TH02_CMD_MEASURE_HUMI = 0x01
TH02_CMD_MEASURE_TEMP = 0x11
class TH02:
def __init__(self, bus=0, addr=TH02_ADDR):
"""
Instantiates a TH02 object
"""
self._address = addr
self._name = "TH02"
self.i2c = mraa.I2c(bus)
if self.i2c.address(self._address) != mraa.SUCCESS:
raise ValueError("mraa_i2c_address() failed")
#TODO: Check if it's MRAA_SUCCESS
def getTemperature(self):
"""
Gets the temperature value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_TEMP):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
temperature = self.i2c.readReg(TH02_REG_DATA_H) << 8
temperature = temperature | self.i2c.readReg(TH02_REG_DATA_L)
temperature = temperature >> 2
return (temperature / 32.0) - 50.0 #is conversion needed?
def getHumidity(self):
"""
Gets the humidity value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_HUMI):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
humidity = self.i2c.readReg(TH02_REG_DATA_H) << 8
humidity = humidity | self.i2c.readReg(TH02_REG_DATA_L)
humidity = humidity >> 4
return (humidity / 16.0) - 24.0 #is conversion needed?
def getStatus(self):
"""
Gets the sensor status
"""
status = self.i2c.readReg(TH02_REG_STATUS)
if status & TH02_STATUS_RDY_MASK:
return False
else:
return True
def name(self):
"""
Returns the name of the component
"""
return self._name #TH02
|
Add Temp & Humi API
|
Add Temp & Humi API
|
Python
|
mit
|
SinZ163/EdisonSandbox
|
Add Temp & Humi API
|
#TH02 by Trent Monahan, 2015
#ported from Intel's UPM TH02 in C
import mraa
TH02_ADDR = 0x40 # device address
TH02_REG_STATUS = 0x00
TH02_REG_DATA_H = 0x01
TH02_REG_DATA_L = 0x02
TH02_REG_CONFIG = 0x03
TH02_REG_ID = 0x11
TH02_STATUS_RDY_MASK = 0x01
TH02_CMD_MEASURE_HUMI = 0x01
TH02_CMD_MEASURE_TEMP = 0x11
class TH02:
def __init__(self, bus=0, addr=TH02_ADDR):
"""
Instantiates a TH02 object
"""
self._address = addr
self._name = "TH02"
self.i2c = mraa.I2c(bus)
if self.i2c.address(self._address) != mraa.SUCCESS:
raise ValueError("mraa_i2c_address() failed")
#TODO: Check if it's MRAA_SUCCESS
def getTemperature(self):
"""
Gets the temperature value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_TEMP):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
temperature = self.i2c.readReg(TH02_REG_DATA_H) << 8
temperature = temperature | self.i2c.readReg(TH02_REG_DATA_L)
temperature = temperature >> 2
return (temperature / 32.0) - 50.0 #is conversion needed?
def getHumidity(self):
"""
Gets the humidity value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_HUMI):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
humidity = self.i2c.readReg(TH02_REG_DATA_H) << 8
humidity = humidity | self.i2c.readReg(TH02_REG_DATA_L)
humidity = humidity >> 4
return (humidity / 16.0) - 24.0 #is conversion needed?
def getStatus(self):
"""
Gets the sensor status
"""
status = self.i2c.readReg(TH02_REG_STATUS)
if status & TH02_STATUS_RDY_MASK:
return False
else:
return True
def name(self):
"""
Returns the name of the component
"""
return self._name #TH02
|
<commit_before><commit_msg>Add Temp & Humi API<commit_after>
|
#TH02 by Trent Monahan, 2015
#ported from Intel's UPM TH02 in C
import mraa
TH02_ADDR = 0x40 # device address
TH02_REG_STATUS = 0x00
TH02_REG_DATA_H = 0x01
TH02_REG_DATA_L = 0x02
TH02_REG_CONFIG = 0x03
TH02_REG_ID = 0x11
TH02_STATUS_RDY_MASK = 0x01
TH02_CMD_MEASURE_HUMI = 0x01
TH02_CMD_MEASURE_TEMP = 0x11
class TH02:
def __init__(self, bus=0, addr=TH02_ADDR):
"""
Instantiates a TH02 object
"""
self._address = addr
self._name = "TH02"
self.i2c = mraa.I2c(bus)
if self.i2c.address(self._address) != mraa.SUCCESS:
raise ValueError("mraa_i2c_address() failed")
#TODO: Check if it's MRAA_SUCCESS
def getTemperature(self):
"""
Gets the temperature value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_TEMP):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
temperature = self.i2c.readReg(TH02_REG_DATA_H) << 8
temperature = temperature | self.i2c.readReg(TH02_REG_DATA_L)
temperature = temperature >> 2
return (temperature / 32.0) - 50.0 #is conversion needed?
def getHumidity(self):
"""
Gets the humidity value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_HUMI):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
humidity = self.i2c.readReg(TH02_REG_DATA_H) << 8
humidity = humidity | self.i2c.readReg(TH02_REG_DATA_L)
humidity = humidity >> 4
return (humidity / 16.0) - 24.0 #is conversion needed?
def getStatus(self):
"""
Gets the sensor status
"""
status = self.i2c.readReg(TH02_REG_STATUS)
if status & TH02_STATUS_RDY_MASK:
return False
else:
return True
def name(self):
"""
Returns the name of the component
"""
return self._name #TH02
|
Add Temp & Humi API#TH02 by Trent Monahan, 2015
#ported from Intel's UPM TH02 in C
import mraa
TH02_ADDR = 0x40 # device address
TH02_REG_STATUS = 0x00
TH02_REG_DATA_H = 0x01
TH02_REG_DATA_L = 0x02
TH02_REG_CONFIG = 0x03
TH02_REG_ID = 0x11
TH02_STATUS_RDY_MASK = 0x01
TH02_CMD_MEASURE_HUMI = 0x01
TH02_CMD_MEASURE_TEMP = 0x11
class TH02:
def __init__(self, bus=0, addr=TH02_ADDR):
"""
Instantiates a TH02 object
"""
self._address = addr
self._name = "TH02"
self.i2c = mraa.I2c(bus)
if self.i2c.address(self._address) != mraa.SUCCESS:
raise ValueError("mraa_i2c_address() failed")
#TODO: Check if it's MRAA_SUCCESS
def getTemperature(self):
"""
Gets the temperature value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_TEMP):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
temperature = self.i2c.readReg(TH02_REG_DATA_H) << 8
temperature = temperature | self.i2c.readReg(TH02_REG_DATA_L)
temperature = temperature >> 2
return (temperature / 32.0) - 50.0 #is conversion needed?
def getHumidity(self):
"""
Gets the humidity value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_HUMI):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
humidity = self.i2c.readReg(TH02_REG_DATA_H) << 8
humidity = humidity | self.i2c.readReg(TH02_REG_DATA_L)
humidity = humidity >> 4
return (humidity / 16.0) - 24.0 #is conversion needed?
def getStatus(self):
"""
Gets the sensor status
"""
status = self.i2c.readReg(TH02_REG_STATUS)
if status & TH02_STATUS_RDY_MASK:
return False
else:
return True
def name(self):
"""
Returns the name of the component
"""
return self._name #TH02
|
<commit_before><commit_msg>Add Temp & Humi API<commit_after>#TH02 by Trent Monahan, 2015
#ported from Intel's UPM TH02 in C
import mraa
TH02_ADDR = 0x40 # device address
TH02_REG_STATUS = 0x00
TH02_REG_DATA_H = 0x01
TH02_REG_DATA_L = 0x02
TH02_REG_CONFIG = 0x03
TH02_REG_ID = 0x11
TH02_STATUS_RDY_MASK = 0x01
TH02_CMD_MEASURE_HUMI = 0x01
TH02_CMD_MEASURE_TEMP = 0x11
class TH02:
def __init__(self, bus=0, addr=TH02_ADDR):
"""
Instantiates a TH02 object
"""
self._address = addr
self._name = "TH02"
self.i2c = mraa.I2c(bus)
if self.i2c.address(self._address) != mraa.SUCCESS:
raise ValueError("mraa_i2c_address() failed")
#TODO: Check if it's MRAA_SUCCESS
def getTemperature(self):
"""
Gets the temperature value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_TEMP):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
temperature = self.i2c.readReg(TH02_REG_DATA_H) << 8
temperature = temperature | self.i2c.readReg(TH02_REG_DATA_L)
temperature = temperature >> 2
return (temperature / 32.0) - 50.0 #is conversion needed?
def getHumidity(self):
"""
Gets the humidity value from the sensor
"""
if self.i2c.writeReg(TH02_REG_CONFIG, TH02_CMD_MEASURE_HUMI):
raise RuntimeError("I2C.WriteReg() failed")
return 0.0
while self.getStatus() == False:
pass
humidity = self.i2c.readReg(TH02_REG_DATA_H) << 8
humidity = humidity | self.i2c.readReg(TH02_REG_DATA_L)
humidity = humidity >> 4
return (humidity / 16.0) - 24.0 #is conversion needed?
def getStatus(self):
"""
Gets the sensor status
"""
status = self.i2c.readReg(TH02_REG_STATUS)
if status & TH02_STATUS_RDY_MASK:
return False
else:
return True
def name(self):
"""
Returns the name of the component
"""
return self._name #TH02
|
|
f2e6fedf021e48e275a857bc7a0471a17aa10c29
|
tests/integration/test_webui.py
|
tests/integration/test_webui.py
|
import requests
import pytest
class TestWebUI(object):
def get_page(self, page):
return requests.get('http://127.0.0.1' + page)
pages = [
{
'page': '/',
'matching_text': 'Diamond',
},
{
'page': '/scoreboard',
},
{
'page': '/login',
'matching_text': 'Please sign in',
},
{
'page': '/about',
'matching_text': 'Use the following credentials to login',
},
{
'page': '/overview',
},
{
'page': '/api/overview/data'
}
]
@pytest.mark.parametrize("page_data", pages)
def test_page(self, page_data):
resp = self.get_page(page_data['page'])
assert resp.status_code == 200
if 'matching_text' in page_data:
assert page_data['matching_text'] in resp.text
|
Add basic checks for web endpoints integration tests
|
Add basic checks for web endpoints integration tests
|
Python
|
mit
|
pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine
|
Add basic checks for web endpoints integration tests
|
import requests
import pytest
class TestWebUI(object):
def get_page(self, page):
return requests.get('http://127.0.0.1' + page)
pages = [
{
'page': '/',
'matching_text': 'Diamond',
},
{
'page': '/scoreboard',
},
{
'page': '/login',
'matching_text': 'Please sign in',
},
{
'page': '/about',
'matching_text': 'Use the following credentials to login',
},
{
'page': '/overview',
},
{
'page': '/api/overview/data'
}
]
@pytest.mark.parametrize("page_data", pages)
def test_page(self, page_data):
resp = self.get_page(page_data['page'])
assert resp.status_code == 200
if 'matching_text' in page_data:
assert page_data['matching_text'] in resp.text
|
<commit_before><commit_msg>Add basic checks for web endpoints integration tests<commit_after>
|
import requests
import pytest
class TestWebUI(object):
def get_page(self, page):
return requests.get('http://127.0.0.1' + page)
pages = [
{
'page': '/',
'matching_text': 'Diamond',
},
{
'page': '/scoreboard',
},
{
'page': '/login',
'matching_text': 'Please sign in',
},
{
'page': '/about',
'matching_text': 'Use the following credentials to login',
},
{
'page': '/overview',
},
{
'page': '/api/overview/data'
}
]
@pytest.mark.parametrize("page_data", pages)
def test_page(self, page_data):
resp = self.get_page(page_data['page'])
assert resp.status_code == 200
if 'matching_text' in page_data:
assert page_data['matching_text'] in resp.text
|
Add basic checks for web endpoints integration testsimport requests
import pytest
class TestWebUI(object):
def get_page(self, page):
return requests.get('http://127.0.0.1' + page)
pages = [
{
'page': '/',
'matching_text': 'Diamond',
},
{
'page': '/scoreboard',
},
{
'page': '/login',
'matching_text': 'Please sign in',
},
{
'page': '/about',
'matching_text': 'Use the following credentials to login',
},
{
'page': '/overview',
},
{
'page': '/api/overview/data'
}
]
@pytest.mark.parametrize("page_data", pages)
def test_page(self, page_data):
resp = self.get_page(page_data['page'])
assert resp.status_code == 200
if 'matching_text' in page_data:
assert page_data['matching_text'] in resp.text
|
<commit_before><commit_msg>Add basic checks for web endpoints integration tests<commit_after>import requests
import pytest
class TestWebUI(object):
def get_page(self, page):
return requests.get('http://127.0.0.1' + page)
pages = [
{
'page': '/',
'matching_text': 'Diamond',
},
{
'page': '/scoreboard',
},
{
'page': '/login',
'matching_text': 'Please sign in',
},
{
'page': '/about',
'matching_text': 'Use the following credentials to login',
},
{
'page': '/overview',
},
{
'page': '/api/overview/data'
}
]
@pytest.mark.parametrize("page_data", pages)
def test_page(self, page_data):
resp = self.get_page(page_data['page'])
assert resp.status_code == 200
if 'matching_text' in page_data:
assert page_data['matching_text'] in resp.text
|
|
4233919f94296e1e4986345d876b3f2ebf14e0e4
|
unix_rpc.py
|
unix_rpc.py
|
import socket
import struct
import json
import os
RPC_VERSION = 1
def msg_send(sock, msg):
j = json.dumps(msg)
header = struct.pack('!LL', RPC_VERSION, len(j))
sock.sendall(b'%b%b' % (header, j.encode('utf-8')))
def msg_recv(sock):
buff = b''
while len(buff) < 8:
buff += sock.recv(1024)
(version, length) = struct.unpack('!LL', buff[0:8])
assert RPC_VERSION == version
buff = buff[8:]
while len(buff) < length:
buff += sock.recv(1024)
assert len(buff) == length
return json.loads(buff.decode('utf-8'))
class UnknownRPCError(Exception):
pass
class RPCError(Exception):
pass
class RPC(object):
def __init__(self, sock, method):
self.sock = sock
self.method = method
def __call__(self, *args, **kwargs):
msg_send(self.sock, ['rpc', self.method, args, kwargs])
[msg_type, msg] = msg_recv(self.sock)
assert msg_type in ['return', 'error']
if msg_type == 'error':
if msg == 'UnknownRPCError':
raise UnknownRPCError
raise RPCError
return msg
class Server(object):
def __init__(self, path):
self.path = path
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.path)
self.socket.listen(1)
self.funs = {}
def register(self, name, function):
self.funs[name] = function
def start(self):
try:
while True:
conn, _ = self.socket.accept()
[msg_type, method, args, kwargs] = msg_recv(conn)
assert msg_type == 'rpc'
if method not in self.funs:
msg_send(conn, ['error', 'UnknownRPCError'])
else:
try:
ret = self.funs[method](*args, **kwargs)
msg_send(conn, ['return', ret])
except Exception as e:
try:
msg_send(conn, ['error', str(e)])
except:
pass
finally:
try:
if os.path.exists(self.path):
os.remove(self.path)
except: # pylint: disable=bare-except
pass
class Client(object):
def __init__(self, path):
self.__path = path
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.connect(self.__path)
def __getattr__(self, method):
return RPC(self.__socket, method)
def fileno(self):
return self.__socket.fileno()
def read(self):
return self.__socket.recv(1024)
|
Add module for executing RPCs over UNIX sockets
|
Add module for executing RPCs over UNIX sockets
|
Python
|
apache-2.0
|
sagelywizard/dlex
|
Add module for executing RPCs over UNIX sockets
|
import socket
import struct
import json
import os
RPC_VERSION = 1
def msg_send(sock, msg):
j = json.dumps(msg)
header = struct.pack('!LL', RPC_VERSION, len(j))
sock.sendall(b'%b%b' % (header, j.encode('utf-8')))
def msg_recv(sock):
buff = b''
while len(buff) < 8:
buff += sock.recv(1024)
(version, length) = struct.unpack('!LL', buff[0:8])
assert RPC_VERSION == version
buff = buff[8:]
while len(buff) < length:
buff += sock.recv(1024)
assert len(buff) == length
return json.loads(buff.decode('utf-8'))
class UnknownRPCError(Exception):
pass
class RPCError(Exception):
pass
class RPC(object):
def __init__(self, sock, method):
self.sock = sock
self.method = method
def __call__(self, *args, **kwargs):
msg_send(self.sock, ['rpc', self.method, args, kwargs])
[msg_type, msg] = msg_recv(self.sock)
assert msg_type in ['return', 'error']
if msg_type == 'error':
if msg == 'UnknownRPCError':
raise UnknownRPCError
raise RPCError
return msg
class Server(object):
def __init__(self, path):
self.path = path
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.path)
self.socket.listen(1)
self.funs = {}
def register(self, name, function):
self.funs[name] = function
def start(self):
try:
while True:
conn, _ = self.socket.accept()
[msg_type, method, args, kwargs] = msg_recv(conn)
assert msg_type == 'rpc'
if method not in self.funs:
msg_send(conn, ['error', 'UnknownRPCError'])
else:
try:
ret = self.funs[method](*args, **kwargs)
msg_send(conn, ['return', ret])
except Exception as e:
try:
msg_send(conn, ['error', str(e)])
except:
pass
finally:
try:
if os.path.exists(self.path):
os.remove(self.path)
except: # pylint: disable=bare-except
pass
class Client(object):
def __init__(self, path):
self.__path = path
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.connect(self.__path)
def __getattr__(self, method):
return RPC(self.__socket, method)
def fileno(self):
return self.__socket.fileno()
def read(self):
return self.__socket.recv(1024)
|
<commit_before><commit_msg>Add module for executing RPCs over UNIX sockets<commit_after>
|
import socket
import struct
import json
import os
RPC_VERSION = 1
def msg_send(sock, msg):
j = json.dumps(msg)
header = struct.pack('!LL', RPC_VERSION, len(j))
sock.sendall(b'%b%b' % (header, j.encode('utf-8')))
def msg_recv(sock):
buff = b''
while len(buff) < 8:
buff += sock.recv(1024)
(version, length) = struct.unpack('!LL', buff[0:8])
assert RPC_VERSION == version
buff = buff[8:]
while len(buff) < length:
buff += sock.recv(1024)
assert len(buff) == length
return json.loads(buff.decode('utf-8'))
class UnknownRPCError(Exception):
pass
class RPCError(Exception):
pass
class RPC(object):
def __init__(self, sock, method):
self.sock = sock
self.method = method
def __call__(self, *args, **kwargs):
msg_send(self.sock, ['rpc', self.method, args, kwargs])
[msg_type, msg] = msg_recv(self.sock)
assert msg_type in ['return', 'error']
if msg_type == 'error':
if msg == 'UnknownRPCError':
raise UnknownRPCError
raise RPCError
return msg
class Server(object):
def __init__(self, path):
self.path = path
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.path)
self.socket.listen(1)
self.funs = {}
def register(self, name, function):
self.funs[name] = function
def start(self):
try:
while True:
conn, _ = self.socket.accept()
[msg_type, method, args, kwargs] = msg_recv(conn)
assert msg_type == 'rpc'
if method not in self.funs:
msg_send(conn, ['error', 'UnknownRPCError'])
else:
try:
ret = self.funs[method](*args, **kwargs)
msg_send(conn, ['return', ret])
except Exception as e:
try:
msg_send(conn, ['error', str(e)])
except:
pass
finally:
try:
if os.path.exists(self.path):
os.remove(self.path)
except: # pylint: disable=bare-except
pass
class Client(object):
def __init__(self, path):
self.__path = path
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.connect(self.__path)
def __getattr__(self, method):
return RPC(self.__socket, method)
def fileno(self):
return self.__socket.fileno()
def read(self):
return self.__socket.recv(1024)
|
Add module for executing RPCs over UNIX socketsimport socket
import struct
import json
import os
RPC_VERSION = 1
def msg_send(sock, msg):
j = json.dumps(msg)
header = struct.pack('!LL', RPC_VERSION, len(j))
sock.sendall(b'%b%b' % (header, j.encode('utf-8')))
def msg_recv(sock):
buff = b''
while len(buff) < 8:
buff += sock.recv(1024)
(version, length) = struct.unpack('!LL', buff[0:8])
assert RPC_VERSION == version
buff = buff[8:]
while len(buff) < length:
buff += sock.recv(1024)
assert len(buff) == length
return json.loads(buff.decode('utf-8'))
class UnknownRPCError(Exception):
pass
class RPCError(Exception):
pass
class RPC(object):
def __init__(self, sock, method):
self.sock = sock
self.method = method
def __call__(self, *args, **kwargs):
msg_send(self.sock, ['rpc', self.method, args, kwargs])
[msg_type, msg] = msg_recv(self.sock)
assert msg_type in ['return', 'error']
if msg_type == 'error':
if msg == 'UnknownRPCError':
raise UnknownRPCError
raise RPCError
return msg
class Server(object):
def __init__(self, path):
self.path = path
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.path)
self.socket.listen(1)
self.funs = {}
def register(self, name, function):
self.funs[name] = function
def start(self):
try:
while True:
conn, _ = self.socket.accept()
[msg_type, method, args, kwargs] = msg_recv(conn)
assert msg_type == 'rpc'
if method not in self.funs:
msg_send(conn, ['error', 'UnknownRPCError'])
else:
try:
ret = self.funs[method](*args, **kwargs)
msg_send(conn, ['return', ret])
except Exception as e:
try:
msg_send(conn, ['error', str(e)])
except:
pass
finally:
try:
if os.path.exists(self.path):
os.remove(self.path)
except: # pylint: disable=bare-except
pass
class Client(object):
def __init__(self, path):
self.__path = path
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.connect(self.__path)
def __getattr__(self, method):
return RPC(self.__socket, method)
def fileno(self):
return self.__socket.fileno()
def read(self):
return self.__socket.recv(1024)
|
<commit_before><commit_msg>Add module for executing RPCs over UNIX sockets<commit_after>import socket
import struct
import json
import os
RPC_VERSION = 1
def msg_send(sock, msg):
j = json.dumps(msg)
header = struct.pack('!LL', RPC_VERSION, len(j))
sock.sendall(b'%b%b' % (header, j.encode('utf-8')))
def msg_recv(sock):
buff = b''
while len(buff) < 8:
buff += sock.recv(1024)
(version, length) = struct.unpack('!LL', buff[0:8])
assert RPC_VERSION == version
buff = buff[8:]
while len(buff) < length:
buff += sock.recv(1024)
assert len(buff) == length
return json.loads(buff.decode('utf-8'))
class UnknownRPCError(Exception):
pass
class RPCError(Exception):
pass
class RPC(object):
def __init__(self, sock, method):
self.sock = sock
self.method = method
def __call__(self, *args, **kwargs):
msg_send(self.sock, ['rpc', self.method, args, kwargs])
[msg_type, msg] = msg_recv(self.sock)
assert msg_type in ['return', 'error']
if msg_type == 'error':
if msg == 'UnknownRPCError':
raise UnknownRPCError
raise RPCError
return msg
class Server(object):
def __init__(self, path):
self.path = path
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.bind(self.path)
self.socket.listen(1)
self.funs = {}
def register(self, name, function):
self.funs[name] = function
def start(self):
try:
while True:
conn, _ = self.socket.accept()
[msg_type, method, args, kwargs] = msg_recv(conn)
assert msg_type == 'rpc'
if method not in self.funs:
msg_send(conn, ['error', 'UnknownRPCError'])
else:
try:
ret = self.funs[method](*args, **kwargs)
msg_send(conn, ['return', ret])
except Exception as e:
try:
msg_send(conn, ['error', str(e)])
except:
pass
finally:
try:
if os.path.exists(self.path):
os.remove(self.path)
except: # pylint: disable=bare-except
pass
class Client(object):
def __init__(self, path):
self.__path = path
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.connect(self.__path)
def __getattr__(self, method):
return RPC(self.__socket, method)
def fileno(self):
return self.__socket.fileno()
def read(self):
return self.__socket.recv(1024)
|
|
431292036e591e037ce83ce84173d17e86ff80d1
|
lib/carbon/tests/benchmark_aggregator.py
|
lib/carbon/tests/benchmark_aggregator.py
|
import timeit
import time
from carbon.aggregator.processor import AggregationProcessor, RuleManager, settings
from carbon.aggregator.buffers import BufferManager
from carbon.tests.util import print_stats
from carbon.conf import settings
from carbon import state
METRIC = 'prod.applications.foo.1.requests'
METRIC_AGGR = 'prod.applications.foo.all.requests'
FREQUENCY = 1000
def bench_aggregator_noop():
RuleManager.clear()
_bench_aggregator("noop")
def bench_aggregator_sum():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition(
('<env>.applications.<app>.all.requests (%d) =' % FREQUENCY) +
'sum <env>.applications.<app>.*.requests'),
]
_bench_aggregator("sum")
def bench_aggregator_fake():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition('foo (60) = sum bar'),
]
_bench_aggregator("fake")
def _bench_aggregator(name):
print "== %s ==" % name
max_intervals = settings['MAX_AGGREGATION_INTERVALS']
now = time.time() - (max_intervals * FREQUENCY)
buf = None
for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
count = 0
processor = AggregationProcessor()
processor.process(METRIC, (now, 1))
def _process():
processor.process(METRIC, (now + _process.i, 1))
if (_process.i % FREQUENCY) == 0 and buf is not None:
buf.compute_values()
_process.i += 1
_process.i = 0
if buf is None:
buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)
t = timeit.timeit(_process, number=n)
buf.close()
print_stats(n, t)
print ""
def main():
settings.LOG_AGGREGATOR_MISSES = False
class _Fake(object):
def metricGenerated(self, metric, datapoint):
pass
def increment(self, metric):
pass
state.events = _Fake()
state.instrumentation = _Fake()
_bench_aggregator("warmup")
bench_aggregator_noop()
bench_aggregator_sum()
bench_aggregator_fake()
if __name__ == '__main__':
main()
|
Add a benchmark for the aggregator
|
Add a benchmark for the aggregator
|
Python
|
apache-2.0
|
graphite-project/carbon,deniszh/carbon,deniszh/carbon,obfuscurity/carbon,obfuscurity/carbon,criteo-forks/carbon,criteo-forks/carbon,piotr1212/carbon,graphite-project/carbon,piotr1212/carbon
|
Add a benchmark for the aggregator
|
import timeit
import time
from carbon.aggregator.processor import AggregationProcessor, RuleManager, settings
from carbon.aggregator.buffers import BufferManager
from carbon.tests.util import print_stats
from carbon.conf import settings
from carbon import state
METRIC = 'prod.applications.foo.1.requests'
METRIC_AGGR = 'prod.applications.foo.all.requests'
FREQUENCY = 1000
def bench_aggregator_noop():
RuleManager.clear()
_bench_aggregator("noop")
def bench_aggregator_sum():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition(
('<env>.applications.<app>.all.requests (%d) =' % FREQUENCY) +
'sum <env>.applications.<app>.*.requests'),
]
_bench_aggregator("sum")
def bench_aggregator_fake():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition('foo (60) = sum bar'),
]
_bench_aggregator("fake")
def _bench_aggregator(name):
print "== %s ==" % name
max_intervals = settings['MAX_AGGREGATION_INTERVALS']
now = time.time() - (max_intervals * FREQUENCY)
buf = None
for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
count = 0
processor = AggregationProcessor()
processor.process(METRIC, (now, 1))
def _process():
processor.process(METRIC, (now + _process.i, 1))
if (_process.i % FREQUENCY) == 0 and buf is not None:
buf.compute_values()
_process.i += 1
_process.i = 0
if buf is None:
buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)
t = timeit.timeit(_process, number=n)
buf.close()
print_stats(n, t)
print ""
def main():
settings.LOG_AGGREGATOR_MISSES = False
class _Fake(object):
def metricGenerated(self, metric, datapoint):
pass
def increment(self, metric):
pass
state.events = _Fake()
state.instrumentation = _Fake()
_bench_aggregator("warmup")
bench_aggregator_noop()
bench_aggregator_sum()
bench_aggregator_fake()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a benchmark for the aggregator<commit_after>
|
import timeit
import time
from carbon.aggregator.processor import AggregationProcessor, RuleManager, settings
from carbon.aggregator.buffers import BufferManager
from carbon.tests.util import print_stats
from carbon.conf import settings
from carbon import state
METRIC = 'prod.applications.foo.1.requests'
METRIC_AGGR = 'prod.applications.foo.all.requests'
FREQUENCY = 1000
def bench_aggregator_noop():
RuleManager.clear()
_bench_aggregator("noop")
def bench_aggregator_sum():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition(
('<env>.applications.<app>.all.requests (%d) =' % FREQUENCY) +
'sum <env>.applications.<app>.*.requests'),
]
_bench_aggregator("sum")
def bench_aggregator_fake():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition('foo (60) = sum bar'),
]
_bench_aggregator("fake")
def _bench_aggregator(name):
print "== %s ==" % name
max_intervals = settings['MAX_AGGREGATION_INTERVALS']
now = time.time() - (max_intervals * FREQUENCY)
buf = None
for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
count = 0
processor = AggregationProcessor()
processor.process(METRIC, (now, 1))
def _process():
processor.process(METRIC, (now + _process.i, 1))
if (_process.i % FREQUENCY) == 0 and buf is not None:
buf.compute_values()
_process.i += 1
_process.i = 0
if buf is None:
buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)
t = timeit.timeit(_process, number=n)
buf.close()
print_stats(n, t)
print ""
def main():
settings.LOG_AGGREGATOR_MISSES = False
class _Fake(object):
def metricGenerated(self, metric, datapoint):
pass
def increment(self, metric):
pass
state.events = _Fake()
state.instrumentation = _Fake()
_bench_aggregator("warmup")
bench_aggregator_noop()
bench_aggregator_sum()
bench_aggregator_fake()
if __name__ == '__main__':
main()
|
Add a benchmark for the aggregatorimport timeit
import time
from carbon.aggregator.processor import AggregationProcessor, RuleManager, settings
from carbon.aggregator.buffers import BufferManager
from carbon.tests.util import print_stats
from carbon.conf import settings
from carbon import state
METRIC = 'prod.applications.foo.1.requests'
METRIC_AGGR = 'prod.applications.foo.all.requests'
FREQUENCY = 1000
def bench_aggregator_noop():
RuleManager.clear()
_bench_aggregator("noop")
def bench_aggregator_sum():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition(
('<env>.applications.<app>.all.requests (%d) =' % FREQUENCY) +
'sum <env>.applications.<app>.*.requests'),
]
_bench_aggregator("sum")
def bench_aggregator_fake():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition('foo (60) = sum bar'),
]
_bench_aggregator("fake")
def _bench_aggregator(name):
print "== %s ==" % name
max_intervals = settings['MAX_AGGREGATION_INTERVALS']
now = time.time() - (max_intervals * FREQUENCY)
buf = None
for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
count = 0
processor = AggregationProcessor()
processor.process(METRIC, (now, 1))
def _process():
processor.process(METRIC, (now + _process.i, 1))
if (_process.i % FREQUENCY) == 0 and buf is not None:
buf.compute_values()
_process.i += 1
_process.i = 0
if buf is None:
buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)
t = timeit.timeit(_process, number=n)
buf.close()
print_stats(n, t)
print ""
def main():
settings.LOG_AGGREGATOR_MISSES = False
class _Fake(object):
def metricGenerated(self, metric, datapoint):
pass
def increment(self, metric):
pass
state.events = _Fake()
state.instrumentation = _Fake()
_bench_aggregator("warmup")
bench_aggregator_noop()
bench_aggregator_sum()
bench_aggregator_fake()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a benchmark for the aggregator<commit_after>import timeit
import time
from carbon.aggregator.processor import AggregationProcessor, RuleManager, settings
from carbon.aggregator.buffers import BufferManager
from carbon.tests.util import print_stats
from carbon.conf import settings
from carbon import state
METRIC = 'prod.applications.foo.1.requests'
METRIC_AGGR = 'prod.applications.foo.all.requests'
FREQUENCY = 1000
def bench_aggregator_noop():
RuleManager.clear()
_bench_aggregator("noop")
def bench_aggregator_sum():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition(
('<env>.applications.<app>.all.requests (%d) =' % FREQUENCY) +
'sum <env>.applications.<app>.*.requests'),
]
_bench_aggregator("sum")
def bench_aggregator_fake():
RuleManager.clear()
RuleManager.rules = [
RuleManager.parse_definition('foo (60) = sum bar'),
]
_bench_aggregator("fake")
def _bench_aggregator(name):
print "== %s ==" % name
max_intervals = settings['MAX_AGGREGATION_INTERVALS']
now = time.time() - (max_intervals * FREQUENCY)
buf = None
for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
count = 0
processor = AggregationProcessor()
processor.process(METRIC, (now, 1))
def _process():
processor.process(METRIC, (now + _process.i, 1))
if (_process.i % FREQUENCY) == 0 and buf is not None:
buf.compute_values()
_process.i += 1
_process.i = 0
if buf is None:
buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)
t = timeit.timeit(_process, number=n)
buf.close()
print_stats(n, t)
print ""
def main():
settings.LOG_AGGREGATOR_MISSES = False
class _Fake(object):
def metricGenerated(self, metric, datapoint):
pass
def increment(self, metric):
pass
state.events = _Fake()
state.instrumentation = _Fake()
_bench_aggregator("warmup")
bench_aggregator_noop()
bench_aggregator_sum()
bench_aggregator_fake()
if __name__ == '__main__':
main()
|
|
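Editor's note on the record above: the benchmark drives timeit with a zero-argument callable whose state lives in a function attribute (`_process.i`), since `timeit.timeit` re-invokes the same callable `number` times. A minimal self-contained sketch of that counter-on-a-callable pattern, with the throughput arithmetic written out explicitly (Python 3; the record's `print_stats` helper and its exact output format are not shown here, so this print line is an assumption about what it reports):

import timeit

def make_workload():
    def work():
        work.calls += 1          # state survives between timeit iterations
        _ = 'prod.applications.foo.%d.requests' % work.calls
    work.calls = 0               # function attribute acts as the counter
    return work

for n in (1000, 100000):
    work = make_workload()
    elapsed = timeit.timeit(work, number=n)   # calls work() exactly n times
    print('%8d datapoints in %.4fs -> %.0f/s' % (n, elapsed, n / elapsed))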
cfee86e7e9e1e29cbdbf6d888edff1f02d733808
|
31-coin-sums.py
|
31-coin-sums.py
|
def coin_change(left, coins, count):
if left == 0:
return count
if not coins:
return 0
coin, *coins_left = coins
return sum(coin_change(left-coin*i, coins_left, count+1)
for i
in range(0, left//coin))
if __name__ == '__main__':
coins = [200, 100, 50, 20, 10, 5, 2, 1]
total = 200
ans = coin_change(total, coins, 0)
|
Work on 31 coin sums
|
Work on 31 coin sums
|
Python
|
mit
|
dawran6/project-euler
|
Work on 31 coin sums
|
def coin_change(left, coins, count):
if left == 0:
return count
if not coins:
return 0
coin, *coins_left = coins
return sum(coin_change(left-coin*i, coins_left, count+1)
for i
in range(0, left//coin))
if __name__ == '__main__':
coins = [200, 100, 50, 20, 10, 5, 2, 1]
total = 200
ans = coin_change(total, coins, 0)
|
<commit_before><commit_msg>Work on 31 coin sums<commit_after>
|
def coin_change(left, coins, count):
if left == 0:
return count
if not coins:
return 0
coin, *coins_left = coins
return sum(coin_change(left-coin*i, coins_left, count+1)
for i
in range(0, left//coin))
if __name__ == '__main__':
coins = [200, 100, 50, 20, 10, 5, 2, 1]
total = 200
ans = coin_change(total, coins, 0)
|
Work on 31 coin sumsdef coin_change(left, coins, count):
if left == 0:
return count
if not coins:
return 0
coin, *coins_left = coins
return sum(coin_change(left-coin*i, coins_left, count+1)
for i
in range(0, left//coin))
if __name__ == '__main__':
coins = [200, 100, 50, 20, 10, 5, 2, 1]
total = 200
ans = coin_change(total, coins, 0)
|
<commit_before><commit_msg>Work on 31 coin sums<commit_after>def coin_change(left, coins, count):
if left == 0:
return count
if not coins:
return 0
coin, *coins_left = coins
return sum(coin_change(left-coin*i, coins_left, count+1)
for i
in range(0, left//coin))
if __name__ == '__main__':
coins = [200, 100, 50, 20, 10, 5, 2, 1]
total = 200
ans = coin_change(total, coins, 0)
|
|
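Editor's note on the record above: the commit is flagged "Work on", and as written the recursion both stops one coin short (`range(0, left//coin)` never spends all `left//coin` of the current coin) and accumulates running coin counts rather than counting combinations. A sketch of the standard dynamic-programming count for the same problem (Project Euler 31); for a 200p total and the eight UK coins it should print 73682:

def count_ways(total, coins):
    ways = [1] + [0] * total          # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, total + 1):
            ways[amount] += ways[amount - coin]   # extend by one more `coin`
    return ways[total]

print(count_ways(200, [1, 2, 5, 10, 20, 50, 100, 200]))   # 73682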
660aecd99935ef3073d8ff166c5bd131b2d95b22
|
tests/unit/test_inotify.py
|
tests/unit/test_inotify.py
|
#!/usr/bin/env python
import pytest
from butter.inotify import watch
from subprocess import Popen
from tempfile import TemporaryDirectory
from time import sleep
import os
def test_watch():
FILENAME = 'inotify_test'
with TemporaryDirectory() as tmp_dir:
filename = os.path.join(tmp_dir, FILENAME)
proc = Popen('sleep 0.04 ; touch {}'.format(filename), shell=True)
event = watch(tmp_dir)
proc.wait()
|
Add tests for watch function
|
Add tests for watch function
|
Python
|
bsd-3-clause
|
wdv4758h/butter,dasSOZO/python-butter
|
Add tests for watch function
|
#!/usr/bin/env python
import pytest
from butter.inotify import watch
from subprocess import Popen
from tempfile import TemporaryDirectory
from time import sleep
import os
def test_watch():
FILENAME = 'inotify_test'
with TemporaryDirectory() as tmp_dir:
filename = os.path.join(tmp_dir, FILENAME)
proc = Popen('sleep 0.04 ; touch {}'.format(filename), shell=True)
event = watch(tmp_dir)
proc.wait()
|
<commit_before><commit_msg>Add tests for watch function<commit_after>
|
#!/usr/bin/env python
import pytest
from butter.inotify import watch
from subprocess import Popen
from tempfile import TemporaryDirectory
from time import sleep
import os
def test_watch():
FILENAME = 'inotify_test'
with TemporaryDirectory() as tmp_dir:
filename = os.path.join(tmp_dir, FILENAME)
proc = Popen('sleep 0.04 ; touch {}'.format(filename), shell=True)
event = watch(tmp_dir)
proc.wait()
|
Add tests for watch function#!/usr/bin/env python
import pytest
from butter.inotify import watch
from subprocess import Popen
from tempfile import TemporaryDirectory
from time import sleep
import os
def test_watch():
FILENAME = 'inotify_test'
with TemporaryDirectory() as tmp_dir:
filename = os.path.join(tmp_dir, FILENAME)
proc = Popen('sleep 0.04 ; touch {}'.format(filename), shell=True)
event = watch(tmp_dir)
proc.wait()
|
<commit_before><commit_msg>Add tests for watch function<commit_after>#!/usr/bin/env python
import pytest
from butter.inotify import watch
from subprocess import Popen
from tempfile import TemporaryDirectory
from time import sleep
import os
def test_watch():
FILENAME = 'inotify_test'
with TemporaryDirectory() as tmp_dir:
filename = os.path.join(tmp_dir, FILENAME)
proc = Popen('sleep 0.04 ; touch {}'.format(filename), shell=True)
event = watch(tmp_dir)
proc.wait()
|
|
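Editor's note on the record above: the test relies on `watch(tmp_dir)` returning once the subprocess's delayed `touch` fires; if the event never arrives, the test hangs rather than fails. A sketch of a SIGALRM guard that turns such a hang into an error (Unix-only; `time.sleep` is a stand-in for the blocking `watch` call, whose return value the record never asserts on):

import signal
import time

class TestTimeout(Exception):
    pass

def _on_alarm(signum, frame):
    raise TestTimeout('blocking call never returned')

signal.signal(signal.SIGALRM, _on_alarm)
signal.alarm(2)                  # abort after 2 seconds instead of hanging
try:
    time.sleep(0.1)              # stand-in for: event = watch(tmp_dir)
finally:
    signal.alarm(0)              # always disarm, even on failure
print('returned before the deadline')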
4e5ce8c0d0c998d8e9734bec584c93e0bc2fd065
|
migrations/versions/790_add_g9_lots.py
|
migrations/versions/790_add_g9_lots.py
|
"""add entries to lots table for G-Cloud 9
Revision ID: 790
Revises: 780
Create Date: 2017-01-27 13:33:33.333333
"""
# revision identifiers, used by Alembic.
revision = '790'
down_revision = '780'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
# Insert G-Cloud 9 lot records
lot_table = table(
'lots',
column('name', sa.String),
column('slug', sa.String),
column('one_service_limit', sa.Boolean),
column('data', sa.JSON)
)
op.bulk_insert(lot_table, [
{
'name': 'Cloud hosting', 'slug': 'cloud-hosting', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud software', 'slug': 'cloud-software', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud support', 'slug': 'cloud-support', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
])
def downgrade():
op.execute("""
DELETE from lots WHERE slug in ('cloud-hosting', 'cloud-software', 'cloud-support');
""")
|
Add new lots for G-Cloud 9
|
Add new lots for G-Cloud 9
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add new lots for G-Cloud 9
|
"""add entries to lots table for G-Cloud 9
Revision ID: 790
Revises: 780
Create Date: 2017-01-27 13:33:33.333333
"""
# revision identifiers, used by Alembic.
revision = '790'
down_revision = '780'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
# Insert G-Cloud 9 lot records
lot_table = table(
'lots',
column('name', sa.String),
column('slug', sa.String),
column('one_service_limit', sa.Boolean),
column('data', sa.JSON)
)
op.bulk_insert(lot_table, [
{
'name': 'Cloud hosting', 'slug': 'cloud-hosting', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud software', 'slug': 'cloud-software', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud support', 'slug': 'cloud-support', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
])
def downgrade():
op.execute("""
DELETE from lots WHERE slug in ('cloud-hosting', 'cloud-software', 'cloud-support');
""")
|
<commit_before><commit_msg>Add new lots for G-Cloud 9<commit_after>
|
"""add entries to lots table for G-Cloud 9
Revision ID: 790
Revises: 780
Create Date: 2017-01-27 13:33:33.333333
"""
# revision identifiers, used by Alembic.
revision = '790'
down_revision = '780'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
# Insert G-Cloud 9 lot records
lot_table = table(
'lots',
column('name', sa.String),
column('slug', sa.String),
column('one_service_limit', sa.Boolean),
column('data', sa.JSON)
)
op.bulk_insert(lot_table, [
{
'name': 'Cloud hosting', 'slug': 'cloud-hosting', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud software', 'slug': 'cloud-software', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud support', 'slug': 'cloud-support', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
])
def downgrade():
op.execute("""
DELETE from lots WHERE slug in ('cloud-hosting', 'cloud-software', 'cloud-support');
""")
|
Add new lots for G-Cloud 9"""add entries to lots table for G-Cloud 9
Revision ID: 790
Revises: 780
Create Date: 2017-01-27 13:33:33.333333
"""
# revision identifiers, used by Alembic.
revision = '790'
down_revision = '780'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
# Insert G-Cloud 9 lot records
lot_table = table(
'lots',
column('name', sa.String),
column('slug', sa.String),
column('one_service_limit', sa.Boolean),
column('data', sa.JSON)
)
op.bulk_insert(lot_table, [
{
'name': 'Cloud hosting', 'slug': 'cloud-hosting', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud software', 'slug': 'cloud-software', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud support', 'slug': 'cloud-support', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
])
def downgrade():
op.execute("""
DELETE from lots WHERE slug in ('cloud-hosting', 'cloud-software', 'cloud-support');
""")
|
<commit_before><commit_msg>Add new lots for G-Cloud 9<commit_after>"""add entries to lots table for G-Cloud 9
Revision ID: 790
Revises: 780
Create Date: 2017-01-27 13:33:33.333333
"""
# revision identifiers, used by Alembic.
revision = '790'
down_revision = '780'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
# Insert G-Cloud 9 lot records
lot_table = table(
'lots',
column('name', sa.String),
column('slug', sa.String),
column('one_service_limit', sa.Boolean),
column('data', sa.JSON)
)
op.bulk_insert(lot_table, [
{
'name': 'Cloud hosting', 'slug': 'cloud-hosting', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud software', 'slug': 'cloud-software', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
{
'name': 'Cloud support', 'slug': 'cloud-support', 'one_service_limit': False,
'data': {"unitSingular": "service", "unitPlural": "services"}
},
])
def downgrade():
op.execute("""
DELETE from lots WHERE slug in ('cloud-hosting', 'cloud-software', 'cloud-support');
""")
|
|
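Editor's note on the record above: a post-migration smoke check one might run after `alembic upgrade 790`, querying only columns the migration itself touches. This is a sketch: the database URL is an assumption, and the boolean comparison presumes a driver (such as psycopg2) that returns Python bools:

import sqlalchemy as sa

engine = sa.create_engine('postgresql:///digitalmarketplace_api')  # assumed URL
with engine.connect() as conn:
    rows = conn.execute(sa.text(
        "SELECT slug, one_service_limit FROM lots "
        "WHERE slug IN ('cloud-hosting', 'cloud-software', 'cloud-support')"
    )).fetchall()
assert len(rows) == 3                              # all three G-Cloud 9 lots present
assert all(limit is False for _, limit in rows)    # none are one-service lots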
7086dcce04799d744bb594cbf6ccbe8b7b09767d
|
Glyphs/DecRO.py
|
Glyphs/DecRO.py
|
# MenuTitle: Copy to Background, Decompose, Remove Overlaps, Correct Path Direction
for layer in Glyphs.font.selectedLayers:
g = layer.parent
for l in g.layers:
l.background = l.copy()
l.decomposeComponents()
l.removeOverlap()
l.correctPathDirection()
|
Add "Glyphs/Copy to Background, Decompose, Remove Overlaps, Correct Path Direction"
|
Add "Glyphs/Copy to Background, Decompose, Remove Overlaps, Correct Path Direction"
|
Python
|
mit
|
jenskutilek/Glyphs-Scripts
|
Add "Glyphs/Copy to Background, Decompose, Remove Overlaps, Correct Path Direction"
|
# MenuTitle: Copy to Background, Decompose, Remove Overlaps, Correct Path Direction
for layer in Glyphs.font.selectedLayers:
g = layer.parent
for l in g.layers:
l.background = l.copy()
l.decomposeComponents()
l.removeOverlap()
l.correctPathDirection()
|
<commit_before><commit_msg>Add "Glyphs/Copy to Background, Decompose, Remove Overlaps, Correct Path Direction"<commit_after>
|
# MenuTitle: Copy to Background, Decompose, Remove Overlaps, Correct Path Direction
for layer in Glyphs.font.selectedLayers:
g = layer.parent
for l in g.layers:
l.background = l.copy()
l.decomposeComponents()
l.removeOverlap()
l.correctPathDirection()
|
Add "Glyphs/Copy to Background, Decompose, Remove Overlaps, Correct Path Direction"# MenuTitle: Copy to Background, Decompose, Remove Overlaps, Correct Path Direction
for layer in Glyphs.font.selectedLayers:
g = layer.parent
for l in g.layers:
l.background = l.copy()
l.decomposeComponents()
l.removeOverlap()
l.correctPathDirection()
|
<commit_before><commit_msg>Add "Glyphs/Copy to Background, Decompose, Remove Overlaps, Correct Path Direction"<commit_after># MenuTitle: Copy to Background, Decompose, Remove Overlaps, Correct Path Direction
for layer in Glyphs.font.selectedLayers:
g = layer.parent
for l in g.layers:
l.background = l.copy()
l.decomposeComponents()
l.removeOverlap()
l.correctPathDirection()
|
|
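Editor's note on the record above: the script edits every layer of every selected glyph, so on large selections the UI redraws after each mutation. A sketch of the same sequence with interface updates deferred until the end; it assumes the GlyphsApp scripting environment, where `disableUpdateInterface`/`enableUpdateInterface` are the usual calls for this:

# MenuTitle: same operation, with UI redraws deferred until the end
font = Glyphs.font
font.disableUpdateInterface()        # suspend per-layer redraws
try:
    for layer in font.selectedLayers:
        g = layer.parent
        for l in g.layers:
            l.background = l.copy()
            l.decomposeComponents()
            l.removeOverlap()
            l.correctPathDirection()
finally:
    font.enableUpdateInterface()     # re-enable even if a layer errors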
8475c80b48d02fad440acd4324b2bfba2f8e1f67
|
migrations/versions/940_more_supplier_details.py
|
migrations/versions/940_more_supplier_details.py
|
""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
|
Add columns to suppliers table
|
Add columns to suppliers table
The following data is moving from framework-level supplier declarations to the
supplier account level:
* Organisation size
* VAT number
* Non-companies-house registration number (for non-UK companies)
* Registered organisation name (as opposed to marketplace supplier account name)
* Registration date
* Trading status
* Registration country
This adds the relevant columns to the supplier table. All are nullable string fields, except
registration date which is a string in the declaration data we have, but we are moving to
a proper DateTime field at the supplier account level.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add columns to suppliers table
The following data is moving from framework-level supplier declarations to the
supplier account level:
* Organisation size
* VAT number
* Non-companies-house registration number (for non-UK companies)
* Registered organisation name (as opposed to marketplace supplier account name)
* Registration date
* Trading status
* Registration country
This adds the relevant columns to the supplier table. All are nullable string fields, except
registration date which is a string in the declaration data we have, but we are moving to
a proper DateTime field at the supplier account level.
|
""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
|
<commit_before><commit_msg>Add columns to suppliers table
The following data is moving from framework-level supplier declarations to the
supplier account level:
* Organisation size
* VAT number
* Non-companies-house registration number (for non-UK companies)
* Registered organisation name (as opposed to marketplace supplier account name)
* Registration date
* Trading status
* Registration country
This adds the relevant columns to the supplier table. All are nullable string fields, except
registration date which is a string in the declaration data we have, but we are moving to
a proper DateTime field at the supplier account level.<commit_after>
|
""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
|
Add columns to suppliers table
The following data is moving from framework-level supplier declarations to the
supplier account level:
* Organisation size
* VAT number
* Non-companies-house registration number (for non-UK companies)
* Registered organisation name (as opposed to marketplace supplier account name)
* Registration date
* Trading status
* Registration country
This adds the relevant columns to the supplier table. All are nullable string fields, except
registration date which is a string in the declaration data we have, but we are moving to
a proper DateTime field at the supplier account level.""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
|
<commit_before><commit_msg>Add columns to suppliers table
The following data is moving from framework-level supplier declarations to the
supplier account level:
* Organisation size
* VAT number
* Non-companies-house registration number (for non-UK companies)
* Registered organisation name (as opposed to marketplace supplier account name)
* Registration date
* Trading status
* Registration country
This adds the relevant columns to the supplier table. All are nullable string fields, except
registration date which is a string in the declaration data we have, but we are moving to
a proper DateTime field at the supplier account level.<commit_after>""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
op.drop_column(u'suppliers', 'registered_name')
op.drop_column(u'suppliers', 'registration_country')
op.drop_column(u'suppliers', 'other_company_registration_number')
op.drop_column(u'suppliers', 'registration_date')
op.drop_column(u'suppliers', 'vat_number')
op.drop_column(u'suppliers', 'organisation_size')
op.drop_column(u'suppliers', 'trading_status')
|
|
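Editor's note on the record above: the message says `registration_date` arrives as a string in existing declaration data but becomes a proper `DateTime` column here, which implies a parse step in whatever backfill follows. A sketch of that step (not part of the migration itself; the ISO `YYYY-MM-DD` input format is an assumption about the declaration data):

from datetime import datetime

def parse_registration_date(raw):
    # Unparseable or missing values stay NULL rather than being guessed at.
    try:
        return datetime.strptime(raw, '%Y-%m-%d')
    except (TypeError, ValueError):
        return None

assert parse_registration_date('2017-08-16') == datetime(2017, 8, 16)
assert parse_registration_date('16/08/2017') is None
assert parse_registration_date(None) is None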
fe9da2481ab777012fbfd94bd9bacef9fc8151f4
|
psshlib/hosts.py
|
psshlib/hosts.py
|
import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
|
import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative\n')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population\n')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
|
Fix --sample-size error messages lacking newlines
|
Fix --sample-size error messages lacking newlines
|
Python
|
bsd-3-clause
|
jcmcken/parallel-ssh,jorik041/parallel-ssh,gyf19/parallel-ssh
|
import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
Fix --sample-size error messages lacking newlines
|
import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative\n')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population\n')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
|
<commit_before>import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
<commit_msg>Fix --sample-size error messages lacking newlines<commit_after>
|
import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative\n')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population\n')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
|
import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
Fix --sample-size error messages lacking newlinesimport sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative\n')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population\n')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
|
<commit_before>import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
<commit_msg>Fix --sample-size error messages lacking newlines<commit_after>import sys
import psshutil
import random
class ServerPool(list):
def __init__(self, options):
self.options = options
try:
hosts = psshutil.read_host_files(options.host_files, default_user=options.user)
except IOError:
_, e, _ = sys.exc_info()
sys.stderr.write('Could not open hosts file: %s\n' % e.strerror)
sys.exit(1)
if options.host_strings:
for s in options.host_strings:
hosts.extend(psshutil.parse_host_string(s, default_user=options.user))
sample_size = options.sample_size
if sample_size:
if sample_size <= 0:
sys.stderr.write('Sample size cannot be negative\n')
sys.exit(1)
elif sample_size > len(hosts):
sys.stderr.write('Sample size larger than population\n')
sys.exit(1)
hosts = random.sample(hosts, sample_size)
super(ServerPool, self).__init__(hosts)
|
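Editor's note on the record above: the entire fix is that `sys.stderr.write`, unlike `print`, emits exactly the bytes it is given, so consecutive messages run together without an explicit `\n`. A two-line demonstration of the failure mode being fixed:

import sys

sys.stderr.write('Sample size cannot be negative')       # old form: no newline
sys.stderr.write('Sample size larger than population\n')
# stderr shows: Sample size cannot be negativeSample size larger than population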
8c37c1ae7f42d22a186de4751a56209ebdd77ec4
|
dr27demo/dr27app/test_settings.py
|
dr27demo/dr27app/test_settings.py
|
from .base_settings import *
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'driver27-cache-app'
}
}
|
Add settings for django tests.
|
Add settings for django tests.
|
Python
|
mit
|
SRJ9/django-driver27,SRJ9/django-driver27,SRJ9/django-driver27
|
Add settings for django tests.
|
from .base_settings import *
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'driver27-cache-app'
}
}
|
<commit_before><commit_msg>Add settings for django tests.<commit_after>
|
from .base_settings import *
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'driver27-cache-app'
}
}
|
Add settings for django tests.from .base_settings import *
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'driver27-cache-app'
}
}
|
<commit_before><commit_msg>Add settings for django tests.<commit_after>from .base_settings import *
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'KEY_PREFIX': 'driver27-cache-app'
}
}
|
|
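Editor's note on the record above: a sketch of pointing a process at this settings module and exercising the memcached backend. The module path is inferred from the file location, and it assumes a Django version that still ships `MemcachedCache` plus a memcached instance listening on 127.0.0.1:11211:

import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dr27app.test_settings')  # inferred path

import django
django.setup()

from django.core.cache import cache
cache.set('smoke', 1, timeout=30)   # round-trips through MemcachedCache
assert cache.get('smoke') == 1      # fails if memcached is not running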
3e25a1b44743bbf7d0bf1205cebe5bfb6a693fef
|
delete_whois.py
|
delete_whois.py
|
#!/usr/bin/env python
import os
import sys
import time
import getpass
import datetime
from common.appenginepatch.aecmd import setup_env
setup_env()
import ADNS
from adns import rr
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.datastore_errors import Timeout
from domains.models import Domain, Whois
BATCH_SIZE = 400
MAX_ATTEMPTS = 5
def auth_func():
return open('.passwd').read().split(':')
def delete_batch(objects):
if not objects:
return
for attempt in range(MAX_ATTEMPTS):
if attempt:
print "Attempt %d of %d will start in %d seconds." % (
attempt + 1, MAX_ATTEMPTS, attempt)
time.sleep(attempt)
print "Deleting %d objects (%s to %s):" % (
len(objects), objects[0].name(), objects[-1].name())
try:
db.delete(objects)
break
except Timeout:
print "*** Timeout ***"
if attempt + 1 == MAX_ATTEMPTS:
sys.exit(1)
del objects[:]
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--server', metavar='<hostname>',
default='scoretool.appspot.com',
help="connect to a different server")
(options, args) = parser.parse_args()
remote_api_stub.ConfigureRemoteDatastore(
'scoretool', '/remote_api', auth_func, options.server)
while True:
whois_keys = Whois.all(keys_only=True).fetch(BATCH_SIZE)
delete_batch(whois_keys)
if __name__ == '__main__':
main()
|
Delete obsolete Whois model from the datastore.
|
Delete obsolete Whois model from the datastore.
|
Python
|
mit
|
jcrocholl/nxdom,jcrocholl/nxdom
|
Delete obsolete Whois model from the datastore.
|
#!/usr/bin/env python
import os
import sys
import time
import getpass
import datetime
from common.appenginepatch.aecmd import setup_env
setup_env()
import ADNS
from adns import rr
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.datastore_errors import Timeout
from domains.models import Domain, Whois
BATCH_SIZE = 400
MAX_ATTEMPTS = 5
def auth_func():
return open('.passwd').read().split(':')
def delete_batch(objects):
if not objects:
return
for attempt in range(MAX_ATTEMPTS):
if attempt:
print "Attempt %d of %d will start in %d seconds." % (
attempt + 1, MAX_ATTEMPTS, attempt)
time.sleep(attempt)
print "Deleting %d objects (%s to %s):" % (
len(objects), objects[0].name(), objects[-1].name())
try:
db.delete(objects)
break
except Timeout:
print "*** Timeout ***"
if attempt + 1 == MAX_ATTEMPTS:
sys.exit(1)
del objects[:]
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--server', metavar='<hostname>',
default='scoretool.appspot.com',
help="connect to a different server")
(options, args) = parser.parse_args()
remote_api_stub.ConfigureRemoteDatastore(
'scoretool', '/remote_api', auth_func, options.server)
while True:
whois_keys = Whois.all(keys_only=True).fetch(BATCH_SIZE)
delete_batch(whois_keys)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Delete obsolete Whois model from the datastore.<commit_after>
|
#!/usr/bin/env python
import os
import sys
import time
import getpass
import datetime
from common.appenginepatch.aecmd import setup_env
setup_env()
import ADNS
from adns import rr
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.datastore_errors import Timeout
from domains.models import Domain, Whois
BATCH_SIZE = 400
MAX_ATTEMPTS = 5
def auth_func():
return open('.passwd').read().split(':')
def delete_batch(objects):
if not objects:
return
for attempt in range(MAX_ATTEMPTS):
if attempt:
print "Attempt %d of %d will start in %d seconds." % (
attempt + 1, MAX_ATTEMPTS, attempt)
time.sleep(attempt)
print "Deleting %d objects (%s to %s):" % (
len(objects), objects[0].name(), objects[-1].name())
try:
db.delete(objects)
break
except Timeout:
print "*** Timeout ***"
if attempt + 1 == MAX_ATTEMPTS:
sys.exit(1)
del objects[:]
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--server', metavar='<hostname>',
default='scoretool.appspot.com',
help="connect to a different server")
(options, args) = parser.parse_args()
remote_api_stub.ConfigureRemoteDatastore(
'scoretool', '/remote_api', auth_func, options.server)
while True:
whois_keys = Whois.all(keys_only=True).fetch(BATCH_SIZE)
delete_batch(whois_keys)
if __name__ == '__main__':
main()
|
Delete obsolete Whois model from the datastore.#!/usr/bin/env python
import os
import sys
import time
import getpass
import datetime
from common.appenginepatch.aecmd import setup_env
setup_env()
import ADNS
from adns import rr
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.datastore_errors import Timeout
from domains.models import Domain, Whois
BATCH_SIZE = 400
MAX_ATTEMPTS = 5
def auth_func():
return open('.passwd').read().split(':')
def delete_batch(objects):
if not objects:
return
for attempt in range(MAX_ATTEMPTS):
if attempt:
print "Attempt %d of %d will start in %d seconds." % (
attempt + 1, MAX_ATTEMPTS, attempt)
time.sleep(attempt)
print "Deleting %d objects (%s to %s):" % (
len(objects), objects[0].name(), objects[-1].name())
try:
db.delete(objects)
break
except Timeout:
print "*** Timeout ***"
if attempt + 1 == MAX_ATTEMPTS:
sys.exit(1)
del objects[:]
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--server', metavar='<hostname>',
default='scoretool.appspot.com',
help="connect to a different server")
(options, args) = parser.parse_args()
remote_api_stub.ConfigureRemoteDatastore(
'scoretool', '/remote_api', auth_func, options.server)
while True:
whois_keys = Whois.all(keys_only=True).fetch(BATCH_SIZE)
delete_batch(whois_keys)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Delete obsolete Whois model from the datastore.<commit_after>#!/usr/bin/env python
import os
import sys
import time
import getpass
import datetime
from common.appenginepatch.aecmd import setup_env
setup_env()
import ADNS
from adns import rr
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.api.datastore_errors import Timeout
from domains.models import Domain, Whois
BATCH_SIZE = 400
MAX_ATTEMPTS = 5
def auth_func():
return open('.passwd').read().split(':')
def delete_batch(objects):
if not objects:
return
for attempt in range(MAX_ATTEMPTS):
if attempt:
print "Attempt %d of %d will start in %d seconds." % (
attempt + 1, MAX_ATTEMPTS, attempt)
time.sleep(attempt)
print "Deleting %d objects (%s to %s):" % (
len(objects), objects[0].name(), objects[-1].name())
try:
db.delete(objects)
break
except Timeout:
print "*** Timeout ***"
if attempt + 1 == MAX_ATTEMPTS:
sys.exit(1)
del objects[:]
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--server', metavar='<hostname>',
default='scoretool.appspot.com',
help="connect to a different server")
(options, args) = parser.parse_args()
remote_api_stub.ConfigureRemoteDatastore(
'scoretool', '/remote_api', auth_func, options.server)
while True:
whois_keys = Whois.all(keys_only=True).fetch(BATCH_SIZE)
delete_batch(whois_keys)
if __name__ == '__main__':
main()
|
|
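Editor's note on the record above: `delete_batch` returns early on an empty list, but the `while True` loop keeps re-fetching forever once the kind is empty. A self-contained sketch of the drain loop with the missing exit condition, using an in-memory list as a stand-in for the datastore:

def drain(fetch_batch, delete_batch, batch_size=400):
    while True:
        batch = fetch_batch(batch_size)
        if not batch:
            return                # the exit the record's loop lacks
        delete_batch(batch)

store = list(range(1000))         # stand-in for the Whois kind

def fetch(n):
    return store[:n]

def delete(batch):
    del store[:len(batch)]

drain(fetch, delete)
assert store == []                # terminates after three passes (400+400+200)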
b977cf85a16d322cecb2bde051ad3026fa8e037e
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(
name="pycomponents",
version='0.1',
description="A simple component entity system for python",
author="David Gabor Bodor",
url="http://github.com/dragonfi/pycomponents",
packages=['pycomponents'],
license="MIT",
)
|
Make package installable via pip
|
Make package installable via pip
|
Python
|
mit
|
dragonfi/pycomponents,dragonfi/pycomponents
|
Make package installable via pip
|
from distutils.core import setup
setup(
name="pycomponents",
version='0.1',
description="A simple component entity system for python",
author="David Gabor Bodor",
url="http://github.com/dragonfi/pycomponents",
packages=['pycomponents'],
license="MIT",
)
|
<commit_before><commit_msg>Make package installable via pip<commit_after>
|
from distutils.core import setup
setup(
name="pycomponents",
version='0.1',
description="A simple component entity system for python",
author="David Gabor Bodor",
url="http://github.com/dragonfi/pycomponents",
packages=['pycomponents'],
license="MIT",
)
|
Make package installable via pipfrom distutils.core import setup
setup(
name="pycomponents",
version='0.1',
description="A simple component entity system for python",
author="David Gabor Bodor",
url="http://github.com/dragonfi/pycomponents",
packages=['pycomponents'],
license="MIT",
)
|
<commit_before><commit_msg>Make package installable via pip<commit_after>from distutils.core import setup
setup(
name="pycomponents",
version='0.1',
description="A simple component entity system for python",
author="David Gabor Bodor",
url="http://github.com/dragonfi/pycomponents",
packages=['pycomponents'],
license="MIT",
)
|
|
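Editor's note on the record above: `pip install .` accepts the distutils-based file as-is, which is all the commit needs. A setuptools variant of the same metadata (values copied from the record) is the usual next step once dependencies appear, since distutils has no slot for them:

from setuptools import setup

setup(
    name='pycomponents',
    version='0.1',
    description='A simple component entity system for python',
    author='David Gabor Bodor',
    url='http://github.com/dragonfi/pycomponents',
    packages=['pycomponents'],
    license='MIT',
    install_requires=[],   # empty today; distutils cannot express this at all
)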
594039c17f9db7b2cb058eaca58299281a61ef29
|
irrigator_pro/farms/formset_views.py
|
irrigator_pro/farms/formset_views.py
|
from extra_views import ModelFormSetView
from django.forms import Textarea, TextInput
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from farms.models import Field, Probe, ProbeReading, WaterHistory
from fields_filter import *
class Farms_FormsetView(ModelFormSetView):
# model = WaterHistory
# template_name = 'farms/water_history_list.html'
# fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
# widgets = {
# 'comment': Textarea(attrs={'rows':2, 'cols':20}),
# 'description': Textarea(attrs={'rows':2, 'cols':20}),
# 'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
# }
can_delete=True
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(Farms_FormsetView, self).dispatch(*args, **kwargs)
def get_queryset(self):
user = self.request.user
queryset = super(Farms_FormsetView, self).get_queryset()
queryset = queryset.filter( Q(field_list__farm__farmer=user) |
Q(field_list__farm__users=user)
)
return queryset.distinct()
def fields_filter(self, user):
return Field.objects.filter( Q(farm__farmer=user) |
Q(farm__users=user) ).distinct()
def construct_formset(self, *args, **kwargs):
formset = super(Farms_FormsetView, self).construct_formset(*args, **kwargs)
for form in formset:
form.fields["field_list"].queryset = self.fields_filter(self.request.user)
print form.fields["comment"]
return formset
def get_factory_kwargs(self):
kwargs = super(Farms_FormsetView, self).get_factory_kwargs()
if hasattr(self, 'widgets'):
kwargs[ 'widgets' ] = self.widgets
return kwargs
class ProbeFormsetView(Farms_FormsetView):
model = Probe
template_name = 'farms/probe_list.html'
fields = [ 'name', 'description', 'field_list', 'farm_code', 'probe_code', 'comment']
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
}
class WaterHistoryFormsetView(Farms_FormsetView):
model = WaterHistory
template_name = 'farms/water_history_list.html'
fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
}
|
Move Probes and Water_History formset views to using a common "Farms_FormsetView" superclass.
|
Move Probes and Water_History formset views to using a common
"Farms_FormsetView" superclass.
|
Python
|
mit
|
warnes/irrigatorpro,warnes/irrigatorpro,warnes/irrigatorpro,warnes/irrigatorpro
|
Move Probes and Water_History formset views to using a common
"Farms_FormsetView" superclass.
|
from extra_views import ModelFormSetView
from django.forms import Textarea, TextInput
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from farms.models import Field, Probe, ProbeReading, WaterHistory
from fields_filter import *
class Farms_FormsetView(ModelFormSetView):
# model = WaterHistory
# template_name = 'farms/water_history_list.html'
# fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
# widgets = {
# 'comment': Textarea(attrs={'rows':2, 'cols':20}),
# 'description': Textarea(attrs={'rows':2, 'cols':20}),
# 'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
# }
can_delete=True
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(Farms_FormsetView, self).dispatch(*args, **kwargs)
def get_queryset(self):
user = self.request.user
queryset = super(Farms_FormsetView, self).get_queryset()
queryset = queryset.filter( Q(field_list__farm__farmer=user) |
Q(field_list__farm__users=user)
)
return queryset.distinct()
def fields_filter(self, user):
return Field.objects.filter( Q(farm__farmer=user) |
Q(farm__users=user) ).distinct()
def construct_formset(self, *args, **kwargs):
formset = super(Farms_FormsetView, self).construct_formset(*args, **kwargs)
for form in formset:
form.fields["field_list"].queryset = self.fields_filter(self.request.user)
print form.fields["comment"]
return formset
def get_factory_kwargs(self):
kwargs = super(Farms_FormsetView, self).get_factory_kwargs()
if hasattr(self, 'widgets'):
kwargs[ 'widgets' ] = self.widgets
return kwargs
class ProbeFormsetView(Farms_FormsetView):
model = Probe
template_name = 'farms/probe_list.html'
fields = [ 'name', 'description', 'field_list', 'farm_code', 'probe_code', 'comment']
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
}
class WaterHistoryFormsetView(Farms_FormsetView):
model = WaterHistory
template_name = 'farms/water_history_list.html'
fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
}
|
<commit_before><commit_msg>Move Probes and Water_History formset views to using a common
"Farms_FormsetView" superclass.<commit_after>
|
from extra_views import ModelFormSetView
from django.forms import Textarea, TextInput
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from farms.models import Field, Probe, ProbeReading, WaterHistory
from fields_filter import *
class Farms_FormsetView(ModelFormSetView):
# model = WaterHistory
# template_name = 'farms/water_history_list.html'
# fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
# widgets = {
# 'comment': Textarea(attrs={'rows':2, 'cols':20}),
# 'description': Textarea(attrs={'rows':2, 'cols':20}),
# 'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
# }
can_delete=True
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(Farms_FormsetView, self).dispatch(*args, **kwargs)
def get_queryset(self):
user = self.request.user
queryset = super(Farms_FormsetView, self).get_queryset()
queryset = queryset.filter( Q(field_list__farm__farmer=user) |
Q(field_list__farm__users=user)
)
return queryset.distinct()
def fields_filter(self, user):
return Field.objects.filter( Q(farm__farmer=user) |
Q(farm__users=user) ).distinct()
def construct_formset(self, *args, **kwargs):
formset = super(Farms_FormsetView, self).construct_formset(*args, **kwargs)
for form in formset:
form.fields["field_list"].queryset = self.fields_filter(self.request.user)
print form.fields["comment"]
return formset
def get_factory_kwargs(self):
kwargs = super(Farms_FormsetView, self).get_factory_kwargs()
if hasattr(self, 'widgets'):
kwargs[ 'widgets' ] = self.widgets
return kwargs
class ProbeFormsetView(Farms_FormsetView):
model = Probe
template_name = 'farms/probe_list.html'
fields = [ 'name', 'description', 'field_list', 'farm_code', 'probe_code', 'comment']
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
}
class WaterHistoryFormsetView(Farms_FormsetView):
model = WaterHistory
template_name = 'farms/water_history_list.html'
fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
}
|
Move Probes and Water_History formset views to using a common
"Farms_FormsetView" superclass.from extra_views import ModelFormSetView
from django.forms import Textarea, TextInput
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from farms.models import Field, Probe, ProbeReading, WaterHistory
from fields_filter import *
class Farms_FormsetView(ModelFormSetView):
# model = WaterHistory
# template_name = 'farms/water_history_list.html'
# fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
# widgets = {
# 'comment': Textarea(attrs={'rows':2, 'cols':20}),
# 'description': Textarea(attrs={'rows':2, 'cols':20}),
# 'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
# }
can_delete=True
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(Farms_FormsetView, self).dispatch(*args, **kwargs)
def get_queryset(self):
user = self.request.user
queryset = super(Farms_FormsetView, self).get_queryset()
queryset = queryset.filter( Q(field_list__farm__farmer=user) |
Q(field_list__farm__users=user)
)
return queryset.distinct()
def fields_filter(self, user):
return Field.objects.filter( Q(farm__farmer=user) |
Q(farm__users=user) ).distinct()
def construct_formset(self, *args, **kwargs):
formset = super(Farms_FormsetView, self).construct_formset(*args, **kwargs)
for form in formset:
form.fields["field_list"].queryset = self.fields_filter(self.request.user)
print form.fields["comment"]
return formset
def get_factory_kwargs(self):
kwargs = super(Farms_FormsetView, self).get_factory_kwargs()
if hasattr(self, 'widgets'):
kwargs[ 'widgets' ] = self.widgets
return kwargs
class ProbeFormsetView(Farms_FormsetView):
model = Probe
template_name = 'farms/probe_list.html'
fields = [ 'name', 'description', 'field_list', 'farm_code', 'probe_code', 'comment']
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
}
class WaterHistoryFormsetView(Farms_FormsetView):
model = WaterHistory
template_name = 'farms/water_history_list.html'
fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
}
|
<commit_before><commit_msg>Move Probes and Water_History formset views to using a common
"Farms_FormsetView" superclass.<commit_after>from extra_views import ModelFormSetView
from django.forms import Textarea, TextInput
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from farms.models import Field, Probe, ProbeReading, WaterHistory
from fields_filter import *
class Farms_FormsetView(ModelFormSetView):
# model = WaterHistory
# template_name = 'farms/water_history_list.html'
# fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
# widgets = {
# 'comment': Textarea(attrs={'rows':2, 'cols':20}),
# 'description': Textarea(attrs={'rows':2, 'cols':20}),
# 'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
# }
can_delete=True
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(Farms_FormsetView, self).dispatch(*args, **kwargs)
def get_queryset(self):
user = self.request.user
queryset = super(Farms_FormsetView, self).get_queryset()
queryset = queryset.filter( Q(field_list__farm__farmer=user) |
Q(field_list__farm__users=user)
)
return queryset.distinct()
def fields_filter(self, user):
return Field.objects.filter( Q(farm__farmer=user) |
Q(farm__users=user) ).distinct()
def construct_formset(self, *args, **kwargs):
formset = super(Farms_FormsetView, self).construct_formset(*args, **kwargs)
for form in formset:
form.fields["field_list"].queryset = self.fields_filter(self.request.user)
print form.fields["comment"]
return formset
def get_factory_kwargs(self):
kwargs = super(Farms_FormsetView, self).get_factory_kwargs()
if hasattr(self, 'widgets'):
kwargs[ 'widgets' ] = self.widgets
return kwargs
class ProbeFormsetView(Farms_FormsetView):
model = Probe
template_name = 'farms/probe_list.html'
fields = [ 'name', 'description', 'field_list', 'farm_code', 'probe_code', 'comment']
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
}
class WaterHistoryFormsetView(Farms_FormsetView):
model = WaterHistory
template_name = 'farms/water_history_list.html'
fields = [ 'date', 'field_list', 'rain', 'irrigation', 'comment' ]
widgets = {
'comment': Textarea(attrs={'rows':2, 'cols':20}),
'description': Textarea(attrs={'rows':2, 'cols':20}),
'date': TextInput(attrs={'width':10, 'class':'hasDatePicker'}),
}
|
|
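A usage sketch, not part of the record above: since ProbeFormsetView and WaterHistoryFormsetView are class-based views, they would typically be exposed through a URLconf along these lines. The URL patterns, module path, and view names here are assumptions for illustration, matching the Django 1.x / Python 2 era of the record.

# Hypothetical urls.py wiring for the formset views above; the patterns
# and names are illustrative only, not taken from the commit.
from django.conf.urls import url
from farms.views import ProbeFormsetView, WaterHistoryFormsetView

urlpatterns = [
    url(r'^probes/$', ProbeFormsetView.as_view(), name='probe-list'),
    url(r'^water/$', WaterHistoryFormsetView.as_view(),
        name='water-history-list'),
]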
1337bed0e13f8e0f3d1294464e817b782b765b47
|
scripts/read_in_lines.py
|
scripts/read_in_lines.py
|
from image_process.hough_transform import get_hough_lines
from lines.line import Point, LineSegment
import matplotlib.pyplot as plt
import os
image_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'hough_test', 'Test_Set_1', 'PNGs',
'C(C)C(CCCC)(C)C.png')
lines = get_hough_lines(image_path)
line_segment_list = []
for line_seg in lines:
point_1 = Point(*line_seg[0])
point_2 = Point(*line_seg[1])
new_line = LineSegment([point_1, point_2])
line_segment_list.append(new_line)
l1 = line_segment_list[0]
l2 = line_segment_list[10]
print l1.pts
print l2.pts
print l1.getDifference(l2)
print l1.m
print l1.b
plt.figure()
plt.plot([l1.pts[0].x, l1.pts[1].x], [l1.pts[0].y, l1.pts[1].y])
plt.plot([l2.pts[0].x, l2.pts[1].x], [l2.pts[0].y, l2.pts[1].y])
deltas = []
thetas = []
for line_segs in line_segment_list:
if line_segs == l1:
continue
diffs = l1.getDifference(line_segs)
deltas.append(diffs[0])
thetas.append(diffs[1])
plt.figure()
plt.plot(deltas, thetas, 'o')
plt.show()
|
Add Script for Testing Line Segment Features
|
Add Script for Testing Line Segment Features
|
Python
|
mit
|
Molecular-Image-Recognition/Molecular-Image-Recognition
|
Add Script for Testing Line Segment Features
|
from image_process.hough_transform import get_hough_lines
from lines.line import Point, LineSegment
import matplotlib.pyplot as plt
import os
image_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'hough_test', 'Test_Set_1', 'PNGs',
'C(C)C(CCCC)(C)C.png')
lines = get_hough_lines(image_path)
line_segment_list = []
for line_seg in lines:
point_1 = Point(*line_seg[0])
point_2 = Point(*line_seg[1])
new_line = LineSegment([point_1, point_2])
line_segment_list.append(new_line)
l1 = line_segment_list[0]
l2 = line_segment_list[10]
print l1.pts
print l2.pts
print l1.getDifference(l2)
print l1.m
print l1.b
plt.figure()
plt.plot([l1.pts[0].x, l1.pts[1].x], [l1.pts[0].y, l1.pts[1].y])
plt.plot([l2.pts[0].x, l2.pts[1].x], [l2.pts[0].y, l2.pts[1].y])
deltas = []
thetas = []
for line_segs in line_segment_list:
if line_segs == l1:
continue
diffs = l1.getDifference(line_segs)
deltas.append(diffs[0])
thetas.append(diffs[1])
plt.figure()
plt.plot(deltas, thetas, 'o')
plt.show()
|
<commit_before><commit_msg>Add Script for Testing Line Segment Features<commit_after>
|
from image_process.hough_transform import get_hough_lines
from lines.line import Point, LineSegment
import matplotlib.pyplot as plt
import os
image_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'hough_test', 'Test_Set_1', 'PNGs',
'C(C)C(CCCC)(C)C.png')
lines = get_hough_lines(image_path)
line_segment_list = []
for line_seg in lines:
point_1 = Point(*line_seg[0])
point_2 = Point(*line_seg[1])
new_line = LineSegment([point_1, point_2])
line_segment_list.append(new_line)
l1 = line_segment_list[0]
l2 = line_segment_list[10]
print l1.pts
print l2.pts
print l1.getDifference(l2)
print l1.m
print l1.b
plt.figure()
plt.plot([l1.pts[0].x, l1.pts[1].x], [l1.pts[0].y, l1.pts[1].y])
plt.plot([l2.pts[0].x, l2.pts[1].x], [l2.pts[0].y, l2.pts[1].y])
deltas = []
thetas = []
for line_segs in line_segment_list:
if line_segs == l1:
continue
diffs = l1.getDifference(line_segs)
deltas.append(diffs[0])
thetas.append(diffs[1])
plt.figure()
plt.plot(deltas, thetas, 'o')
plt.show()
|
Add Script for Testing Line Segment Featuresfrom image_process.hough_transform import get_hough_lines
from lines.line import Point, LineSegment
import matplotlib.pyplot as plt
import os
image_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'hough_test', 'Test_Set_1', 'PNGs',
'C(C)C(CCCC)(C)C.png')
lines = get_hough_lines(image_path)
line_segment_list = []
for line_seg in lines:
point_1 = Point(*line_seg[0])
point_2 = Point(*line_seg[1])
new_line = LineSegment([point_1, point_2])
line_segment_list.append(new_line)
l1 = line_segment_list[0]
l2 = line_segment_list[10]
print l1.pts
print l2.pts
print l1.getDifference(l2)
print l1.m
print l1.b
plt.figure()
plt.plot([l1.pts[0].x, l1.pts[1].x], [l1.pts[0].y, l1.pts[1].y])
plt.plot([l2.pts[0].x, l2.pts[1].x], [l2.pts[0].y, l2.pts[1].y])
deltas = []
thetas = []
for line_segs in line_segment_list:
if line_segs == l1:
continue
diffs = l1.getDifference(line_segs)
deltas.append(diffs[0])
thetas.append(diffs[1])
plt.figure()
plt.plot(deltas, thetas, 'o')
plt.show()
|
<commit_before><commit_msg>Add Script for Testing Line Segment Features<commit_after>from image_process.hough_transform import get_hough_lines
from lines.line import Point, LineSegment
import matplotlib.pyplot as plt
import os
image_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data', 'hough_test', 'Test_Set_1', 'PNGs',
'C(C)C(CCCC)(C)C.png')
lines = get_hough_lines(image_path)
line_segment_list = []
for line_seg in lines:
point_1 = Point(*line_seg[0])
point_2 = Point(*line_seg[1])
new_line = LineSegment([point_1, point_2])
line_segment_list.append(new_line)
l1 = line_segment_list[0]
l2 = line_segment_list[10]
print l1.pts
print l2.pts
print l1.getDifference(l2)
print l1.m
print l1.b
plt.figure()
plt.plot([l1.pts[0].x, l1.pts[1].x], [l1.pts[0].y, l1.pts[1].y])
plt.plot([l2.pts[0].x, l2.pts[1].x], [l2.pts[0].y, l2.pts[1].y])
deltas = []
thetas = []
for line_segs in line_segment_list:
if line_segs == l1:
continue
diffs = l1.getDifference(line_segs)
deltas.append(diffs[0])
thetas.append(diffs[1])
plt.figure()
plt.plot(deltas, thetas, 'o')
plt.show()
|
|
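A note on the record above: the lines.line module the script imports is not included, so the interface it relies on (pts, m, b, getDifference) has to be inferred from the call sites. A minimal sketch consistent with that usage might look like the following; in particular, the getDifference semantics (a positional-delta / angular-delta pair, judging by the deltas/thetas scatter plot) are an assumption, not the project's actual implementation.

# Assumed, minimal reconstruction of the lines.line interface; the real
# module may differ. getDifference is inferred to return a
# (positional delta, angular delta) pair between two segments.
import math
from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])

class LineSegment(object):
    def __init__(self, pts):
        self.pts = pts
        dx = float(pts[1].x - pts[0].x)
        # Slope and y-intercept; vertical segments get an infinite slope.
        self.m = (pts[1].y - pts[0].y) / dx if dx else float('inf')
        self.b = pts[0].y - self.m * pts[0].x

    def getDifference(self, other):
        # Distance between segment midpoints, plus the absolute angle
        # between the two segments' directions.
        mid = lambda s: ((s.pts[0].x + s.pts[1].x) / 2.0,
                         (s.pts[0].y + s.pts[1].y) / 2.0)
        (x1, y1), (x2, y2) = mid(self), mid(other)
        delta = math.hypot(x2 - x1, y2 - y1)
        ang = lambda s: math.atan2(s.pts[1].y - s.pts[0].y,
                                   s.pts[1].x - s.pts[0].x)
        theta = abs(ang(other) - ang(self))
        return delta, theta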
05c2b5edb0ad2dd96f4146893de1bcaa2b691c64
|
jenkinsapi/utils/krb_requester.py
|
jenkinsapi/utils/krb_requester.py
|
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
Add kerberos authentication requester using requests_kerberos
|
Add kerberos authentication requester using requests_kerberos
|
Python
|
mit
|
domenkozar/jenkinsapi,imsardine/jenkinsapi,zaro0508/jenkinsapi,jduan/jenkinsapi,JohnLZeller/jenkinsapi,JohnLZeller/jenkinsapi,mistermocha/jenkinsapi,imsardine/jenkinsapi,aerickson/jenkinsapi,zaro0508/jenkinsapi,mistermocha/jenkinsapi,jduan/jenkinsapi,domenkozar/jenkinsapi,salimfadhley/jenkinsapi,salimfadhley/jenkinsapi,mistermocha/jenkinsapi,JohnLZeller/jenkinsapi,aerickson/jenkinsapi,imsardine/jenkinsapi,zaro0508/jenkinsapi
|
Add kerberos authentication requester using requests_kerberos
|
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
<commit_before><commit_msg>Add kerberos authentication requester using requests_kerberos<commit_after>
|
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
Add kerberos authentication requester using requests_kerberosfrom jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
<commit_before><commit_msg>Add kerberos authentication requester using requests_kerberos<commit_after>from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
class KrbRequester(Requester):
"""
A class which carries out HTTP requests with Kerberos/GSSAPI authentication.
"""
def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
"""
:param ssl_verify: flag indicating if server certificate in HTTPS requests should be verified
:param baseurl: Jenkins' base URL
:param mutual_auth: type of mutual authentication, use one of REQUIRED, OPTIONAL or DISABLED
from requests_kerberos package
"""
args = {}
if ssl_verify:
args["ssl_verify"] = ssl_verify
if baseurl:
args["baseurl"] = baseurl
super(KrbRequester, self).__init__(**args)
self.mutual_auth = mutual_auth
def get_request_dict(self, url, params, data, headers):
req_dict = super(KrbRequester, self).get_request_dict(url=url, params=params,
data=data, headers=headers)
if self.mutual_auth:
auth = HTTPKerberosAuth(self.mutual_auth)
else:
auth = HTTPKerberosAuth()
req_dict['auth'] = auth
return req_dict
|
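A usage sketch, not part of the record: jenkinsapi's Jenkins object accepts a requester argument, so the KrbRequester above would plausibly be plugged in as shown below. The server URL is an assumption, and a valid Kerberos ticket (e.g. obtained beforehand via kinit) is required for the handshake to succeed.

# Hypothetical usage of KrbRequester; the Jenkins URL is illustrative and
# a Kerberos ticket must already exist in the local credential cache.
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.utils.krb_requester import KrbRequester

requester = KrbRequester(baseurl='https://jenkins.example.com')
jenkins = Jenkins('https://jenkins.example.com', requester=requester)
print(jenkins.version)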