text stringlengths 4 1.02M | meta dict |
|---|---|
import unittest
from common import get_ldap_lookup, PETER, BILL
class MailerLdapTest(unittest.TestCase):
    """Exercises the sqlite-backed LDAP lookup cache used by the mailer."""

    def setUp(self):
        # A fresh lookup object per test, backed by a sqlite cache engine.
        self.ldap_lookup = get_ldap_lookup(cache_engine='sqlite')

    def test_sqlite_cached_get_mail(self):
        """A pre-seeded uid resolves straight from the sqlite cache."""
        cached = self.ldap_lookup.caching.get('michael_bolton')
        self.assertEqual(cached.get('mail'), 'michael_bolton@initech.com')

    def test_regex_requiring_underscore(self):
        """uid_regex controls which uids are looked up at all."""
        self.ldap_lookup.uid_regex = '_'
        # 'michael_bolton' contains an underscore, so the regex matches and
        # metadata comes back.
        matched = self.ldap_lookup.get_metadata_from_uid('michael_bolton')
        self.assertEqual(matched.get('mail'), 'michael_bolton@initech.com')
        # '123456' has no underscore, so the lookup returns {}.
        unmatched = self.ldap_lookup.get_metadata_from_uid('123456')
        self.assertEqual(unmatched, {})

    def test_sqlite_cache_set_escaping(self):
        """Values containing quotes survive a sqlite set/get round trip."""
        irish_guy = {
            'dn': 'uid=john_oconnor,cn=users,dc=initech,dc=com',
            'mail': 'john_oconnor@initech.com',
            'manager': 'uid=bill_lumbergh,cn=users,dc=initech,dc=com',
            'displayName': "John O'Connor",
            'uid': 'john_oconnor',
        }
        self.assertEqual(
            self.ldap_lookup.caching.set(irish_guy['uid'], irish_guy), None)
        self.assertEqual(
            self.ldap_lookup.caching.get(irish_guy['uid']), irish_guy)

    def test_regex_requiring_6chars_and_only_digits(self):
        """A fully-anchored digits-only regex admits six-digit uids."""
        self.ldap_lookup.uid_regex = '^[0-9]{6}$'
        milton = self.ldap_lookup.get_metadata_from_uid('123456')
        self.assertEqual(milton.get('mail'), 'milton@initech.com')

    def test_sqlite_cached_get_email_to_addr_without_manager(self):
        """Without manager=True only the employee's own address comes back."""
        recipients = self.ldap_lookup.get_email_to_addrs_from_uid(
            'michael_bolton')
        self.assertEqual(recipients, ['michael_bolton@initech.com'])

    def test_sqlite_cached_get_email_to_addrs_with_manager(self):
        """manager=True appends the manager's address to the list."""
        recipients = self.ldap_lookup.get_email_to_addrs_from_uid(
            'michael_bolton', manager=True)
        self.assertEqual(
            recipients, ['michael_bolton@initech.com', 'milton@initech.com'])

    def test_uid_ldap_lookup(self):
        """A live uid lookup returns metadata and populates the cache."""
        fetched = self.ldap_lookup.get_metadata_from_uid('peter')
        cached = self.ldap_lookup.caching.get('peter')
        for result in (fetched, cached):
            self.assertEqual(result['mail'], PETER[1]['mail'][0])
            self.assertEqual(result['uid'], PETER[1]['uid'][0])

    def test_dn_ldap_lookup(self):
        """Metadata can also be fetched by full distinguished name."""
        metadata = self.ldap_lookup.get_metadata_from_dn(BILL[0])
        self.assertEqual(metadata['mail'], BILL[1]['mail'][0])

    def test_to_addr_with_ldap_query(self):
        """An uncached uid plus manager=True resolves both addresses."""
        recipients = self.ldap_lookup.get_email_to_addrs_from_uid(
            'peter', manager=True)
        self.assertEqual(
            recipients, ['peter@initech.com', 'bill_lumberg@initech.com'])

    def test_that_dn_and_uid_write_to_cache_on_manager_lookup(self):
        """A dn lookup caches the result under both the dn and the uid."""
        metadata = self.ldap_lookup.get_metadata_from_dn(BILL[0])
        self.assertEqual(metadata, self.ldap_lookup.caching.get(BILL[0]))
        self.assertEqual(
            metadata, self.ldap_lookup.caching.get(BILL[1]['uid'][0]))

    def test_that_dn_and_uid_write_to_cache_on_employee_lookup(self):
        """A uid lookup caches the result under both the uid and the dn."""
        uid = PETER[1]['uid'][0]
        metadata = self.ldap_lookup.get_metadata_from_uid(uid)
        self.assertEqual(metadata, self.ldap_lookup.caching.get(uid))
        self.assertEqual(metadata, self.ldap_lookup.caching.get(PETER[0]))

    def test_random_string_dont_hit_ldap_twice_uid_lookup(self):
        """A miss is cached, so LDAP is never queried again for that uid."""
        first = self.ldap_lookup.get_email_to_addrs_from_uid(
            'doesnotexist', manager=True)
        self.assertEqual(first, [])
        # Sever the LDAP connection: if the second call tried to hit LDAP it
        # would blow up instead of serving the cached empty result.
        self.ldap_lookup.connection = None
        second = self.ldap_lookup.get_email_to_addrs_from_uid(
            'doesnotexist', manager=True)
        self.assertEqual(second, [])
| {
"content_hash": "c0f94973d8891721b798660b559683b8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 94,
"avg_line_length": 51.417582417582416,
"alnum_prop": 0.6646719384483865,
"repo_name": "jimmyraywv/cloud-custodian",
"id": "c979d2da3df7cd4964cd798af169bbb7bbe5efbe",
"size": "5265",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/c7n_mailer/tests/test_ldap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "Python",
"bytes": "1760566"
}
],
"symlink_target": ""
} |
"""
Simple script to rename .env.keep to .env after generating the project.
"""
import os
os.rename('.env.keep', '.env')
print('Django project was generated in {{ cookiecutter.repo_name }}.')
| {
"content_hash": "3924830612f468f80497dab26a0d9f64",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6923076923076923,
"repo_name": "ameistad/django-template",
"id": "0cc439149e79c1ea716a6ae0f5f017922d850e7a",
"size": "241",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "hooks/post_gen_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "166"
},
{
"name": "Python",
"bytes": "11223"
},
{
"name": "Shell",
"bytes": "3329"
}
],
"symlink_target": ""
} |
"""
author : wanghe
company: LogInsight
email_ : wangh@loginsight.cn
file: __init__.py.py
time : 16/4/15 下午9:20
"""
import os
import sys
def main():
    """Run the Django management CLI with the wangblog settings module."""
    settings_key, settings_module = "DJANGO_SETTINGS_MODULE", "wangblog.conf.settings"
    # setdefault: an explicitly exported settings module wins over ours.
    os.environ.setdefault(settings_key, settings_module)
    from django.core.management import execute_from_command_line
    argv = sys.argv
    execute_from_command_line(argv)
| {
"content_hash": "a1a92edd3c7fad5062bdd7d0db533ca2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 21.1875,
"alnum_prop": 0.7109144542772862,
"repo_name": "wanghe4096/WangBlog",
"id": "ada89c20b79c585761623f7d8d0565bbe1b2f944",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/wangblog/runner/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "100891"
},
{
"name": "HTML",
"bytes": "138081"
},
{
"name": "JavaScript",
"bytes": "227431"
},
{
"name": "Python",
"bytes": "97173"
}
],
"symlink_target": ""
} |
import io
import re
import errno
import pytest
from unittest import mock
import gpiozero.pins.data
import gpiozero.pins.local
from gpiozero.pins.local import LocalPiFactory
from gpiozero.pins.data import Style, HeaderInfo, PinInfo
from gpiozero import *
def test_pi_revision():
    """pi_info() derives the board revision from /proc files (mocked here).

    io.open is patched so each scenario controls what the revision probe
    "reads"; the side_effect lists are consumed in order, one entry per
    opened file.
    """
    with mock.patch('gpiozero.devices.Device.pin_factory', LocalPiFactory()):
        # Can't use MockPin for this as we want something that'll actually try
        # and read /proc/device-tree/system/linux,revision and /proc/cpuinfo
        # (MockPin simply parrots the 3B's data); LocalPiFactory is used as we
        # can definitely instantiate it (strictly speaking it's abstract but
        # we're only interested in the pi_info stuff)
        with mock.patch('io.open') as m:
            m.return_value.__enter__.side_effect = [
                # Pretend /proc/device-tree/system/linux,revision doesn't
                # exist, and that /proc/cpuinfo contains the Revision: 0002
                # after some filler
                IOError(errno.ENOENT, 'File not found'),
                ['lots of irrelevant', 'lines', 'followed by', 'Revision: 0002', 'Serial: xxxxxxxxxxx']
            ]
            assert pi_info().revision == '0002'
            # LocalPiFactory caches the revision (because realistically it
            # isn't going to change at runtime); we need to wipe it here though
            Device.pin_factory._info = None
            m.return_value.__enter__.side_effect = [
                IOError(errno.ENOENT, 'File not found'),
                ['Revision: a21042']
            ]
            assert pi_info().revision == 'a21042'
            # Check over-volting result (some argument over whether this is 7
            # or 8 character result; make sure both work)
            Device.pin_factory._info = None
            m.return_value.__enter__.side_effect = [
                IOError(errno.ENOENT, 'File not found'),
                ['Revision: 1000003']
            ]
            assert pi_info().revision == '0003'
            Device.pin_factory._info = None
            m.return_value.__enter__.side_effect = [
                IOError(errno.ENOENT, 'File not found'),
                ['Revision: 100003']
            ]
            assert pi_info().revision == '0003'
            # Check we complain loudly if we can't access linux,revision
            # (ENOENT falls through to /proc/cpuinfo; EACCES must not)
            Device.pin_factory._info = None
            m.return_value.__enter__.side_effect = [
                IOError(errno.EACCES, 'Permission denied'),
                ['Revision: 100003']
            ]
            with pytest.raises(IOError):
                pi_info()
            # Check that parsing /proc/device-tree/system/linux,revision also
            # works properly (binary big-endian revision word)
            Device.pin_factory._info = None
            m.return_value.__enter__.side_effect = None
            m.return_value.__enter__.return_value = io.BytesIO(b'\x00\xa2\x20\xd3')
            assert pi_info().revision == 'a220d3'
            # Check that if everything's a bust we raise PinUnknownPi
            with pytest.raises(PinUnknownPi):
                Device.pin_factory._info = None
                m.return_value.__enter__.return_value = None
                m.return_value.__enter__.side_effect = [
                    IOError(errno.ENOENT, 'File not found'),
                    ['nothing', 'relevant']
                ]
                pi_info()
            with pytest.raises(PinUnknownPi):
                pi_info('0fff')
def test_pi_info():
    """Board metadata lookups for two known revision codes."""
    expected_b = {
        'model': 'B', 'pcb_revision': '1.0', 'memory': 512,
        'manufacturer': 'Sony', 'storage': 'SD', 'usb': 2,
        'ethernet': 1, 'csi': 1, 'dsi': 1,
    }
    board = pi_info('900011')
    for attr, value in expected_b.items():
        assert getattr(board, attr) == value
    assert not board.wifi
    assert not board.bluetooth
    # Revision 9000f1 isn't a known model, but the rest of the bit-fields
    # still decode.
    expected_unknown = {
        'model': '???', 'pcb_revision': '1.1', 'memory': 512,
        'manufacturer': 'Sony', 'storage': 'MicroSD', 'usb': 4,
        'ethernet': 1, 'csi': 1, 'dsi': 1,
    }
    board = pi_info('9000f1')
    for attr, value in expected_unknown.items():
        assert getattr(board, attr) == value
    assert not board.wifi
    assert not board.bluetooth
    # repr elides the bulky headers mapping.
    assert repr(board).startswith('PiBoardInfo(revision=')
    assert 'headers=...' in repr(board)
def test_pi_info_not_a_pi():
    """When the local factory reports no Pi data, pi_info() raises."""
    class NotAPiFactory(LocalPiFactory):
        def _get_pi_info(self):
            return None

    patched_factory = mock.patch(
        'gpiozero.devices.Device.pin_factory', NotAPiFactory())
    with patched_factory:
        with pytest.raises(PinUnknownPi):
            pi_info()
def test_pi_info_other_types():
    """bytes and int revision arguments resolve to the same board info."""
    from_bytes = pi_info(b'9000f1')
    from_int = pi_info(0x9000f1)
    assert from_bytes == from_int
def test_physical_pins():
    """Physical pin sets for some well-known Pi's; a21041 is a Pi2B."""
    info = pi_info('a21041')
    assert info.physical_pins('3V3') == {('J8', 1), ('J8', 17)}
    assert info.physical_pins('GPIO2') == {('J8', 3)}
    # GPIO47 isn't exposed on the header at all.
    assert info.physical_pins('GPIO47') == set()
def test_physical_pin():
    """physical_pin() demands exactly one matching physical pin."""
    info = pi_info('a21041')
    with pytest.raises(PinMultiplePins):
        info.physical_pin('GND')  # many GND pins on the header
    assert info.physical_pin('GPIO3') == ('J8', 5)
    with pytest.raises(PinNoPins):
        info.physical_pin('GPIO47')  # not exposed on the header
def test_pulled_up():
    """pulled_up() flags for a Pi2B (revision a21041)."""
    info = pi_info('a21041')
    assert info.pulled_up('GPIO2')
    assert not info.pulled_up('GPIO4')
    assert not info.pulled_up('GPIO47')
def test_pprint_content():
    """pprint() renders header ASCII-art plus board specs to stdout."""
    with mock.patch('sys.stdout') as stdout:
        # Capture everything written to the mocked stdout in a list.
        stdout.output = []
        stdout.write = lambda buf: stdout.output.append(buf)
        pi_info('900092').pprint(color=False)
        s = ''.join(stdout.output)
        assert ('o' * 20 + ' ') in s # first header row
        assert ('1' + 'o' * 19 + ' ') in s # second header row (pin 1 marked)
        assert 'PiZero' in s
        assert 'V1.2' in s # PCB revision
        assert '900092' in s # Pi revision
        assert 'BCM2835' in s # SOC name
        stdout.output = []
        pi_info('0002').pprint(color=False)
        s = ''.join(stdout.output)
        assert ('o' * 13 + ' ') in s # first header row
        assert ('1' + 'o' * 12 + ' ') in s # second header row
        assert 'Pi Model' in s
        assert 'B V1.0' in s # PCB revision
        assert '0002' in s # Pi revision
        assert 'BCM2835' in s # SOC name
        stdout.output = []
        # The SODIMM header of revision 0014 renders as exactly 100 lines.
        pi_info('0014').headers['SODIMM'].pprint(color=False)
        assert len(''.join(stdout.output).splitlines()) == 100
def test_format_content():
    """format() style specs reproduce the corresponding pprint() output."""
    with mock.patch('sys.stdout') as stdout:
        stdout.output = []
        stdout.write = lambda buf: stdout.output.append(buf)
        pi_info('900092').pprint(color=False)
        s = ''.join(stdout.output)
        # 'mono' format spec matches the monochrome pprint output
        assert '{0:mono}\n'.format(pi_info('900092')) == s
        stdout.output = []
        pi_info('900092').pprint(color=True)
        s = ''.join(stdout.output)
        # 'color full' format spec matches the colored pprint output
        assert '{0:color full}\n'.format(pi_info('900092')) == s
        # unknown tokens in the style spec are rejected
        with pytest.raises(ValueError):
            '{0:color foo}'.format(pi_info('900092'))
def test_pprint_headers():
    """pprint() lists each populated header (P1/P5/J8) by name."""
    assert len(pi_info('0002').headers) == 1
    assert len(pi_info('000e').headers) == 2
    assert len(pi_info('900092').headers) == 1
    with mock.patch('sys.stdout') as stdout:
        # Capture writes so the rendered text can be inspected.
        stdout.output = []
        stdout.write = lambda buf: stdout.output.append(buf)
        pi_info('0002').pprint()
        s = ''.join(stdout.output)
        assert 'P1:\n' in s
        assert 'P5:\n' not in s
        stdout.output = []
        pi_info('000e').pprint()
        s = ''.join(stdout.output)
        assert 'P1:\n' in s
        assert 'P5:\n' in s
        stdout.output = []
        pi_info('900092').pprint()
        s = ''.join(stdout.output)
        assert 'J8:\n' in s
        assert 'P1:\n' not in s
        assert 'P5:\n' not in s
def test_format_headers():
    """Header objects support the same mono/color format specs as boards."""
    with mock.patch('sys.stdout') as stdout:
        stdout.output = []
        stdout.write = lambda buf: stdout.output.append(buf)
        info = pi_info('c03131')
        info.headers['J8'].pprint(color=False)
        s = ''.join(stdout.output)
        assert '{0.headers[J8]:mono}\n'.format(info) == s
        stdout.output = []
        info.headers['J8'].pprint(color=True)
        s = ''.join(stdout.output)
        assert '{0.headers[J8]:color}\n'.format(info) == s
        # combining a style with an unknown token is an error
        with pytest.raises(ValueError):
            '{0.headers[J8]:mono foo}'.format(info)
def test_pprint_color():
    """Color output is explicit, or implied by stdout being a tty."""
    with mock.patch('sys.stdout') as stdout:
        stdout.output = []
        stdout.write = lambda buf: stdout.output.append(buf)
        pi_info('900092').pprint(color=False)
        s = ''.join(stdout.output)
        assert '\x1b[0m' not in s # make sure ANSI reset code isn't in there
        stdout.output = []
        pi_info('900092').pprint(color=True)
        s = ''.join(stdout.output)
        assert '\x1b[0m' in s # check the ANSI reset code *is* in there (can't guarantee much else!)
        stdout.output = []
        # fileno() failing means "not a real terminal" -> default to mono
        stdout.fileno.side_effect = IOError('not a real file')
        pi_info('900092').pprint()
        s = ''.join(stdout.output)
        assert '\x1b[0m' not in s # default should output mono
        with mock.patch('os.isatty') as isatty:
            # With a working fileno() and isatty() -> True, default is color
            isatty.return_value = True
            stdout.fileno.side_effect = None
            stdout.output = []
            pi_info('900092').pprint()
            s = ''.join(stdout.output)
            assert '\x1b[0m' in s # default should now output color
def test_pprint_styles():
    """Invalid or contradictory style specifications raise ValueError."""
    for bad_spec in ('mono color full', 'full specs'):
        with pytest.raises(ValueError):
            Style.from_style_content(bad_spec)
    with mock.patch('sys.stdout'):
        # stdout is a mock, not a tty, so the implicit style is mono.
        rendered = '{0:full}'.format(pi_info('900092'))
        assert '\x1b[0m' not in rendered
    with pytest.raises(ValueError):
        '{0:foo on bar}'.format(Style())
def test_pprint_missing_pin():
    """A header with a gap in its pin numbering prints the pins it has and
    silently skips the missing position.

    Fixes two defects in the original test: ``s`` was joined from the
    captured output *before* ``header.pprint()`` ran (so it was always
    empty), and the positive branch asserted a bare non-empty string
    instead of membership in ``s`` — every check was vacuous.
    """
    header = HeaderInfo('FOO', 4, 2, {
        1: PinInfo(1, '5V', False, 1, 1),
        2: PinInfo(2, 'GND', False, 1, 2),
        # Pin 3 is deliberately missing
        4: PinInfo(4, 'GPIO1', False, 2, 2),
        5: PinInfo(5, 'GPIO2', False, 3, 1),
        6: PinInfo(6, 'GPIO3', False, 3, 2),
        7: PinInfo(7, '3V3', False, 4, 1),
        8: PinInfo(8, 'GND', False, 4, 2),
    })
    with mock.patch('sys.stdout') as stdout:
        stdout.output = []
        stdout.write = lambda buf: stdout.output.append(buf)
        header.pprint()
        # Join *after* printing so the captured output is actually inspected.
        s = ''.join(stdout.output)
        for i in range(1, 9):
            if i == 3:
                assert '(3)' not in s
            else:
                assert '({i:d})'.format(i=i) in s
def test_pprint_rows_cols():
    """row/col format specs render single rows and columns of a header."""
    zero_j8 = pi_info('900092').headers['J8']
    assert '{0:row1}'.format(zero_j8) == '1o'
    assert '{0:row2}'.format(zero_j8) == 'oo'
    old_p1 = pi_info('0002').headers['P1']
    assert '{0:col1}'.format(old_p1) == '1oooooooooooo'
    assert '{0:col2}'.format(old_p1) == 'ooooooooooooo'
    # Out-of-range rows/columns are rejected.
    with pytest.raises(ValueError):
        '{0:row16}'.format(old_p1)
    with pytest.raises(ValueError):
        '{0:col3}'.format(old_p1)
| {
"content_hash": "e8372896759bb194ac5eea65cd3da6aa",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 104,
"avg_line_length": 40.42142857142857,
"alnum_prop": 0.5699770277434175,
"repo_name": "waveform80/gpio-zero",
"id": "e0256d8b4b33c5f0df24afdfd96b888b43390927",
"size": "11627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pins_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Python",
"bytes": "553301"
}
],
"symlink_target": ""
} |
import argparse
import random
from collections import namedtuple
from pyborgeous import codestrings
from pyborgeous import docstrings
__VERSION__ = '0.5.1'
class Page:
    """
    The page in a book in a shelf in a bookcase in a room in a library object.

    A page's content and its coordinates are two representations of one big
    integer (the "magic number"); the two methods below convert in both
    directions.
    """

    # We iterate through library_configuration, that's why it is separated
    # from page_configuration.
    Room = namedtuple('Room', 'PAGES_PER_BOOK BOOKS_PER_SHELF SHELVES_PER_BOOKCASE BOOKCASES_PER_ROOM')
    library_configuration = Room(410, 32, 5, 4)
    PageContainer = namedtuple('PageContainer', 'CHARACTERS_PER_PAGE CHARACTERS_PER_PAGE_TITLE')
    page_configuration = PageContainer(3200, 25)

    def __init__(self, address_encode_string, page_encode_string, page_text, page_address):
        """
        Instantiates the object with encode strings, and either an address
        (when no page text is provided) or page text (when no address is
        provided).
        """
        # ADDRESS_ENCODE_STRING is only non-None for the 'unicode_short'
        # charset mode (see main()); otherwise the page charset is also used
        # to encode the room component of the address.
        self.ADDRESS_ENCODE_STRING = address_encode_string
        self.PAGE_ENCODE_STRING = page_encode_string
        self.page_address = page_address
        self.page_text = page_text

    def get_page_text_by_address(self):
        """
        Transforms coordinates back to page text
        [Base number, int, int, int, int] ~> int ~> string
        """
        address = self.page_address.split('\t')
        # Check if -cm unicode_short is in effect: the leading (room) field
        # may be encoded with a dedicated address charset.
        if self.ADDRESS_ENCODE_STRING:
            magic_number = base_to_integer(address[0], self.ADDRESS_ENCODE_STRING)
        else:
            magic_number = base_to_integer(address[0], self.PAGE_ENCODE_STRING)
        # Fold the bookcase/shelf/book/page digits back into the single big
        # integer, mirroring the mixed-radix split in
        # get_address_by_page_text().
        for config, address_item in zip(reversed(self.library_configuration), address[1:]):
            magic_number = magic_number * config + int(address_item)
        self.page_text = integer_to_text(magic_number)
        return self.page_text

    def get_address_by_page_text(self, mode):
        """
        Fills the page with spaces or with random characters (depends on the mode),
        then transforms it to magic_number,
        then to page coordinates using ceil division
        string ~> int ~> [Base number, int, int, int, int]
        Format of the resulting address is: encoded room\tbookcase\tshelf\tbook\tpage
        """
        address = []
        space = ' '
        full_page = self.page_configuration.CHARACTERS_PER_PAGE
        page_text_length = len(self.page_text)
        remaining = full_page - page_text_length
        # If text is shorter than 3200 characters, fill the rest with spaces
        if page_text_length < full_page and mode == 'spaces':
            self.page_text += space * remaining
        # If text is shorter than 3200 characters, fill the rest with random
        # characters on both sides
        elif page_text_length < full_page and mode == 'random':
            # randrange() needs a positive bound: when exactly one character
            # is missing, put it all in the prefix (fixes a crash where
            # remaining == 1 made randrange(0) raise ValueError).
            postfix_range = random.randrange(remaining - 1) if remaining > 1 else 0
            prefix_range = remaining - postfix_range
            prefix = ''.join(random.choice(self.PAGE_ENCODE_STRING) for _ in range(prefix_range))
            postfix = ''.join(random.choice(self.PAGE_ENCODE_STRING) for _ in range(postfix_range))
            self.page_text = prefix + self.page_text + postfix
        # If text is longer than 3200 characters, truncate it to exactly one
        # page (the original sliced to CHARACTERS_PER_PAGE + 1, keeping 3201
        # characters — an off-by-one).
        elif page_text_length > full_page:
            self.page_text = self.page_text[:full_page]
        # If nothing happened and text is exact 3200 characters long, then the
        # mode specified was wrong (which is weird)
        elif page_text_length != full_page:
            raise NotImplementedError(docstrings.ERROR_PAGE_TO_ADDRESS_UNKNOWN_MODE)
        magic_number = text_to_integer(self.page_text)
        for value in self.library_configuration:
            result_value = magic_number % value
            address.append(str(result_value))
            magic_number = - ((magic_number - result_value) // - value)  # Faster than 'from math import ceil'
        # Check if -cm unicode_short is in effect
        if self.ADDRESS_ENCODE_STRING:
            address.append(integer_to_base(magic_number, self.ADDRESS_ENCODE_STRING))
        else:
            address.append(integer_to_base(magic_number, self.PAGE_ENCODE_STRING))
        self.page_address = '\t'.join(reversed(address))
        return self.page_address
class DataFile:
    """
    Thin wrapper around UTF-8 text-file input/output.
    """

    def __init__(self, file_name=None, data=None):
        # file_name: path on disk; data: text to write (or last text read).
        self.file_name = file_name
        self.data = data

    def load(self):
        """Read the whole file into self.data and return it."""
        with open(self.file_name, encoding='utf-8') as source:
            self.data = source.read()
        return self.data

    def save(self):
        """Write self.data to the file and report where it went."""
        with open(self.file_name, 'w', encoding='utf-8') as target:
            target.writelines(self.data)
        print("Output saved in", self.file_name)
def main():
    """
    Main logic here
    Parses command line arguments then executes Page methods, prints the data
    and writes it to a file if -o specified
    """
    class CapitalisedHelpFormatter(argparse.RawTextHelpFormatter):
        """
        Cosmetics. Used to override 'usage: ' string to 'Usage: '
        """
        def add_usage(self, usage, actions, groups, prefix=None):
            if prefix is None:
                prefix = 'Usage: '
            return super(CapitalisedHelpFormatter, self).add_usage(usage, actions, groups, prefix)

    arg_parser = argparse.ArgumentParser(description=docstrings.PROGRAM_DESCRIPTION,
                                         formatter_class=CapitalisedHelpFormatter,
                                         add_help=False,
                                         prog="pyborgeous",
                                         epilog=docstrings.PROGRAM_EPILOG)
    # Cosmetic capitalization of protected stuff
    # I know this is bad, tell me if you know a better way please
    arg_parser._positionals.title = 'Positional arguments'
    arg_parser._optionals.title = 'Optional arguments'
    # Optional arguments
    arg_parser.add_argument("-h", "--help", action="help",
                            default=argparse.SUPPRESS, help=docstrings.HELP_HELP)
    arg_parser.add_argument("-v", "--version", action="version",
                            version="%(prog)s {0}".format(__VERSION__), help=docstrings.HELP_VERSION)
    arg_parser.add_argument("-o", "--output",
                            dest="output_file", help=docstrings.HELP_OUTPUT)
    # Mutually exclusive arguments group for charset
    arg_charset = arg_parser.add_mutually_exclusive_group(required=True)
    arg_charset.add_argument("-c", "--charset",
                             dest="charset", help=docstrings.HELP_CHARSET)
    arg_charset.add_argument("-cm", "--charset-mode",
                             dest="charset_mode", help=docstrings.HELP_CHARSET_MODE)
    arg_charset.add_argument("-cf", "--charset-file",
                             dest="charset_file", help=docstrings.HELP_CHARSET_FILE)
    # Mutually exclusive arguments group for input
    arg_input = arg_parser.add_mutually_exclusive_group(required=True)
    arg_input.add_argument("-pa", "--page-address",
                           dest="page_address", help=docstrings.HELP_PAGE_ADDRESS)
    arg_input.add_argument("-af", "--address-file",
                           dest="address_file", help=docstrings.HELP_ADDRESS_FILE)
    arg_input.add_argument("-t", "--text",
                           dest="text_exact", help=docstrings.HELP_TEXT_EXACT)
    arg_input.add_argument("-tr", "--text-random",
                           dest="text_random", help=docstrings.HELP_TEXT_RANDOM)
    arg_input.add_argument("-tf", "--text-file",
                           dest="text_file", help=docstrings.HELP_TEXT_FILE)
    # Now, parse!
    command_line = arg_parser.parse_args()
    charset_modes = {'binary': codestrings.BINARY,
                     'morse': codestrings.MORSE,
                     'borges': codestrings.BORGES,
                     'classic': codestrings.CLASSIC,
                     'full': codestrings.FULL,
                     'unicode': codestrings.UNICODE_REGULAR,
                     'unicode_short': codestrings.UNICODE_REGULAR}
    # Figure out what characters to use according to arg_charset group
    address_characters = None
    if command_line.charset_mode and command_line.charset_mode in charset_modes:  # -cm
        if command_line.charset_mode == 'unicode_short':
            page_characters = charset_modes[command_line.charset_mode]
            address_characters = codestrings.UNICODE_ADDRESS  # Different charset here
        else:
            page_characters = charset_modes[command_line.charset_mode]
    elif command_line.charset:  # -c
        page_characters = command_line.charset
    elif command_line.charset_file:  # -cf
        page_characters = DataFile(command_line.charset_file).load()
    else:
        raise NotImplementedError(docstrings.ERROR_CHARSET_MODE_NOT_IMPLEMENTED)
    # Figure out what text to use if any specified according to -t, -tr, -tf
    # arguments (the arg_input group is mutually exclusive, so at most one
    # of page_text/page_address ends up set)
    if command_line.text_exact:  # -t
        page_text = command_line.text_exact
    elif command_line.text_random:  # -tr
        page_text = command_line.text_random
    elif command_line.text_file:  # -tf
        text_file_object = DataFile(command_line.text_file)
        page_text = text_file_object.load()
    else:
        page_text = None
    # Figure out what address to use if any specified according to -a, -af arguments
    if command_line.page_address:  # -pa
        page_address = command_line.page_address
    elif command_line.address_file:  # -af
        address_file_object = DataFile(command_line.address_file)
        page_address = address_file_object.load()
    else:
        page_address = None
    # Validate if page_text exists and consists of characters
    if page_text and is_invalid_input(page_text, page_characters):
        raise NotImplementedError(docstrings.ERROR_TEXT_NOT_IN_CHARSET)
    # Validate if page_address exists and consists of characters
    if page_address and is_invalid_input(page_address, page_characters):
        raise NotImplementedError(docstrings.ERROR_ADDRESS_NOT_IN_CHARSET)
    # Instantiate a page, either page_text or page_address should be None
    current_page = Page(address_characters, page_characters, page_text, page_address)
    # Getting data out
    data_to_write = 'SOMETHING WENT WRONG'  # Just in case
    if page_address:
        current_page.get_page_text_by_address()
        data_to_write = current_page.page_text
    elif page_text and (command_line.text_exact or command_line.text_file):
        current_page.get_address_by_page_text('spaces')
        data_to_write = current_page.page_address
    elif page_text and command_line.text_random:
        current_page.get_address_by_page_text('random')
        data_to_write = current_page.page_address
    if command_line.output_file:
        storage = DataFile(command_line.output_file, data_to_write)
        storage.save()
    print(data_to_write)
def text_to_integer(text):
    """
    Converts any text string to an integer,
    for example: 'Hello, world!' to 2645608968347327576478451524936
    """
    # byteorder must match integer_to_text(); if you ever switch to 'big',
    # change both functions together.
    encoded = text.encode('utf-8')
    return int.from_bytes(encoded, byteorder='little')
def integer_to_text(number):
    """
    Converts an integer to a text string,
    for example: 2645608968347327576478451524936 to 'Hello, world!'
    Won't convert negative integers
    """
    # byteorder must match text_to_integer(); keep the two in sync.
    byte_count = (number.bit_length() + 7) // 8
    raw_bytes = number.to_bytes(byte_count, byteorder='little')
    return raw_bytes.decode('utf-8')
def integer_to_base(number, base_string):
    """
    Converts a base10 integer to baseX (X = len(base_string); e.g. X for
    '0123' is 4), for example 2645608968347327576478451524936 ('Hello,
    world!') becomes 21646C726F77202C6F6C6C6548 with a base16 string.
    Does not account for negative numbers.

    Fix: 0 now encodes as the charset's zero digit instead of '' (the old
    empty result produced blank address fields; base_to_integer() decodes
    both '' and base_string[0] to 0, so round trips are unaffected).
    """
    if not number:
        return base_string[0]
    digits = []
    base_length = len(base_string)
    while number:
        digits.append(base_string[number % base_length])
        number //= base_length
    # reversed() is already iterable — no intermediate list() needed.
    return ''.join(reversed(digits))
def base_to_integer(base_number, base_string):
    """
    Converts baseX integer to base10 integer (where X is the length of
    base_string, say X for '0123' is 4), for example
    21646C726F77202C6F6C6C6548 (base16) to 2645608968347327576478451524936
    (which is 'Hello, world!').
    Does not account for negative numbers; an empty input decodes to 0.
    """
    number = 0
    base_length = len(base_string)  # loop-invariant: hoisted out of the loop
    for digit in str(base_number):
        number = number * base_length + base_string.index(digit)
    return number
def is_invalid_input(string_one, string_two):
    """
    Checks if the first string does not consist only of characters from the
    second string. Tab is always tolerated: it separates address fields.
    """
    return any(
        character not in string_two and character != '\t'
        for character in string_one
    )
| {
"content_hash": "26babb4e11e57221699bfb9cda09bb01",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 120,
"avg_line_length": 40.183673469387756,
"alnum_prop": 0.6242472611187695,
"repo_name": "Spacehug/pyborgeous",
"id": "b8df360f7f462caa7d7a91a94abfcc246ea5c143",
"size": "13783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyborgeous/pyborgeous.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "895029"
}
],
"symlink_target": ""
} |
from parlai.utils.testing import AutoTeacherTest # noqa: F401
class TestDefaultTeacher(AutoTeacherTest):
    """Auto-generated regression test for the default 'xpersona' task."""
    task = 'xpersona'
class TestEnTeacher(AutoTeacherTest):
    """Auto-generated regression test for the 'xpersona:En' task variant."""
    task = 'xpersona:En'
class TestZhTeacher(AutoTeacherTest):
    """Auto-generated regression test for the 'xpersona:Zh' task variant."""
    task = 'xpersona:Zh'
class TestFrTeacher(AutoTeacherTest):
    """Auto-generated regression test for the 'xpersona:Fr' task variant."""
    task = 'xpersona:Fr'
class TestIdTeacher(AutoTeacherTest):
    """Auto-generated regression test for the 'xpersona:Id' task variant."""
    task = 'xpersona:Id'
class TestItTeacher(AutoTeacherTest):
    """Auto-generated regression test for the 'xpersona:It' task variant."""
    task = 'xpersona:It'
class TestKoTeacher(AutoTeacherTest):
    """Auto-generated regression test for the 'xpersona:Ko' task variant."""
    task = 'xpersona:Ko'
class TestJpTeacher(AutoTeacherTest):
    """Auto-generated regression test for the 'xpersona:Jp' task variant."""
    task = 'xpersona:Jp'
| {
"content_hash": "2ae77f62b16e472e97fa7946e8553ba6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 62,
"avg_line_length": 17.727272727272727,
"alnum_prop": 0.7350427350427351,
"repo_name": "facebookresearch/ParlAI",
"id": "52048a82bcfe07645f77967bc3daa59af38e7085",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/tasks/xpersona/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from django.core.cache import cache
from django.test.utils import override_settings
from django.utils.encoding import smart_text
from django.utils.translation import trim_whitespace
import mock
from pyquery import PyQuery as pq
from olympia import amo
from olympia.addons.models import (
Addon, AddonDependency, CompatOverride, CompatOverrideRange, Preview)
from olympia.amo.templatetags.jinja_helpers import format_date
from olympia.amo.tests import TestCase, addon_factory, collection_factory
from olympia.amo.urlresolvers import reverse
from olympia.bandwagon.models import MonthlyPick
from olympia.legacy_discovery import views
from olympia.legacy_discovery.forms import DiscoveryModuleForm
from olympia.legacy_discovery.models import DiscoveryModule
from olympia.legacy_discovery.modules import registry
from olympia.users.models import UserProfile
class TestModuleAdmin(TestCase):
    """Admin-side behaviour: registry/DB sync and the discovery module form."""

    def test_sync_db_and_registry(self):
        def check():
            # After a sync, the DB rows must mirror the in-memory registry.
            views._sync_db_and_registry(qs, 1)
            assert qs.count() == len(registry)
            modules = qs.values_list('module', flat=True)
            assert set(modules) == set(registry.keys())

        qs = DiscoveryModule.objects.filter(app=1)
        assert qs.count() == 0
        # All our modules get added.
        check()
        # The deleted module is removed.
        # (patch.dict restores the registry contents after the with-block.)
        with mock.patch.dict(registry):
            registry.popitem()
            check()

    def test_discovery_module_form_bad_locale(self):
        # An unknown locale string must be rejected by form validation.
        d = dict(app=1, module='xx', locales='fake')
        form = DiscoveryModuleForm(d)
        assert form.errors['locales']

    def test_discovery_module_form_dedupe(self):
        # Duplicate locales are collapsed while valid ones are preserved.
        d = dict(app=amo.FIREFOX.id, module='xx', locales='en-US he he fa fa')
        form = DiscoveryModuleForm(d)
        assert form.is_valid()
        cleaned_locales = form.cleaned_data['locales'].split()
        assert sorted(cleaned_locales) == ['en-US', 'fa', 'he']
class TestUrls(TestCase):
    """URL reversing and legacy-redirect behaviour of the discovery pane."""

    fixtures = ['base/users', 'base/featured', 'addons/featured',
                'base/addon_3615']

    def test_reverse(self):
        without_mode = reverse(
            'discovery.pane', kwargs=dict(version='10.0', platform='WINNT'))
        assert without_mode == '/en-US/firefox/discovery/pane/10.0/WINNT'
        with_mode = reverse('discovery.pane', args=('10.0', 'WINNT', 'strict'))
        assert with_mode == '/en-US/firefox/discovery/pane/10.0/WINNT/strict'

    def test_resolve_addon_view(self):
        response = self.client.get(
            '/en-US/firefox/discovery/addon/3615', follow=True)
        expected = reverse('discovery.addons.detail', args=['a3615'])
        self.assert3xx(response, expected, 301)

    def test_resolve_disco_pane(self):
        # Redirect to default 'strict' if version < 10.
        response = self.client.get(
            '/en-US/firefox/discovery/4.0/Darwin', follow=True)
        expected = reverse('discovery.pane', args=['4.0', 'Darwin', 'strict'])
        self.assert3xx(response, expected, 302)
        # Redirect to default 'ignore' if version >= 10.
        response = self.client.get(
            '/en-US/firefox/discovery/10.0/Darwin', follow=True)
        expected = reverse('discovery.pane', args=['10.0', 'Darwin', 'ignore'])
        self.assert3xx(response, expected, 302)

    def test_no_compat_mode(self):
        response = self.client.head('/en-US/firefox/discovery/pane/10.0/WINNT')
        assert response.status_code == 200

    def test_with_compat_mode(self):
        # Each recognised compat mode answers 200; anything else is a 404.
        for url in ('/en-US/firefox/discovery/pane/10.0/WINNT/strict',
                    '/en-US/firefox/discovery/pane/10.0/WINNT/normal',
                    '/en-US/firefox/discovery/pane/10.0/WINNT/ignore'):
            assert self.client.head(url).status_code == 200
        response = self.client.head(
            '/en-US/firefox/discovery/pane/10.0/WINNT/blargh')
        assert response.status_code == 404
class TestPromos(TestCase):
    """Tests for the promo modules rendered on the discovery pane and the
    homepage (collection-backed 'Games!' and 'Must-Have Media' modules)."""
    def setUp(self):
        super(TestPromos, self).setUp()
        # Create a few add-ons...
        self.addon1 = addon_factory()
        self.addon2 = addon_factory()
        self.addon3 = addon_factory(name='That & This', summary='This & That')
        # Create a user for the collection.
        user = UserProfile.objects.create(username='mozilla')
        games_collection = collection_factory(author=user, slug='games')
        games_collection.set_addons(
            [self.addon1.pk, self.addon2.pk, self.addon3.pk])
        DiscoveryModule.objects.create(
            app=amo.FIREFOX.id, ordering=1, module='Games!')
        musthave_collection = collection_factory(
            author=user, slug='must-have-media')
        musthave_collection.set_addons(
            [self.addon1.pk, self.addon2.pk, self.addon3.pk])
        DiscoveryModule.objects.create(
            app=amo.FIREFOX.id, ordering=2, module='Must-Have Media')
    def get_disco_url(self, platform, version):
        return reverse('discovery.pane.promos', args=[platform, version])
    def get_home_url(self):
        return reverse('addons.homepage_promos')
    def _test_response_contains_addons(self, response):
        # Shared check: page renders and shows the first two add-ons plus
        # the third one's summary text.
        assert response.status_code == 200
        assert response.content
        content = smart_text(response.content)
        assert unicode(self.addon1.name) in content
        assert unicode(self.addon2.name) in content
        assert 'This & That' in content
    def test_no_params(self):
        response = self.client.get(self.get_home_url())
        assert response.status_code == 404
    def test_home_ignores_platforms(self):
        """Ensure that we get the same thing for the homepage promos regardless
        of the platform."""
        file_ = self.addon1.current_version.all_files[0]
        file_.update(platform=amo.PLATFORM_LINUX.id)
        assert self.addon1.current_version.supported_platforms == [
            amo.PLATFORM_LINUX]
        response_mac = self.client.get(
            self.get_home_url(), {'version': '10.0', 'platform': 'mac'})
        response_darwin = self.client.get(
            self.get_home_url(), {'version': '10.0', 'platform': 'Darwin'})
        response_win = self.client.get(
            self.get_home_url(), {'version': '10.0', 'platform': 'win'})
        response_winnt = self.client.get(
            self.get_home_url(), {'version': '10.0', 'platform': 'WINNT'})
        assert response_mac.status_code == 200
        assert response_darwin.status_code == 200
        assert response_win.status_code == 200
        assert response_winnt.status_code == 200
        assert response_mac.content == response_darwin.content
        assert response_win.content == response_winnt.content
        assert response_win.content == response_mac.content
        self._test_response_contains_addons(response_win)
    def test_home_no_platform(self):
        response = self.client.get(self.get_home_url(), {'version': '10.0'})
        self._test_response_contains_addons(response)
    def test_home_no_version(self):
        response = self.client.get(self.get_home_url(), {'platform': 'lol'})
        self._test_response_contains_addons(response)
    def test_home_does_not_contain_disabled_addons(self):
        # Both user-disabled and admin-disabled add-ons must be hidden.
        self.addon1.update(disabled_by_user=True)
        self.addon2.update(status=amo.STATUS_DISABLED)
        response = self.client.get(self.get_home_url(), {'platform': 'mac'})
        assert response.status_code == 200
        assert response.content
        content = smart_text(response.content)
        assert unicode(self.addon1.name) not in content
        assert unicode(self.addon2.name) not in content
        assert 'This & That' in content
    def test_pane_platform_filtering(self):
        """Ensure that the discovery pane is filtered by platform."""
        file_ = self.addon1.current_version.all_files[0]
        file_.update(platform=amo.PLATFORM_LINUX.id)
        assert self.addon1.current_version.supported_platforms == [
            amo.PLATFORM_LINUX]
        response = self.client.get(self.get_disco_url('10.0', 'Darwin'))
        assert response.status_code == 200
        assert response.content
        content = smart_text(response.content)
        assert unicode(self.addon1.name) not in content
        assert unicode(self.addon2.name) in content
        assert 'This & That' in content
        # Make sure aliases are working.
        response_mac = self.client.get(self.get_disco_url('10.0', 'mac'))
        assert response_mac.status_code == 200
        assert response_mac.content == response.content
    def test_hidden(self):
        # With no modules configured the pane renders empty.
        DiscoveryModule.objects.all().delete()
        response = self.client.get(self.get_disco_url('10.0', 'Darwin'))
        assert response.status_code == 200
        assert response.content == ''
    def test_games_linkified(self):
        response = self.client.get(self.get_disco_url('10.0', 'Darwin'))
        self._test_response_contains_addons(response)
        doc = pq(response.content)
        h2_link = doc('h2 a').eq(0)
        expected_url = '%s%s' % (
            reverse('collections.detail', args=['mozilla', 'games']),
            '?src=discovery-promo')
        assert h2_link.attr('href') == expected_url
    def test_games_linkified_home(self):
        response = self.client.get(self.get_home_url(),
                                   {'version': '10.0', 'platform': 'mac'})
        self._test_response_contains_addons(response)
        doc = pq(response.content)
        h2_link = doc('h2 a').eq(0)
        expected_url = '%s%s' % (
            reverse('collections.detail', args=['mozilla', 'games']),
            '?src=hp-dl-promo')
        assert h2_link.attr('href') == expected_url
    def test_musthave_media_linkified(self):
        response = self.client.get(self.get_disco_url('10.0', 'Darwin'))
        assert response.status_code == 200
        doc = pq(response.content)
        h2_link = doc('h2 a').eq(1)
        expected_url = '%s%s' % (
            reverse('collections.detail', args=['mozilla', 'must-have-media']),
            '?src=discovery-promo')
        assert h2_link.attr('href') == expected_url
    def test_musthave_media_linkified_home(self):
        response = self.client.get(self.get_home_url(),
                                   {'version': '10.0', 'platform': 'mac'})
        assert response.status_code == 200
        doc = pq(response.content)
        h2_link = doc('h2 a').eq(1)
        expected_url = '%s%s' % (
            reverse('collections.detail', args=['mozilla', 'must-have-media']),
            '?src=hp-dl-promo')
        assert h2_link.attr('href') == expected_url
    def test_musthave_media_no_double_escaping(self):
        response = self.client.get(self.get_home_url(),
                                   {'version': '10.0', 'platform': 'mac'})
        assert response.status_code == 200
        doc = pq(response.content)
        assert 'This & That' in doc.html()
        assert 'That & This' in doc.html()
class TestPane(TestCase):
    """Rendering tests for the discovery pane page: account links,
    featured add-ons and featured themes sections."""
    fixtures = ['addons/featured', 'base/addon_3615', 'base/collections',
                'base/featured', 'base/users',
                'bandwagon/featured_collections']
    def setUp(self):
        super(TestPane, self).setUp()
        self.url = reverse('discovery.pane', args=['3.7a1pre', 'Darwin'])
    def test_my_account(self):
        self.client.login(email='regular@mozilla.com')
        r = self.client.get(reverse('discovery.pane.account'))
        assert r.status_code == 200
        doc = pq(r.content)
        s = doc('#my-account')
        assert s
        # The three account links: profile, favorites, collections.
        a = s.find('a').eq(0)
        assert a.attr('href') == reverse('users.profile', args=['regularuser'])
        assert a.text() == 'My Profile'
        a = s.find('a').eq(1)
        assert a.attr('href') == (
            reverse('collections.detail', args=['regularuser', 'favorites']))
        assert a.text() == 'My Favorites'
        a = s.find('a').eq(2)
        assert a.attr('href') == (
            reverse('collections.user', args=['regularuser']))
        assert a.text() == 'My Collections'
    def test_mission(self):
        r = self.client.get(reverse('discovery.pane.account'))
        assert pq(r.content)('#mission')
    def test_featured_addons_section(self):
        r = self.client.get(self.url)
        assert pq(r.content)('#featured-addons h2').text() == (
            'Featured Add-ons')
    def test_featured_addons(self):
        # Both fixture add-ons (7661, 2464) must appear with title link,
        # name and icon, and the link must carry the featured src tag.
        r = self.client.get(self.url)
        p = pq(r.content)('#featured-addons')
        addon = Addon.objects.get(id=7661)
        li = p.find('li[data-guid="%s"]' % addon.guid)
        a = li.find('a.addon-title')
        url = reverse('discovery.addons.detail', args=[7661])
        assert a.attr('href').endswith(url + '?src=discovery-featured'), (
            'Unexpected add-on details URL')
        assert li.find('h3').text() == unicode(addon.name)
        assert li.find('img').attr('src') == addon.icon_url
        addon = Addon.objects.get(id=2464)
        li = p.find('li[data-guid="%s"]' % addon.guid)
        assert li.attr('data-guid') == addon.guid
        a = li.find('a.addon-title')
        url = reverse('discovery.addons.detail', args=[2464])
        assert a.attr('href').endswith(url + '?src=discovery-featured'), (
            'Unexpected add-on details URL')
        assert li.find('h3').text() == unicode(addon.name)
        assert li.find('img').attr('src') == addon.icon_url
    def test_featured_personas_section(self):
        r = self.client.get(self.url)
        h2 = pq(r.content)('#featured-themes h2')
        assert h2.text() == 'See all Featured Themes'
        assert h2.find('a.all').attr('href') == reverse('browse.personas')
    @override_settings(MEDIA_URL='/media/', STATIC_URL='/static/')
    def test_featured_personas(self):
        addon = Addon.objects.get(id=15679)
        r = self.client.get(self.url)
        doc = pq(r.content)
        featured = doc('#featured-themes')
        assert featured.length == 1
        # Look for all images that are not icon uploads.
        imgs = doc('img:not([src*="/media/"])')
        imgs_ok = (pq(img).attr('src').startswith('/static/')
                   for img in imgs)
        # NOTE(review): the assertion message says MEDIA_URL but the check
        # above is against the STATIC_URL prefix.
        assert all(imgs_ok), 'Images must be prefixed with MEDIA_URL!'
        featured = doc('#featured-themes')
        assert featured.length == 1
        a = featured.find('a[data-browsertheme]')
        url = reverse('discovery.addons.detail', args=[15679])
        assert a.attr('href').endswith(url + '?src=discovery-featured'), (
            'Unexpected add-on details URL')
        assert a.attr('target') == '_self'
        assert featured.find('.addon-title').text() == unicode(addon.name)
class TestDetails(TestCase):
    """Tests for the discovery add-on detail and EULA pages."""
    fixtures = ['base/addon_3615', 'base/addon_592']
    def setUp(self):
        super(TestDetails, self).setUp()
        self.addon = self.get_addon()
        self.detail_url = reverse('discovery.addons.detail',
                                  args=[self.addon.slug])
        self.eula_url = reverse('discovery.addons.eula',
                                args=[self.addon.slug])
    def get_addon(self):
        return Addon.objects.get(id=3615)
    def test_install_button_eula(self):
        doc = pq(self.client.get(self.detail_url).content)
        assert doc('#install .install-button').text() == 'Download Now'
        assert doc('#install .eula').text() == (
            'View End-User License Agreement')
        doc = pq(self.client.get(self.eula_url).content)
        assert doc('#install .install-button').text() == 'Download Now'
    def test_install_button_no_eula(self):
        # Without a EULA the eula URL redirects back to the detail page.
        self.addon.update(eula=None)
        doc = pq(self.client.get(self.detail_url).content)
        assert doc('#install .install-button').text() == 'Download Now'
        r = self.client.get(self.eula_url)
        self.assert3xx(r, self.detail_url, 302)
    def test_dependencies(self):
        doc = pq(self.client.get(self.detail_url).content)
        assert doc('.dependencies').length == 0
        req = Addon.objects.get(id=592)
        AddonDependency.objects.create(addon=self.addon, dependent_addon=req)
        assert self.addon.all_dependencies == [req]
        # Clear the cache so the newly created dependency is rendered.
        cache.clear()
        d = pq(self.client.get(self.detail_url).content)('.dependencies')
        assert d.length == 1
        a = d.find('ul a')
        assert a.text() == unicode(req.name)
        assert a.attr('href').endswith('?src=discovery-dependencies')
class TestPersonaDetails(TestCase):
    """Tests for the discovery detail page of a Persona (theme)."""
    fixtures = ['addons/persona', 'base/users']
    def setUp(self):
        super(TestPersonaDetails, self).setUp()
        self.addon = Addon.objects.get(id=15663)
        self.url = reverse('discovery.addons.detail', args=[self.addon.slug])
    def test_page(self):
        r = self.client.get(self.url)
        assert r.status_code == 200
    def test_by(self):
        """Test that the `by ... <authors>` section works."""
        r = self.client.get(self.url)
        assert pq(r.content)('h2.author').text().startswith(
            'by persona_author')
    def test_no_version(self):
        """Don't display a version number for themes."""
        r = self.client.get(self.url)
        assert pq(r.content)('h1 .version') == []
    def test_created_not_updated(self):
        """Don't display the updated date but the created date for themes."""
        r = self.client.get(self.url)
        doc = pq(r.content)
        details = doc('.addon-info li')
        # There's no "Last Updated" entry.
        assert not any('Last Updated' in node.text_content()
                       for node in details)
        # But there's a "Created" entry.
        for detail in details:
            if detail.find('h3').text_content() == 'Created':
                created = detail.find('p').text_content()
                assert created == (
                    trim_whitespace(format_date(self.addon.created)))
                break  # Needed, or we go in the "else" clause.
        else:
            assert False, 'No "Created" entry found.'
class TestDownloadSources(TestCase):
    """Ensure download/learn-more links carry the expected ?src=
    attribution, and that an incoming ?src= value trickles through."""
    fixtures = ['base/addon_3615', 'base/users',
                'base/collections', 'base/featured', 'addons/featured',
                'legacy_discovery/discoverymodules']
    def setUp(self):
        super(TestDownloadSources, self).setUp()
        self.url = reverse('discovery.pane', args=['3.7a1pre', 'Darwin'])
    def test_detail(self):
        url = reverse('discovery.addons.detail', args=['a3615'])
        r = self.client.get(url)
        doc = pq(r.content)
        assert doc('#install a.download').attr('href').endswith(
            '?src=discovery-details')
        assert doc('#install li a#learn-more').attr('href').endswith(
            '?src=discovery-learnmore')
        assert doc('#install li.privacy a').attr('href').endswith(
            '?src=discovery-learnmore')
    def test_detail_trickle(self):
        # A src passed on the request wins over the page default.
        url = (reverse('discovery.addons.detail', args=['a3615']) +
               '?src=discovery-featured')
        r = self.client.get(url)
        doc = pq(r.content)
        assert doc('#install a.download').attr('href').endswith(
            '?src=discovery-featured')
    def test_eula(self):
        url = reverse('discovery.addons.eula', args=['a3615'])
        r = self.client.get(url)
        doc = pq(r.content)
        assert doc('#install a.download').attr('href').endswith(
            '?src=discovery-details')
        assert doc('#install li:eq(1)').find('a').attr('href').endswith(
            '?src=discovery-details')
    def test_eula_trickle(self):
        # Same trickle behaviour on the EULA page.
        url = (reverse('discovery.addons.eula', args=['a3615']) +
               '?src=discovery-upandcoming')
        r = self.client.get(url)
        doc = pq(r.content)
        assert doc('#install a.download').attr('href').endswith(
            '?src=discovery-upandcoming')
        assert doc('#install li:eq(1)').find('a').attr('href').endswith(
            '?src=discovery-upandcoming')
class TestTestPilot(TestCase):
    """Tests for the Test Pilot promo module on the discovery pane."""
    fixtures = ['base/users', 'base/addon_3615',
                'legacy_discovery/discoverymodules']
    def setUp(self):
        super(TestTestPilot, self).setUp()
        self.url = reverse('discovery.pane.promos', args=['Darwin', '10.0'])
        self.addon = Addon.objects.get(id=3615)
        DiscoveryModule.objects.create(
            app=amo.FIREFOX.id, ordering=4,
            module='Test Pilot')
    def test_testpilot(self):
        # The module renders its static heading, blurb and external link.
        r = self.client.get(self.url)
        assert pq(r.content)('h2').text() == 'Become a Test Pilot'
        assert (pq(r.content)('h3').text() ==
                'Unlock early access to experimental browser features.')
        assert (pq(r.content)('a').attr('href') ==
                'https://testpilot.firefox.com/')
class TestMonthlyPick(TestCase):
    """Tests for the Monthly Pick promo module on the discovery pane."""
    fixtures = ['base/users', 'base/addon_3615',
                'legacy_discovery/discoverymodules']
    def setUp(self):
        super(TestMonthlyPick, self).setUp()
        self.url = reverse('discovery.pane.promos', args=['Darwin', '10.0'])
        self.addon = Addon.objects.get(id=3615)
        DiscoveryModule.objects.create(
            app=amo.FIREFOX.id, ordering=4,
            module='Monthly Pick')
    def test_monthlypick(self):
        # First test with locale=None, it should never appear.
        mp = MonthlyPick.objects.create(addon=self.addon, blurb='BOOP',
                                        image='http://mozilla.com')
        response = self.client.get(self.url)
        assert response.content == ''
        # Now update with locale='', it should be used as the fallback.
        mp.update(locale='')
        response = self.client.get(self.url)
        pick = pq(response.content)('#monthly')
        assert pick.length == 1
        assert pick.parents('.panel').attr('data-addonguid') == self.addon.guid
        a = pick.find('h3 a')
        url = reverse('discovery.addons.detail', args=['a3615'])
        assert a.attr('href').endswith(url + '?src=discovery-promo'), (
            'Unexpected add-on details URL: %s' % url)
        assert a.attr('target') == '_self'
        assert a.text() == unicode(self.addon.name)
        assert pick.find('img').attr('src') == 'http://mozilla.com'
        assert pick.find('.wrap > div > div > p').text() == 'BOOP'
        assert pick.find('p.install-button a').attr('href').endswith(
            '?src=discovery-promo')
    def test_monthlypick_disabled_addon(self):
        # A pick for a user-disabled add-on must be skipped; the locale=''
        # fallback pick is shown instead.
        disabled_addon = addon_factory(disabled_by_user=True)
        MonthlyPick.objects.create(
            addon=disabled_addon, blurb='foo', locale='en-US')
        MonthlyPick.objects.create(
            addon=self.addon, blurb='bar', locale='')
        response = self.client.get(self.url)
        pick = pq(response.content)('#monthly')
        assert pick.length == 1
        assert pick.parents('.panel').attr('data-addonguid') == self.addon.guid
    def test_monthlypick_no_image(self):
        MonthlyPick.objects.create(addon=self.addon, blurb='BOOP', locale='',
                                   image='')
        # Tests for no image when screenshot not set.
        r = self.client.get(self.url)
        pick = pq(r.content)('#monthly')
        assert pick.length == 1
        assert pick.find('img').length == 0
        # Tests for screenshot image when set.
        Preview.objects.create(addon=self.addon)
        r = self.client.get(self.url)
        pick = pq(r.content)('#monthly')
        assert pick.length == 1
        assert pick.find('img').attr('src') == (
            self.addon.all_previews[0].image_url)
    def test_no_monthlypick(self):
        # No MonthlyPick rows at all: the module renders nothing.
        r = self.client.get(self.url)
        assert r.content == ''
class TestPaneMoreAddons(TestCase):
    """Tests for the 'up-and-coming' more-addons pane and its version /
    compat-mode filtering."""
    fixtures = ['base/appversion']
    def setUp(self):
        super(TestPaneMoreAddons, self).setUp()
        # Two add-ons with different max compatible app versions.
        self.addon1 = addon_factory(hotness=99,
                                    version_kw=dict(max_app_version='5.0'))
        self.addon2 = addon_factory(hotness=0,
                                    version_kw=dict(max_app_version='6.0'))
    def _url(self, **kwargs):
        default = dict(
            section='up-and-coming',
            version='5.0',
            platform='Darwin')
        default.update(kwargs)
        return reverse('discovery.pane.more_addons', kwargs=default)
    def test_hotness_strict(self):
        # Defaults to strict compat mode, both are within range.
        res = self.client.get(self._url())
        assert res.status_code == 200
        assert pq(res.content)('.featured-addons').length == 2
    def test_hotness_strict_filtered(self):
        # Defaults to strict compat mode, one is within range.
        res = self.client.get(self._url(version='6.0'))
        assert res.status_code == 200
        assert pq(res.content)('.featured-addons').length == 1
        self.assertContains(res, self.addon2.name)
    def test_hotness_ignore(self):
        # Defaults to ignore compat mode for Fx v10, both are compatible.
        res = self.client.get(self._url(version='10.0'))
        assert res.status_code == 200
        assert pq(res.content)('.featured-addons').length == 2
    def test_hotness_normal_strict_opt_in(self):
        # Add a 3rd add-on that should get filtered out b/c of compatibility.
        addon_factory(hotness=50, version_kw=dict(max_app_version='7.0'),
                      file_kw=dict(strict_compatibility=True))
        res = self.client.get(self._url(version='12.0', compat_mode='normal'))
        assert res.status_code == 200
        assert pq(res.content)('.featured-addons').length == 2
    def test_hotness_normal_binary_components(self):
        # Add a 3rd add-on that should get filtered out b/c of compatibility.
        addon_factory(hotness=50, version_kw=dict(max_app_version='7.0'),
                      file_kw=dict(binary_components=True))
        res = self.client.get(self._url(version='12.0', compat_mode='normal'))
        assert res.status_code == 200
        assert pq(res.content)('.featured-addons').length == 2
    def test_hotness_normal_compat_override(self):
        # Add a 3rd add-on that should get filtered out b/c of compatibility.
        addon3 = addon_factory(hotness=50,
                               version_kw=dict(max_app_version='7.0'))
        # Add override for this add-on.
        compat = CompatOverride.objects.create(guid='three', addon=addon3)
        CompatOverrideRange.objects.create(
            compat=compat, app=1,
            min_version=addon3.current_version.version, max_version='*')
        res = self.client.get(self._url(version='12.0', compat_mode='normal'))
        assert res.status_code == 200
        assert pq(res.content)('.featured-addons').length == 2
| {
"content_hash": "7a9e42181978937b378b86e8445fd528",
"timestamp": "",
"source": "github",
"line_count": 652,
"max_line_length": 79,
"avg_line_length": 41.00153374233129,
"alnum_prop": 0.6003815508921557,
"repo_name": "lavish205/olympia",
"id": "3b8b57b17be256d0b9a52d748a219c971125b426",
"size": "26733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/legacy_discovery/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "808053"
},
{
"name": "HTML",
"bytes": "614229"
},
{
"name": "JavaScript",
"bytes": "1075018"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5064850"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11467"
},
{
"name": "Smarty",
"bytes": "1758"
}
],
"symlink_target": ""
} |
import vtk
import numpy as np
from vmtk import vmtkscripts
import argparse
import copy
# creates lines normal to surface for evaluation in the probe image with surface
def warp_surface(args):
    """Randomly displace ("fuzz") each surface point along its normal.

    Reads the surface from args.surface, ensures point normals exist,
    assigns each point a uniform random scalar in [-0.5, 0.5], scales the
    normals by those scalars, warps the points by the resulting vectors
    (further scaled by args.fuzz_scale) and writes the warped surface to
    args.file_out.
    """
    print("warp the surface ")
    reader = vmtkscripts.vmtkSurfaceReader()
    reader.InputFileName = args.surface
    reader.Execute()
    surface = reader.Surface
    # Reuse existing point normals if present; otherwise compute them.
    point_data = surface.GetPointData()
    has_normals = any(
        point_data.GetArrayName(i) == "Normals"
        for i in range(point_data.GetNumberOfArrays()))
    if has_normals:
        normals = surface
        print("already have")
    else:
        get_normals = vtk.vtkPolyDataNormals()
        get_normals.SetInputData(surface)
        get_normals.SetFeatureAngle(30.0)  # default feature angle
        get_normals.SetSplitting(True)
        get_normals.Update()
        get_normals.GetOutput().GetPointData().SetActiveVectors("Normals")
        normals = get_normals.GetOutput()
        print("normals generated")
    # One random scalar per point, uniform in [-0.5, 0.5].
    random = vtk.vtkRandomAttributeGenerator()
    random.SetInputData(normals)
    random.SetDataTypeToDouble()
    random.GeneratePointScalarsOn()
    random.SetComponentRange(-0.5, 0.5)
    random.Update()
    # Scale each normal by its point's random scalar to get a
    # random-length displacement vector.
    calc = vtk.vtkArrayCalculator()
    calc.SetInputConnection(random.GetOutputPort())
    calc.AddScalarArrayName("RandomPointScalars", 0)
    calc.AddVectorArrayName("Normals", 0, 1, 2)
    calc.SetFunction("Normals * RandomPointScalars")
    calc.SetResultArrayName("RandomLengthNormalVectors")
    calc.Update()
    # Displace the points by the random vectors, scaled by the user factor.
    warp = vtk.vtkWarpVector()
    warp.SetInputConnection(calc.GetOutputPort())
    warp.SetInputArrayToProcess(0, 0, 0,
                                vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS,
                                "RandomLengthNormalVectors")
    warp.SetScaleFactor(args.fuzz_scale)
    warp.Update()
    writer = vmtkscripts.vmtkSurfaceWriter()
    writer.OutputFileName = args.file_out
    writer.Input = warp.GetOutput()
    writer.Execute()
if __name__ == '__main__':
    # Command-line entry point: parse arguments and fuzz the surface.
    arg_parser = argparse.ArgumentParser(
        description='estimate vertices for uniform point distribution')
    arg_parser.add_argument("-i", dest="surface", required=True,
                            help="input surface file", metavar="FILE")
    arg_parser.add_argument("-o", dest="file_out", required=True,
                            help="output surface file", metavar="FILE")
    arg_parser.add_argument("-s", '--scale', dest="fuzz_scale", type=float,
                            help='how much to fuzz surface ', default=0.08)
    warp_surface(arg_parser.parse_args())
| {
"content_hash": "c79b898569d0096e326335d22d9b55e0",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 120,
"avg_line_length": 34.721518987341774,
"alnum_prop": 0.6697047028800583,
"repo_name": "kayarre/Tools",
"id": "0ef8d7faf6179994e70f71f839bff1cab233fca7",
"size": "2767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vmtk/fuzzypsurface.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2306"
},
{
"name": "Mako",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "1125456"
}
],
"symlink_target": ""
} |
from datetime import datetime
from google.appengine.ext import ndb
from models import Despesa
def index():
    """Apply late-payment adjustment to overdue Despesa records.

    Fetches every Despesa, computes how many days past its (month, day)
    due date we are in the current year, and marks expenses with interest
    configured as 'ATRASADA' with an adjusted value.
    """
    despesas = Despesa.query().fetch()
    now = datetime.now()
    for despesa in despesas:
        # Days elapsed since the due date (month, day) in the current year.
        # NOTE(review): this can be negative for due dates later in the
        # year, and a negative value is still truthy in the check below --
        # confirm whether not-yet-due expenses should be skipped.
        dias_vencidos = (now - datetime(year=now.year, month=despesa.data_vencimento[0],
                                        day=despesa.data_vencimento[1])).days
        if dias_vencidos and despesa.juros:
            # NOTE(review): the formula multiplies (1 + juros) by the day
            # count -- presumably simple daily interest was intended
            # (valor * (1 + juros * dias)); confirm against requirements.
            despesa.valor_corrigido = despesa.valor * ((1 + despesa.juros) * dias_vencidos)
            despesa.status = "ATRASADA"
ndb.put_multi(despesas) | {
"content_hash": "5f32ba68e3fc1ecfbbd8be09a65e86d7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 91,
"avg_line_length": 34.875,
"alnum_prop": 0.6523297491039427,
"repo_name": "giovaneliberato/keepfamily",
"id": "5fea4da3aa56c50190e858724205debe670f39ab",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/web/tasks/calcular_juros.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1101"
},
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Python",
"bytes": "5600"
},
{
"name": "Shell",
"bytes": "1474"
}
],
"symlink_target": ""
} |
from lxml.builder import E
from jnpr.junos.utils.util import Util
from jnpr.junos.utils.start_shell import StartShell
class FS(Util):
"""
Filesystem (FS) utilities:
cat - show the contents of a file
checksum - calculate file checksum (md5,sha256,sha1)
copy - local file copy (not scp)
cwd - change working directory
ls - return file/dir listing
mkdir - create a directory
pwd - get working directory
rename - local file rename
rm - local file delete
rmdir - remove a directory
stat - return file/dir information
storage_usage - return storage usage
storage_cleanup - perform storage storage_cleanup
storage_cleanup_check - returns a list of files to remove at cleanup
symlink - create a symlink
tgz - tar+gzip a directory
NOTES:
        The following methods require 'start shell' privileges:
[mkdir, rmdir, symlink]
"""
# -------------------------------------------------------------------------
# cat - show file contents
# -------------------------------------------------------------------------
def cat(self, path):
"""
returns the contents of the file :path:
"""
try:
rsp = self._dev.rpc.file_show(filename=path)
except:
return None
return rsp.text
# -------------------------------------------------------------------------
# cwd - change working directory
# -------------------------------------------------------------------------
    def cwd(self, path):
        """
        Change the CLI working directory to :path: (no return value).
        """
        self._dev.rpc.set_cli_working_directory(directory=path)
# -------------------------------------------------------------------------
# pwd - return current working directory
# -------------------------------------------------------------------------
def pwd(self):
"""
returns the current working directory
"""
rsp = self._dev.rpc(E.command("show cli directory"))
return rsp.findtext('./working-directory')
# -------------------------------------------------------------------------
# checksum - compute file checksum
# -------------------------------------------------------------------------
def checksum(self, path, calc='md5'):
"""
performs the checksum command on the given file path using the
required calculation method ['md5', 'sha256', 'sha1'] and returns
the string value. if the :path: is not found on the device, then
None is returned.
"""
cmd_map = {
'md5': self._dev.rpc.get_checksum_information,
'sha256': self._dev.rpc.get_sha256_checksum_information,
'sha1': self._dev.rpc.get_sha1_checksum_information
}
rpc = cmd_map.get(calc)
if rpc is None:
raise ValueError("Unknown calculation method: '%s'" % calc)
try:
rsp = rpc(path=path)
return rsp.findtext('.//checksum').strip()
except:
# the only exception is that the path is not found
return None
    @classmethod
    def _decode_file(cls, fileinfo):
        """
        Decode a <file-information> element into a dict with 'type'
        ('file', 'dir' or 'link'), 'path', 'owner', 'size', permission
        and timestamp fields; links additionally get a 'link' target.
        """
        results = {}
        # A <file-directory> or <file-symlink-target> child marks the
        # entry as something other than a plain file.
        not_file = fileinfo.xpath('file-directory | file-symlink-target')
        if len(not_file):
            results['type'] = {'file-directory': 'dir',
                               'file-symlink-target': 'link'}[not_file[0].tag]
            if 'link' == results['type']:
                results['link'] = not_file[0].text.strip()
        else:
            results['type'] = 'file'
        results['path'] = fileinfo.findtext('file-name').strip()
        results['owner'] = fileinfo.findtext('file-owner').strip()
        results['size'] = int(fileinfo.findtext('file-size'))
        fper = fileinfo.find('file-permissions')
        results['permissions'] = int(fper.text.strip())
        # 'format' holds the human-readable rendering (e.g. "-rw-r--r--").
        results['permissions_text'] = fper.get('format')
        fdate = fileinfo.find('file-date')
        results['ts_date'] = fdate.get('format')
        results['ts_epoc'] = fdate.text.strip()
        return results
@classmethod
def _decode_dir(cls, dirinfo, files=None):
results = {}
results['type'] = 'dir'
results['path'] = dirinfo.get('name')
if files is None:
files = dirinfo.xpath('file-information')
results['file_count'] = len(files)
results['size'] = sum([int(f.findtext('file-size')) for f in files])
return results
# -------------------------------------------------------------------------
# stat - file information
# -------------------------------------------------------------------------
    def stat(self, path):
        """
        Return a dict of information about :path:, or None if the path
        does not exist on the device.

        Directories are decoded via _decode_dir() ('type', 'path',
        'file_count', 'size'); files and symlinks via _decode_file()
        (ownership, size, permissions, timestamps).
        """
        rsp = self._dev.rpc.file_list(detail=True, path=path)
        # if there is an output tag, then it means that the path
        # was not found
        if rsp.find('output') is not None:
            return None
        # ok, so we've either got a directory or a file at
        # this point, so decode accordingly
        xdir = rsp.find('directory')
        if xdir.get('name'):  # then this is a directory path
            return FS._decode_dir(xdir)
        else:
            return FS._decode_file(xdir.find('file-information'))
# -------------------------------------------------------------------------
# ls - file/dir listing
# -------------------------------------------------------------------------
    def ls(self, path='.', brief=False, followlink=True):
        """
        Return a dict describing the file or directory at :path:, or
        None if the path does not exist on the device.

        :param path: file or directory path (defaults to '.')
        :param brief: for directories, when True the 'files' entry is a
            plain list of names; when False it maps each name to its
            decoded file information
        :param followlink: when :path: is a symlink, recursively ls() the
            link target instead of returning the link info itself
        """
        rsp = self._dev.rpc.file_list(detail=True, path=path)
        # if there is an output tag, then it means that the path
        # was not found, and we return :None:
        if rsp.find('output') is not None:
            return None
        xdir = rsp.find('directory')
        # check to see if the directory element has a :name:
        # attribute, and if it does not, then this is a file, and
        # decode accordingly.  If the file is a symlink, then we
        # want to follow the symlink to get what we want.
        if not xdir.get('name'):
            results = FS._decode_file(xdir.find('file-information'))
            link_path = results.get('link')
            if not link_path:  # then we are done
                return results
            else:
                return results if followlink is False else self.ls(
                    path=link_path)
        # if we are here, then it's a directory, include information on all
        # files
        files = xdir.xpath('file-information')
        results = FS._decode_dir(xdir, files)
        if brief is True:
            results['files'] = [f.findtext('file-name').strip() for f in files]
        else:
            results['files'] = {
                f.findtext('file-name').strip(): FS._decode_file(f)
                for f in files
            }
        return results
# -------------------------------------------------------------------------
# storage_usage - filesystem storage usage
# -------------------------------------------------------------------------
def storage_usage(self):
rsp = self._dev.rpc.get_system_storage()
_name = lambda fs: fs.findtext('filesystem-name').strip()
def _decode(fs):
r = {}
r['mount'] = fs.find('mounted-on').text.strip()
tb = fs.find('total-blocks')
r['total'] = tb.get('format')
r['total_blocks'] = int(tb.text)
ub = fs.find('used-blocks')
r['used'] = ub.get('format')
r['used_blocks'] = int(ub.text)
r['used_pct'] = fs.find('used-percent').text.strip()
ab = fs.find('available-blocks')
r['avail'] = ab.get('format')
r['avail_block'] = int(ab.text)
return r
return {_name(fs): _decode(fs) for fs in rsp.xpath('filesystem')}
# -------------------------------------------------------------------------
### storage_cleanup_check, storage_cleanip
# -------------------------------------------------------------------------
@classmethod
def _decode_storage_cleanup(cls, files):
_name = lambda f: f.findtext('file-name').strip()
def _decode(f):
return {
'size': int(f.findtext('size')),
'ts_date': f.findtext('date').strip()
}
# return a dict of name/decode pairs for each file
return {_name(f): _decode(f) for f in files}
def storage_cleanup_check(self):
    """
    Perform the 'request system storage cleanup dry-run' command
    to return a :dict: of files/info that would be removed if
    the cleanup command was executed.
    """
    reply = self._dev.rpc.request_system_storage_cleanup(dry_run=True)
    return FS._decode_storage_cleanup(reply.xpath('file-list/file'))
def storage_cleanup(self):
    """
    Perform the 'request system storage cleanup' command to remove
    files from the filesystem. Return a :dict: of file name/info
    on the files that were removed.
    """
    reply = self._dev.rpc.request_system_storage_cleanup()
    return FS._decode_storage_cleanup(reply.xpath('file-list/file'))
# -------------------------------------------------------------------------
# rm - local file delete
# -------------------------------------------------------------------------
def rm(self, path):
    """
    Performs a local file delete action, per Junos CLI command
    "file delete". If the file does not exist, then this returns False.

    :param path: path of the file to delete (any valid Junos path).
    :returns: True when the delete succeeded, False otherwise.
    """
    # The RPC returns True on success and an XML structure otherwise,
    # so a strict identity test collapses that into a clean boolean.
    return self._dev.rpc.file_delete(path=path) is True
# -------------------------------------------------------------------------
# cp - local file copy
# -------------------------------------------------------------------------
def cp(self, from_path, to_path):
    """
    Perform a local file copy where :from_path: and :to_path: can be any
    valid Junos path argument. Refer to the Junos "file copy" command
    documentation for details.

    :returns: True if OK, False if the copy failed (e.g. the source
        file does not exist, which makes the RPC raise).
    """
    # The RPC returns True when OK and raises (RpcError) when the source
    # is missing. BUG FIX: the original used a bare 'except:', which also
    # swallowed SystemExit/KeyboardInterrupt; catch Exception instead.
    try:
        self._dev.rpc.file_copy(source=from_path, destination=to_path)
    except Exception:
        return False
    return True
# -------------------------------------------------------------------------
# mv - local file rename
# -------------------------------------------------------------------------
def mv(self, from_path, to_path):
    """
    Perform a local file rename function, same as "file rename" Junos CLI.

    :returns: True when the rename succeeded, False otherwise.
    """
    # Same pattern as rm(): the RPC yields True on success, an XML
    # structure on failure, so reduce to a strict boolean.
    return self._dev.rpc.file_rename(source=from_path, destination=to_path) is True
def tgz(self, from_path, tgz_path):
    """Create :tgz_path: as the tar-gzip archive of the directory
    :from_path:.

    Returns True on success; otherwise the error text from the device.
    """
    reply = self._dev.rpc.file_archive(compress=True,
                                       source=from_path,
                                       destination=tgz_path)
    # True means the RPC executed OK; anything else is an XML error reply
    # whose text is handed back to the caller.
    return True if reply is True else reply.text
# -------------------------------------------------------------------------
# !!!!! methods that use SSH shell commands, requires that the user
# !!!!! has 'start shell' priveldges
# -------------------------------------------------------------------------
def _ssh_exec(self, command):
    """Run *command* in a Junos shell session; return (ok, output)."""
    with StartShell(self._dev) as shell:
        output = shell.run(command)
        succeeded = shell.last_ok
    return (succeeded, output)
def rmdir(self, path):
    """
    ~| REQUIRES SHELL PRIVILEGES |~
    executes the 'rmdir' command on path
    returns True if OK, or error string
    """
    ok, output = self._ssh_exec("rmdir %s" % path)
    if ok is True:
        return True
    return ''.join(output[2:-1])
def mkdir(self, path):
    """
    ~| REQUIRES SHELL PRIVILEGES |~
    executes the 'mkdir -p' command on path
    returns True if OK, or error string
    """
    ok, output = self._ssh_exec("mkdir -p %s" % path)
    if ok is True:
        return True
    return ''.join(output[2:-1])
def symlink(self, from_path, to_path):
    """
    ~| REQUIRES SHELL PRIVILEGES |~
    executes the 'ln -sf <from_path> <to_path>' command
    returns True if OK, or error string
    """
    ok, output = self._ssh_exec("ln -sf %s %s" % (from_path, to_path))
    if ok is True:
        return True
    return ''.join(output[2:-1])
| {
"content_hash": "6ffd60a752d3a2a78094d7c40e11b15e",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 79,
"avg_line_length": 36.72727272727273,
"alnum_prop": 0.4876965637740245,
"repo_name": "dgjnpr/py-junos-eznc",
"id": "ec8ca645fe46f94c3c84cf25238b4f88a37c66f2",
"size": "13736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/jnpr/junos/utils/fs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Pascal",
"bytes": "13"
},
{
"name": "Puppet",
"bytes": "2658"
},
{
"name": "Python",
"bytes": "376420"
},
{
"name": "Ruby",
"bytes": "4840"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
} |
import re, string
from tree import Tree, NodeId
# Character-class constants. NOTE: these duplicate the stdlib `string`
# module's values; kept as module-level names for local use.
whitespace = ' \t\n\r\v\f'
ascii_lowercase = ''.join(map(chr, range(ord('a'), ord('z') + 1)))
ascii_uppercase = ascii_lowercase.upper()
ascii_letters = ascii_lowercase + ascii_uppercase
digits = ''.join(map(chr, range(ord('0'), ord('9') + 1)))
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = digits[:8]
# includes a literal backslash between '[' and ']'
punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
printable = digits + ascii_letters + punctuation + whitespace
def capwords(s, sep=None):
    """Capitalize each sep-separated word of *s* and rejoin them.

    With sep=None the string is split on runs of whitespace and rejoined
    with a single space (mirrors string.capwords).
    """
    joiner = sep if sep else ' '
    return joiner.join(word.capitalize() for word in s.split(sep))
class Huffman:
    """Huffman-coding skeleton.

    Construction builds per-letter frequency tables for *text*; the
    tree-building helpers further down are stubs awaiting implementation.
    """

    def __init__(self, text):
        """Count the occurrences of every ASCII letter in *text*."""
        self.text = text
        self.dicionarioUpper = dict([(asc, 0) for asc in ascii_uppercase])
        self.dicionarioLower = dict([(asc, 0) for asc in ascii_lowercase])
        for el in ascii_uppercase:
            self.dicionarioUpper[el] = len(re.findall(el, self.text))
        for el in ascii_lowercase:
            self.dicionarioLower[el] = len(re.findall(el, self.text))

    def mostrarTabelas(self):
        """Return a printable frequency table (upper- then lowercase)."""
        # local renamed from 'string' so it no longer shadows the stdlib module
        tabela = 'Tabela de Freqs (uppercase letters):\n'
        for el in ascii_uppercase:
            tabela += "\t{chave} -> {valor}\n".format(chave=el, valor=self.dicionarioUpper.get(el))
        tabela += '\nTabela de Freqs (lowercase letters):\n'
        for el in ascii_lowercase:
            tabela += "\t{chave} -> {valor}\n".format(chave=el, valor=self.dicionarioLower.get(el))
        return tabela

    def funcao_tamanho(self):
        """Build the Huffman tree and derive the code of every leaf (skeleton)."""
        while self.alfabeto > 1:
            S0 = self.retira_menor_probabilidade(self.alfabeto)
            S1 = self.retira_menor_probabilidade(self.alfabeto)
            self.X = NodeId()  # create a new empty node
            self.X.filho0 = S0
            self.X.filho1 = S1
            # BUG FIX: S0/S1 are locals; the original read self.S0/self.S1,
            # which are never assigned and would raise AttributeError.
            self.X.probabilidade = S0.probabilidade + S1.probabilidade
            self.insere(self.alfabeto, self.X)
        # at this point only one symbol remains
        # NOTE(review): retira_menor_simbolo is not defined anywhere in this
        # class -- presumably still to be written; confirm the intended name.
        self.X = self.retira_menor_simbolo(self.alfabeto)
        for folha in self.X:
            self.codigo[folha] = self.percorre_da_raiz_ate_a_folha(folha)

    def retira_menor_probabilidade(self, alfabeto=None):
        """Return and remove the node/leaf of lowest probability from the
        repository *alfabeto*. Stub. Parameter added (with a default, for
        backward compatibility) because funcao_tamanho() calls this with
        one argument."""
        pass

    def percorre_da_raiz_ate_a_folha(self, folha=None):
        """Walk the binary tree from the root to *folha*, accumulating the
        edge values into the returned code. Stub; parameter added to match
        the call in funcao_tamanho()."""
        pass

    def insere(self, alfabeto=None, X=None):
        """Insert symbol *X* into the repository *alfabeto*. Stub;
        parameters added to match the call in funcao_tamanho()."""
        pass
| {
"content_hash": "264f590abd4ad5d99846c581b6beab8b",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 115,
"avg_line_length": 35.648648648648646,
"alnum_prop": 0.634950720242608,
"repo_name": "tonussi/multimedia",
"id": "a74a022918d5266834dea0711858ea56eb4254a4",
"size": "2681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "huffman/huffman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "10432"
},
{
"name": "Python",
"bytes": "12416"
}
],
"symlink_target": ""
} |
from signals import register
# Application lifecycle signals.
register('app.activate')
register('app.deactivate')
register('app.pre_start')
register('app.start')
register('app.ready')
register('app.close')
register('app.stop')
register('app.chdir')
register('app.command')
register('app.remote')
register('app.open_file')
# Project load/save lifecycle.
register('project.init')
register('project.preload')
register('project.presave')
register('project.load')
register('project.save')
# Module (plugin) lifecycle.
register('module.loaded')
register('module.register')
register('module.unregister')
register('module.load')
register('module.unload')
# Preview playback control.
register('preview.start')
register('preview.resume')
register('preview.stop')
register('preview.pause')
# Editor selection changes.
register('selection.changed')
register('selection.hint')
# Undo/redo command stack.
register('command.new')
register('command.undo')
register('command.redo')
register('command.clear')
# The signals below are not yet enabled; kept for reference.
# register('game.pause')
# register('game.resume')
# register('debug.enter')
# register('debug.exit')
# register('debug.continue')
# register('debug.stop')
# register('debug.command')
# register('debug.info')
# register('file.modified')
# register('file.removed')
# register('file.added')
# register('file.moved')
# register('project.pre_deploy')
# register('project.deploy')
# register('project.post_deploy')
# register('project.done_deploy')
# register('asset.reset')
# register('asset.post_import_all')
# register('asset.added')
# register('asset.removed')
# register('asset.modified')
# register('asset.moved')
# register('asset.deploy.changed')
# register('asset.register')
# register('asset.unregister')
| {
"content_hash": "94418d80f78fbc5bb6bbce3e34e97355",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 35,
"avg_line_length": 21.38888888888889,
"alnum_prop": 0.7318181818181818,
"repo_name": "cloudteampro/juma-editor",
"id": "92916a7f0fb5559b25b599a97980eeb3df47d9bf",
"size": "1540",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "editor/lib/juma/core/globalSignals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "490405"
},
{
"name": "C++",
"bytes": "15076"
},
{
"name": "Lua",
"bytes": "223218"
},
{
"name": "Makefile",
"bytes": "6088"
},
{
"name": "Objective-C",
"bytes": "25470"
},
{
"name": "Python",
"bytes": "1033362"
},
{
"name": "Shell",
"bytes": "2792"
}
],
"symlink_target": ""
} |
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
def setup_server():
    """Mount a CherryPy app with two WSGI middlewares wired through the
    'wsgi.pipeline' config namespace so the test case below can verify
    pipeline ordering. (Python 2 era code: uses .next()/iteritems.)"""
    class WSGIResponse(object):
        # Wraps a WSGI app's result iterable; forwards close() when present.
        def __init__(self, appresults):
            self.appresults = appresults
            self.iter = iter(appresults)
        def __iter__(self):
            return self
        def next(self):
            return self.iter.next()
        def close(self):
            # Per PEP 333, close() on the wrapped result must be called
            # if the app provided one.
            if hasattr(self.appresults, "close"):
                self.appresults.close()
    class ChangeCase(object):
        # Middleware: applies a str method named by 'to' (e.g. 'upper')
        # to every chunk of the response body.
        def __init__(self, app, to=None):
            self.app = app
            self.to = to
        def __call__(self, environ, start_response):
            res = self.app(environ, start_response)
            # The inner class uses 'this' instead of 'self' so the closure
            # can still reach the middleware instance's 'self.to'.
            class CaseResults(WSGIResponse):
                def next(this):
                    return getattr(this.iter.next(), self.to)()
            return CaseResults(res)
    class Replacer(object):
        # Middleware: applies str.replace for every (old, new) pair in 'map'
        # to every chunk of the response body.
        def __init__(self, app, map={}):
            self.app = app
            self.map = map
        def __call__(self, environ, start_response):
            res = self.app(environ, start_response)
            class ReplaceResults(WSGIResponse):
                def next(this):
                    line = this.iter.next()
                    for k, v in self.map.iteritems():
                        line = line.replace(k, v)
                    return line
            return ReplaceResults(res)
    class Root(object):
        # Mixed-case body so both middlewares visibly transform it.
        def index(self):
            return "HellO WoRlD!"
        index.exposed = True
    root_conf = {'wsgi.pipeline': [('replace', Replacer)],
                 'wsgi.replace.map': {'L': 'X', 'l': 'r'},
                 }
    cherrypy.config.update({'environment': 'test_suite'})
    app = cherrypy.Application(Root())
    # 'changecase' is appended after 'replace', so replace must run first.
    app.wsgiapp.pipeline.append(('changecase', ChangeCase))
    app.wsgiapp.config['changecase'] = {'to': 'upper'}
    cherrypy.tree.mount(app, config={'/': root_conf})
from cherrypy.test import helper
class WSGI_Namespace_Test(helper.CPWebCase):
    """Checks that WSGI middlewares configured via the 'wsgi.*' config
    namespace run in pipeline order (Replacer before ChangeCase)."""
    def test_pipeline(self):
        if not cherrypy.server.httpserver:
            # Python 2 print statement; this module predates Python 3.
            print "skipped ",
            return
        self.getPage("/")
        # If body is "HEXXO WORXD!", the middleware was applied out of order.
        # "HellO WoRlD!" -> Replacer (L->X, l->r) -> "HerrO WoRrD!"
        # -> ChangeCase(upper) -> "HERRO WORRD!".
        self.assertBody("HERRO WORRD!")
# Allow running this test module stand-alone.
if __name__ == '__main__':
    setup_server()
    helper.testmain()
| {
"content_hash": "97a6db3998c7860c702dcf4ce0574c2e",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 77,
"avg_line_length": 27.274725274725274,
"alnum_prop": 0.5124899274778405,
"repo_name": "VHAINNOVATIONS/DmD",
"id": "3aaabf5ec313c8888c91a871a8596fee1c4361e8",
"size": "2482",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scrubber/MIST_2_0_4/src/CherryPy-3.1.2/cherrypy/test/test_wsgi_ns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "258262"
},
{
"name": "HTML",
"bytes": "3057541"
},
{
"name": "Java",
"bytes": "363296"
},
{
"name": "JavaScript",
"bytes": "8682388"
},
{
"name": "Perl",
"bytes": "294110"
},
{
"name": "Perl6",
"bytes": "14166"
},
{
"name": "Prolog",
"bytes": "782419"
},
{
"name": "Python",
"bytes": "3569206"
},
{
"name": "Shell",
"bytes": "6422"
},
{
"name": "XS",
"bytes": "120883"
}
],
"symlink_target": ""
} |
"""
Created on Fri Apr 28 11:30:56 2017
@author: Patrick Woo-Sam
"""
import dryscrape
import time
import pandas as pd
from io import StringIO
class Trajectory_browser:
    """Scraper for NASA's Trajectory Browser search page (uses dryscrape)."""

    def __init__(self):
        self._url = 'https://trajbrowser.arc.nasa.gov/traj_browser.php'
        self.session = dryscrape.Session()
        # assumes dryscrape set_attribute defaults the value to True -- TODO confirm
        self.session.set_attribute('local_storage_enabled')
        self.csv = None  # populated by download_csv()

    def search(self, **kwargs):
        '''Submit a search query to NASA's trajectory browser.

        Keyword arguments (defaults used here in parentheses):
        ------------------
        NEOs / NEAs / NECs -- on or off (on)
        chk_maxMag -- on or off (on); maxMag -- int (25)
        chk_maxOCC -- on or off (on); maxOCC -- int (4)
        chk_target_list -- on or off (on); target_list -- str ('')
        mission_class -- oneway or roundtrip (oneway)
        mission_type -- rendezvous or flyby (rendezvous)
        LD1 -- Launch year, lower bound: int (2014)
        LD2 -- Launch year, upper bound: int (2016)
        maxDT -- max mission duration: float (2.0); DTunit -- time unit (yrs)
        maxDV -- max delta-v: float (7.0)
        min -- quantity to minimize, DV or DT (DV)
        wdw_width -- int (0)
        '''
        # stdlib-only helper; imported locally to leave module imports untouched
        from urllib.parse import urlencode
        kwargs = {k: str(v) for k, v in kwargs.items()}
        query = {'NEOs': 'on',
                 'NEAs': 'on',
                 'NECs': 'on',
                 'chk_maxMag': 'on',
                 'maxMag': '25',
                 'chk_maxOCC': 'on',
                 'maxOCC': '4',
                 'chk_target_list': 'on',
                 'target_list': '',
                 'mission_class': 'oneway',
                 'mission_type': 'rendezvous',
                 'LD1': '2014',
                 'LD2': '2016',
                 'maxDT': '2.0',
                 'DTunit': 'yrs',
                 'maxDV': '7.0',
                 'min': 'DV',
                 'wdw_width': '0',
                 'submit': 'Search'}
        query.update(kwargs)
        # BUG FIX: build the query string with urlencode so values containing
        # spaces or '&'/'=' cannot produce a malformed URL (the previous
        # manual concatenation performed no escaping at all).
        self.session.visit(self._url + '?' + urlencode(query))

    def download_csv(self, **kwargs):
        '''Save the csv from a query to self.csv.

        This method calls Trajectory_browser().search(**kwargs) and then saves
        the trajectory data to Trajectory_browser().csv (None on timeout).
        See Trajectory_browser().search() for valid search parameters.
        '''
        self.search(**kwargs)
        # Click the export button on the results page.
        self.session.at_xpath('/html/body/div[2]/button').click()
        timeout = 10
        attempt = 0
        sleep_time = 0.01
        # Poll until the page body stops being HTML (i.e. becomes CSV);
        # the while/else runs only when the loop exits without break.
        while self.session.body().startswith('<!DOCTYPE html>'):
            time.sleep(sleep_time)
            attempt += sleep_time
            if attempt >= timeout:
                self.csv = None
                break
        else:
            self.csv = StringIO(self.session.body())

    def get_DataFrame(self):
        '''Return pandas DataFrame for self.csv (empty frame if no csv).'''
        if self.csv is not None:
            df = pd.read_csv(self.csv)
            # drop the trailing empty column produced by the export
            df = df.iloc[:, :-1]
            return df
        else:
            print(('You must first download a csv! `Trajectory_browser().'
                   'download_csv()`'))
            return pd.DataFrame()
# Smoke test: run a default query and print the resulting table.
if __name__ == '__main__':
    br = Trajectory_browser()
    br.download_csv()
    df = br.get_DataFrame()
    print(df)
| {
"content_hash": "89c7602adf5a9257d2dce0d8dfa1cbcd",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 30.56779661016949,
"alnum_prop": 0.48211810368727476,
"repo_name": "capsulecorplab/Space_Renegades",
"id": "e6393712a3509cab4f9ec60b75870ab43f6a948c",
"size": "3654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/trajectory_browser.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3654"
}
],
"symlink_target": ""
} |
import MySQLdb as mysql
db = mysql.connect(user='root',passwd='Hackathon@2017',db='weathercn',host='localhost')
db.autocommit(True)
cur = db.cursor()
sql = 'select time_stamp,response_time from httpaccess'
cur.execute(sql)
arr = []
for i in cur.fetchall():
arr.append([i[1], i[0]])
if len(arr) > 0:
print arr[-1]
| {
"content_hash": "2e2e9ac78632c8f074f935687be3d100",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 87,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.6832298136645962,
"repo_name": "clovertrail/rocket",
"id": "50d95232f3931f43fd7b00f6b5c6dbc840a6cdd7",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/Weathercn/checkdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10523"
},
{
"name": "CSS",
"bytes": "39883"
},
{
"name": "Go",
"bytes": "20845"
},
{
"name": "HTML",
"bytes": "303913"
},
{
"name": "Java",
"bytes": "14333"
},
{
"name": "Makefile",
"bytes": "677"
},
{
"name": "PowerShell",
"bytes": "568597"
},
{
"name": "Python",
"bytes": "40851"
},
{
"name": "Shell",
"bytes": "239488"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.conf import settings
from molly.utils.i18n import name_in_language
class Migration(DataMigration):
    def forwards(self, orm):
        """
        Move category names to the new i18n format
        """
        for pc in orm.PodcastCategory.objects.all():
            # Create a PodcastCategoryName row carrying the legacy name in
            # the site's default language.
            pc.names.create(language_code=settings.LANGUAGE_CODE,
                            name=pc.name)

    def backwards(self, orm):
        """
        Move category names from the new i18n format
        """
        for pc in orm.PodcastCategory.objects.all():
            pc.name = name_in_language(pc, 'name')
            # BUG FIX: the restored name was assigned but never persisted;
            # without save() the reverse migration was a no-op.
            pc.save()

    models = {
        'podcasts.podcast': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Podcast'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['podcasts.PodcastCategory']", 'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'license': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'logo': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'medium': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
            'most_recent_item_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'provider': ('django.db.models.fields.TextField', [], {}),
            'rss_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
        },
        'podcasts.podcastcategory': {
            'Meta': {'ordering': "('order', 'name')", 'object_name': 'PodcastCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'podcasts.podcastcategoryname': {
            'Meta': {'object_name': 'PodcastCategoryName'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'podcast_category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'names'", 'to': "orm['podcasts.PodcastCategory']"})
        },
        'podcasts.podcastenclosure': {
            'Meta': {'object_name': 'PodcastEnclosure'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'length': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'mimetype': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'podcast_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['podcasts.PodcastItem']"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        },
        'podcasts.podcastitem': {
            'Meta': {'object_name': 'PodcastItem'},
            'author': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'guid': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'license': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'podcast': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['podcasts.Podcast']"}),
            'published_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {'null': 'True'})
        }
    }

    complete_apps = ['podcasts']
| {
"content_hash": "bcf36cda4d24b06e0f113bd5a6eee8e7",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 152,
"avg_line_length": 58.44444444444444,
"alnum_prop": 0.5479509928179129,
"repo_name": "mollyproject/mollyproject",
"id": "c0431e3bd3090d9ceb15e9f32ce4d58debebe4e5",
"size": "4752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "molly/apps/podcasts/migrations/0005_i18n_categories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90319"
},
{
"name": "JavaScript",
"bytes": "76592"
},
{
"name": "Python",
"bytes": "1120664"
},
{
"name": "Shell",
"bytes": "4042"
},
{
"name": "XSLT",
"bytes": "11864"
}
],
"symlink_target": ""
} |
import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
# parser
parser = argparse.ArgumentParser('Plot GPU use')
# CSV: one row per time sample, one integer column per GPU (% utilisation)
parser.add_argument('csv_path', type=str)
args = parser.parse_args()

with open(args.csv_path, 'r') as csvfile:
    plots = csv.reader(csvfile, delimiter=',')
    # y[t][g] = utilisation of GPU g at sample t
    y = [[int(r) for r in row] for row in plots]

x = range(len(y))  # number of iterations
num_gpus = len(y[0])  # number of gpus

# plotting gpu use over time
plt.plot(x, y)
plt.legend(['gpu' + str(n) for n in range(num_gpus)])
plt.xlabel('time')
plt.ylabel('gpu use (%)')
plt.show()

# plotting average gpu use (bar chart, one bar per GPU)
avg_y = np.mean(np.asarray(y), axis=0)
y_pos = np.arange(num_gpus)
plt.bar(y_pos, height=avg_y)
plt.xticks(y_pos, ['gpu' + str(n) for n in range(num_gpus)])
plt.ylabel('Average gpu use (%)')
plt.show()
| {
"content_hash": "00fb6a2533dec9727ce459d4030d0f32",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 60,
"avg_line_length": 22.694444444444443,
"alnum_prop": 0.6719706242350061,
"repo_name": "mari-linhares/tensorflow-workshop",
"id": "4c8690f1eab0d549313fa647b8a26f370e09553d",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_samples/GPUs/plot_gpu_use.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1089919"
},
{
"name": "Jupyter Notebook",
"bytes": "17536001"
},
{
"name": "Python",
"bytes": "151394"
},
{
"name": "Shell",
"bytes": "990"
}
],
"symlink_target": ""
} |
import os
import re
# read content from validate.html
# Regenerate the auto-populated test sections of validate.html from the
# marked.js test fixtures (pairs of .text/.html files).

# read content from validate.html
# BUG FIX: open files with 'with' so the handles are closed; the original
# comprehensions over open(...) leaked every file descriptor.
with open('validate.html') as fh:
    htmlContent = fh.readlines()

# identify template boundaries
preIdx = htmlContent.index([x for x in htmlContent if re.match(r'\s*<!-- START AUTO-POPULATED TESTS -->', x)][0])
postIdx = htmlContent.index([x for x in htmlContent if re.match(r'\s*<!-- END AUTO-POPULATED TESTS -->', x)][0])

# initialize output array with start of validate.html
out = htmlContent[:preIdx + 1]

# files to read
files = [f for f in os.listdir('marked/test/tests') if f.endswith('.text')]

# iterate over files
for f in files:
    # base file name
    base = f.replace('.text', '')
    # skip tests for non-default marked.js modes
    if base.endswith(('.smartypants', '.nogfm', '.breaks')):
        continue
    # extract text and html (skipping blank lines in html)
    with open('marked/test/tests/' + base + '.text') as th:
        text = ''.join(re.sub(r'\s+\n', '\n', ' ' + x) for x in th)
    with open('marked/test/tests/' + base + '.html') as hh:
        html = ''.join(re.sub(r'\s+\n', '\n', ' ' + x)
                       for x in hh if not re.match(r'^\s*$', x))
    # escape ampersands first, then quotes (order matters: the &quot;
    # entities introduced second must not have their & re-escaped... they
    # are produced after the & pass, so this order is required)
    text = text.replace('&', '&amp;')
    text = text.replace('"', '&quot;')
    # generate corresponding output
    out.append(' <!-- ' + base + '-->\n')
    out.append(' <section id="' + base + '" data-text="\n')
    out.append(text + '\n')
    out.append(' ">\n')
    out.append(html + '\n')
    out.append(' </section>\n')

# extend output array with end of validate.html
out.append('\n')
out.extend(htmlContent[postIdx:])

# update validate.html file with new content
with open('validate.html', 'w') as handle:
    handle.writelines(out)
"content_hash": "029bd8b319d323519db38b6193a9f9b7",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 116,
"avg_line_length": 32.73684210526316,
"alnum_prop": 0.579849946409432,
"repo_name": "daryl314/markdown-browser",
"id": "c5ca6feea334dd92b081295f110fefb83ebe54ed",
"size": "1889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extract-tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "14321"
},
{
"name": "CSS",
"bytes": "119808"
},
{
"name": "Dockerfile",
"bytes": "256"
},
{
"name": "HTML",
"bytes": "548568"
},
{
"name": "JavaScript",
"bytes": "2624935"
},
{
"name": "Makefile",
"bytes": "3398"
},
{
"name": "Python",
"bytes": "204574"
},
{
"name": "Shell",
"bytes": "1818"
},
{
"name": "TeX",
"bytes": "11078"
},
{
"name": "Vim script",
"bytes": "5656"
}
],
"symlink_target": ""
} |
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
import mechanize
def query_url(url, br=None):
    """Open url['url'] with a mechanize Browser, ignoring robots.txt.

    url -- mapping with a 'url' key holding the target address
    br  -- optional mechanize.Browser to reuse; a fresh one (with robots
           handling disabled) is created when None
    """
    # BUG FIX: identity comparison with None ('is None', not '== None').
    if br is None:
        br = mechanize.Browser()
        br.set_handle_robots(False)
    # ascii/ignore encoding drops non-ascii characters from the URL
    br.open(url['url'].encode("ascii", "ignore"))
    return
| {
"content_hash": "516f358d2dca7d8ab6600417bdefd22d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 21.76923076923077,
"alnum_prop": 0.6254416961130742,
"repo_name": "cmu-db/db-webcrawler",
"id": "85a79ea8382c9f984d7cc11fc38c2aff63397bb9",
"size": "283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/drivers/submit/query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "118058"
},
{
"name": "HTML",
"bytes": "56004"
},
{
"name": "JavaScript",
"bytes": "381"
},
{
"name": "Python",
"bytes": "231951"
},
{
"name": "Ruby",
"bytes": "900"
},
{
"name": "Shell",
"bytes": "4025"
}
],
"symlink_target": ""
} |
"""Abstract base class of Port-specific entry points for the layout tests
test infrastructure (the Port and Driver classes)."""
import cgi
import difflib
import errno
import itertools
import json
import logging
import os
import operator
import optparse
import re
import sys
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
from webkitpy.common import find_files
from webkitpy.common import read_checksum_from_png
from webkitpy.common.memoized import memoized
from webkitpy.common.system import path
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.path import cygpath
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port import config as port_config
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import server_process
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.servers import apache_http
from webkitpy.layout_tests.servers import pywebsocket
from webkitpy.layout_tests.servers import wptserve
_log = logging.getLogger(__name__)
# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
class Port(object):
"""Abstract class for Port-specific hooks for the layout_test package."""
# Subclasses override this. This should indicate the basic implementation
# part of the port name, e.g., 'mac', 'win', 'gtk'; there is probably (?)
# one unique value per class.
# FIXME: We should probably rename this to something like 'implementation_name'.
port_name = None
# Test names resemble unix relative paths, and use '/' as a directory separator.
TEST_PATH_SEPARATOR = '/'
ALL_BUILD_TYPES = ('debug', 'release')
CONTENT_SHELL_NAME = 'content_shell'
# True if the port as aac and mp3 codecs built in.
PORT_HAS_AUDIO_CODECS_BUILT_IN = False
ALL_SYSTEMS = (
('snowleopard', 'x86'),
('lion', 'x86'),
# FIXME: We treat Retina (High-DPI) devices as if they are running
# a different operating system version. This isn't accurate, but will work until
# we need to test and support baselines across multiple O/S versions.
('retina', 'x86'),
('mountainlion', 'x86'),
('mavericks', 'x86'),
('yosemite', 'x86'),
('xp', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'),
# FIXME: Technically this should be 'arm', but adding a third architecture type breaks TestConfigurationConverter.
# If we need this to be 'arm' in the future, then we first have to fix TestConfigurationConverter.
('icecreamsandwich', 'x86'),
)
ALL_BASELINE_VARIANTS = [
'mac-yosemite', 'mac-mavericks', 'mac-retina', 'mac-mountainlion', 'mac-lion', 'mac-snowleopard',
'win-win7', 'win-xp',
'linux-x86_64', 'linux-x86',
]
CONFIGURATION_SPECIFIER_MACROS = {
'mac': ['snowleopard', 'lion', 'mountainlion', 'retina', 'mavericks', 'yosemite'],
'win': ['xp', 'win7'],
'linux': ['lucid'],
'android': ['icecreamsandwich'],
}
DEFAULT_BUILD_DIRECTORIES = ('out',)
# overridden in subclasses.
FALLBACK_PATHS = {}
SUPPORTED_VERSIONS = []
# URL to the build requirements page.
BUILD_REQUIREMENTS_URL = ''
@classmethod
def latest_platform_fallback_path(cls):
    """Return the baseline fallback path for the newest supported OS version.

    Relies on subclasses populating FALLBACK_PATHS and SUPPORTED_VERSIONS;
    both are empty on this abstract base, so it must only be called on a
    concrete port class.
    """
    return cls.FALLBACK_PATHS[cls.SUPPORTED_VERSIONS[-1]]
@classmethod
def _static_build_path(cls, filesystem, build_directory, chromium_base, configuration, comps):
if build_directory:
return filesystem.join(build_directory, configuration, *comps)
hits = []
for directory in cls.DEFAULT_BUILD_DIRECTORIES:
base_dir = filesystem.join(chromium_base, directory, configuration)
path = filesystem.join(base_dir, *comps)
if filesystem.exists(path):
hits.append((filesystem.mtime(path), path))
if hits:
hits.sort(reverse=True)
return hits[0][1] # Return the newest file found.
# We have to default to something, so pick the last one.
return filesystem.join(base_dir, *comps)
@classmethod
def determine_full_port_name(cls, host, options, port_name):
    """Return a fully-specified port name that can be used to construct objects."""
    # Subclasses will usually override this.
    # NOTE(review): cls.port_name is None on this abstract base, so
    # startswith() would raise TypeError unless a subclass sets port_name.
    assert port_name.startswith(cls.port_name)
    return port_name
def __init__(self, host, port_name, options=None, **kwargs):
    """Initialize the port.

    host      -- supplies .executive and .filesystem (SystemHost-like)
    port_name -- fully-specified port name (see determine_full_port_name)
    options   -- optional optparse.Values-like options object
    """
    # This value may be different from cls.port_name by having version modifiers
    # and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
    self._name = port_name
    # These are default values that should be overridden in a subclasses.
    self._version = ''
    self._architecture = 'x86'
    # FIXME: Ideally we'd have a package-wide way to get a
    # well-formed options object that had all of the necessary
    # options defined on it.
    self._options = options or optparse.Values()
    self.host = host
    self._executive = host.executive
    self._filesystem = host.filesystem
    self._webkit_finder = WebKitFinder(host.filesystem)
    self._config = port_config.Config(self._executive, self._filesystem, self.port_name)
    # Servers/helpers are created lazily; None until first use.
    self._helper = None
    self._http_server = None
    self._websocket_server = None
    self._is_wpt_enabled = hasattr(options, 'enable_wptserve') and options.enable_wptserve
    self._wpt_server = None
    self._image_differ = None
    self._server_process_constructor = server_process.ServerProcess  # overridable for testing
    self._http_lock = None  # FIXME: Why does this live on the port object?
    self._dump_reader = None
    # Python's Popen has a bug that causes any pipes opened to a
    # process that can't be executed to be leaked. Since this
    # code is specifically designed to tolerate exec failures
    # to gracefully handle cases where wdiff is not installed,
    # the bug results in a massive file descriptor leak. As a
    # workaround, if an exec failure is ever experienced for
    # wdiff, assume it's not available. This will leak one
    # file descriptor but that's better than leaking each time
    # wdiff would be run.
    #
    # http://mail.python.org/pipermail/python-list/
    # 2008-August/505753.html
    # http://bugs.python.org/issue3210
    self._wdiff_available = None
    # FIXME: prettypatch.py knows this path, why is it copied here?
    self._pretty_patch_path = self.path_from_webkit_base("Tools", "Scripts", "webkitruby", "PrettyPatch", "prettify.rb")
    self._pretty_patch_available = None
    if not hasattr(options, 'configuration') or not options.configuration:
        self.set_option_default('configuration', self.default_configuration())
    self._test_configuration = None
    self._reftest_list = {}
    self._results_directory = None
    self._virtual_test_suites = None
def buildbot_archives_baselines(self):
    """Whether the buildbots archive baselines for this port."""
    return True

def additional_driver_flag(self):
    """Extra command-line flags to pass to the test driver."""
    # content_shell must be told explicitly to run in layout-test mode.
    if self.driver_name() != self.CONTENT_SHELL_NAME:
        return []
    return ['--run-layout-test']

def supports_per_test_timeout(self):
    """Whether individual tests may override the default timeout."""
    return False

def default_pixel_tests(self):
    """Whether pixel tests are enabled by default."""
    return True

def default_smoke_test_only(self):
    """Whether only the smoke tests should be run by default."""
    return False
def default_timeout_ms(self):
    """Default per-test timeout in milliseconds."""
    # Debug is usually 2x-3x slower than Release, so allow triple the time.
    multiplier = 3 if self.get_option('configuration') == 'Debug' else 1
    return multiplier * 6000
def driver_stop_timeout(self):
    """Return the amount of time in seconds to wait before killing the process in driver.stop()."""
    # Wait at least 3 seconds, scaled up by however much this run's timeout
    # exceeds the default (covers slow setups such as ASAN or Valgrind).
    timeout_scale = float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
    return 3.0 * timeout_scale
def wdiff_available(self):
    """Probe for the wdiff tool once and cache the answer."""
    if self._wdiff_available is not None:
        return self._wdiff_available
    self._wdiff_available = self.check_wdiff(logging=False)
    return self._wdiff_available

def pretty_patch_available(self):
    """Probe for PrettyPatch once and cache the answer."""
    if self._pretty_patch_available is not None:
        return self._pretty_patch_available
    self._pretty_patch_available = self.check_pretty_patch(logging=False)
    return self._pretty_patch_available
def default_batch_size(self):
    """Return the default batch size to use for this port."""
    if self.get_option('enable_sanitizer'):
        # ASAN/MSAN/TSAN use more memory than regular content_shell and
        # usage may grow over time; relaunching the driver periodically
        # keeps it under control.
        return 40
    # None means an unlimited (infinite) batch size.
    return None
def default_child_processes(self):
    """Default worker count: one child process per CPU."""
    return self._executive.cpu_count()

def max_drivers_per_process(self):
    """Upper bound on the drivers a single worker process may own."""
    return 2
def default_max_locked_shards(self):
    """Return the number of "locked" shards to run in parallel (like the http tests).

    Uses a quarter of the worker count, with a floor of one shard.
    """
    # Explicit floor division: a bare "/" here depended on Python 2 integer
    # division and would produce a float under Python 3 semantics.
    max_locked_shards = int(self.default_child_processes()) // 4
    return max_locked_shards or 1
def baseline_path(self):
    """Return the absolute path to the directory to store new baselines in for this port."""
    # FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
    return self.baseline_version_dir()

def baseline_platform_dir(self):
    """Return the absolute path to the default (version-independent) platform-specific results."""
    return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)

def baseline_version_dir(self):
    """Return the absolute path to the platform-and-version-specific results."""
    # The most-specific entry of the search path is the version directory.
    return self.baseline_search_path()[0]
def virtual_baseline_search_path(self, test_name):
    """Baseline search path for a virtual test, or None if it isn't one."""
    suite = self.lookup_virtual_suite(test_name)
    if not suite:
        return None
    join = self._filesystem.join
    return [join(directory, suite.name) for directory in self.default_baseline_search_path()]

def baseline_search_path(self):
    """Full ordered list of directories searched for baselines."""
    extra_dirs = self.get_option('additional_platform_directory', [])
    return extra_dirs + self._compare_baseline() + self.default_baseline_search_path()

def default_baseline_search_path(self):
    """Return a list of absolute paths to directories to search under for
    baselines. The directories are searched in order."""
    fallback_names = self.FALLBACK_PATHS[self.version()]
    return [self._webkit_baseline_path(name) for name in fallback_names]
@memoized
def _compare_baseline(self):
    """Baseline search path of the port named by --compare-port (or [])."""
    factory = PortFactory(self.host)
    target_port = self.get_option('compare_port')
    if not target_port:
        return []
    return factory.get(target_port).default_baseline_search_path()
def _check_file_exists(self, path_to_file, file_description,
                       override_step=None, logging=True):
    """Verify the file is present where expected or log an error.

    Args:
        path_to_file: absolute path of the file to look for.
        file_description: the (human friendly) name or description of the
            file you're looking for (e.g., "HTTP Server"), used for error
            logging.
        override_step: an optional string to be logged if the check fails.
        logging: whether or not to log the error messages.
    Returns True when the file exists, False otherwise."""
    if self._filesystem.exists(path_to_file):
        return True
    if logging:
        _log.error('Unable to find %s' % file_description)
        _log.error(' at %s' % path_to_file)
        if override_step:
            _log.error(' %s' % override_step)
            _log.error('')
    return False
def check_build(self, needs_http, printer):
    """Check that everything needed to run layout tests is present.

    Verifies the driver binary (optionally rebuilding), the layout test
    helper, image_diff, the crash-dump reader and httpd, logging any
    problems found along the way.

    Returns test_run_results.OK_EXIT_STATUS when the build is usable,
    UNEXPECTED_ERROR_EXIT_STATUS otherwise."""
    result = True
    dump_render_tree_binary_path = self._path_to_driver()
    result = self._check_file_exists(dump_render_tree_binary_path,
                                     'test driver') and result
    # NOTE(review): this else logs a blank line whenever the driver WAS
    # found (or --build is off); the condition looks like it may have been
    # meant as a plain "if not result:" -- confirm against upstream.
    if not result and self.get_option('build'):
        result = self._check_driver_build_up_to_date(
            self.get_option('configuration'))
    else:
        _log.error('')
    helper_path = self._path_to_helper()
    if helper_path:
        result = self._check_file_exists(helper_path,
                                         'layout test helper') and result
    if self.get_option('pixel_tests'):
        result = self.check_image_diff(
            'To override, invoke with --no-pixel-tests') and result
    # It's okay if pretty patch and wdiff aren't available, but we will at least log messages.
    self._pretty_patch_available = self.check_pretty_patch()
    self._wdiff_available = self.check_wdiff()
    if self._dump_reader:
        result = self._dump_reader.check_is_functional() and result
    if needs_http:
        result = self.check_httpd() and result
    return test_run_results.OK_EXIT_STATUS if result else test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
def _check_driver(self):
    """Log and return False when the driver binary is missing."""
    driver_path = self._path_to_driver()
    if self._filesystem.exists(driver_path):
        return True
    _log.error("%s was not found at %s" % (self.driver_name(), driver_path))
    return False

def _check_port_build(self):
    # Ports can override this method to do additional checks.
    return True
def check_sys_deps(self, needs_http):
    """If the port needs to do some runtime checks to ensure that the
    tests can be run successfully, it should override this routine.
    This step can be skipped with --nocheck-sys-deps.

    Returns whether the system is properly configured."""
    cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
    local_error = ScriptError()

    # Capture the exit code of the driver without raising.
    def error_handler(script_error):
        local_error.exit_code = script_error.exit_code
    output = self._executive.run_command(cmd, error_handler=error_handler)
    if local_error.exit_code:
        _log.error('System dependencies check failed.')
        _log.error('To override, invoke with --nocheck-sys-deps')
        _log.error('')
        _log.error(output)
        # BUG FIX: the original compared with "is not ''", an object
        # identity test that is unreliable for strings; compare by value.
        if self.BUILD_REQUIREMENTS_URL != '':
            _log.error('')
            _log.error('For complete build requirements, please see:')
            _log.error(self.BUILD_REQUIREMENTS_URL)
        return test_run_results.SYS_DEPS_EXIT_STATUS
    return test_run_results.OK_EXIT_STATUS
def check_image_diff(self, override_step=None, logging=True):
    """This routine is used to check whether image_diff binary exists."""
    image_diff_path = self._path_to_image_diff()
    if self._filesystem.exists(image_diff_path):
        return True
    _log.error("image_diff was not found at %s" % image_diff_path)
    return False
def check_pretty_patch(self, logging=True):
    """Checks whether we can use the PrettyPatch ruby script."""
    try:
        _ = self._executive.run_command(['ruby', '--version'])
    except OSError as e:  # FIX: "except OSError, e" is Python-2-only syntax.
        if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
            if logging:
                _log.warning("Ruby is not installed; can't generate pretty patches.")
                _log.warning('')
            return False
    if not self._filesystem.exists(self._pretty_patch_path):
        if logging:
            _log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
            _log.warning('')
        return False
    return True
def check_wdiff(self, logging=True):
    """Return True when the wdiff tool is configured and runnable."""
    wdiff_path = self._path_to_wdiff()
    if not wdiff_path:
        # Don't need to log here since this is the port choosing not to use wdiff.
        return False
    try:
        _ = self._executive.run_command([wdiff_path, '--help'])
    except OSError:
        if logging:
            message = self._wdiff_missing_message()
            if message:
                for line in message.splitlines():
                    _log.warning(' ' + line)
                _log.warning('')
        return False
    return True
def _wdiff_missing_message(self):
    """Message logged when wdiff is configured but cannot be executed."""
    return 'wdiff is not installed; please install it to generate word-by-word diffs.'
def check_httpd(self):
    """Return True when a working httpd binary is available."""
    httpd_path = self.path_to_apache()
    try:
        server_name = self._filesystem.basename(httpd_path)
        env = self.setup_environ_for_server(server_name)
        exit_code = self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True)
    except OSError:
        _log.error("No httpd found. Cannot run http tests.")
        return False
    if exit_code != 0:
        _log.error("httpd seems broken. Cannot run http tests.")
        return False
    return True
def do_text_results_differ(self, expected_text, actual_text):
    """True when the expected and actual text outputs don't match."""
    return expected_text != actual_text

def do_audio_results_differ(self, expected_audio, actual_audio):
    """True when the expected and actual audio outputs don't match."""
    return expected_audio != actual_audio
def diff_image(self, expected_contents, actual_contents):
    """Compare two images and return a tuple of an image diff, and an error string.

    If an error occurs (like image_diff isn't found, or crashes), we log an
    error and return True (for a diff).
    """
    # If only one of them exists, return that one.
    if not actual_contents and not expected_contents:
        return (None, None)
    if not actual_contents:
        return (expected_contents, None)
    if not expected_contents:
        return (actual_contents, None)
    tempdir = self._filesystem.mkdtemp()
    expected_filename = self._filesystem.join(str(tempdir), "expected.png")
    self._filesystem.write_binary_file(expected_filename, expected_contents)
    actual_filename = self._filesystem.join(str(tempdir), "actual.png")
    self._filesystem.write_binary_file(actual_filename, actual_contents)
    diff_filename = self._filesystem.join(str(tempdir), "diff.png")
    # image_diff needs native win paths as arguments, so we need to convert them if running under cygwin.
    native_expected_filename = self._convert_path(expected_filename)
    native_actual_filename = self._convert_path(actual_filename)
    native_diff_filename = self._convert_path(diff_filename)
    executable = self._path_to_image_diff()
    # Note that although we are handed 'old', 'new', image_diff wants 'new', 'old'.
    # (typo fix: this local was previously spelled "comand")
    command = [executable, '--diff', native_actual_filename, native_expected_filename, native_diff_filename]
    result = None
    err_str = None
    try:
        exit_code = self._executive.run_command(command, return_exit_code=True)
        if exit_code == 0:
            # The images are the same.
            result = None
        elif exit_code == 1:
            result = self._filesystem.read_binary_file(native_diff_filename)
        else:
            err_str = "Image diff returned an exit code of %s. See http://crbug.com/278596" % exit_code
    except OSError as e:  # FIX: "except OSError, e" is Python-2-only syntax.
        err_str = 'error running image diff: %s' % str(e)
    finally:
        self._filesystem.rmtree(str(tempdir))
    return (result, err_str or None)
def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
    """Returns a string containing the diff of the two text strings
    in 'unified diff' format."""
    # The filenames show up in the diff output, make sure they're
    # raw bytes and not unicode, so that they don't trigger join()
    # trying to decode the input.
    def to_raw_bytes(string_value):
        # Python 2 'unicode'; a no-op for plain byte strings.
        if isinstance(string_value, unicode):
            return string_value.encode('utf-8')
        return string_value
    expected_filename = to_raw_bytes(expected_filename)
    actual_filename = to_raw_bytes(actual_filename)
    diff = difflib.unified_diff(expected_text.splitlines(True),
                                actual_text.splitlines(True),
                                expected_filename,
                                actual_filename)
    # The diff generated by the difflib is incorrect if one of the files
    # does not have a newline at the end of the file and it is present in
    # the diff. Relevant Python issue: http://bugs.python.org/issue2142
    def diff_fixup(diff):
        # Emit a "\ No newline at end of file" marker after any diff line
        # that lacks a trailing newline, matching GNU diff's convention.
        for line in diff:
            yield line
            if not line.endswith('\n'):
                yield '\n\ No newline at end of file\n'
    return ''.join(diff_fixup(diff))
def driver_name(self):
    """Name of the driver binary; honors the --driver-name option."""
    return self.get_option('driver_name') or self.CONTENT_SHELL_NAME
def expected_baselines_by_extension(self, test_name):
    """Returns a dict mapping baseline suffix to relative path for each baseline in
    a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
    # FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
    # We should probably rename them both.
    baseline_dict = {}
    reference_files = self.reference_files(test_name)
    if reference_files:
        # FIXME: How should this handle more than one type of reftest?
        expectation, ref_path = reference_files[0]
        baseline_dict['.' + expectation] = self.relative_test_filename(ref_path)
    for extension in self.baseline_extensions():
        path = self.expected_filename(test_name, extension, return_default=False)
        baseline_dict[extension] = self.relative_test_filename(path) if path else path
    return baseline_dict

def baseline_extensions(self):
    """Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
    return ('.wav', '.txt', '.png')
def expected_baselines(self, test_name, suffix, all_baselines=False):
    """Given a test name, finds where the baseline results are located.

    Args:
        test_name: name of test file (usually a relative path under LayoutTests/)
        suffix: file suffix of the expected results, including dot; e.g.
            '.txt' or '.png'. This should not be None, but may be an empty
            string.
        all_baselines: If True, return an ordered list of all baseline paths
            for the given platform. If False, return only the first one.
    Returns
        a list of ( platform_dir, results_filename ), where
            platform_dir - abs path to the top of the results tree (or test
                tree)
            results_filename - relative path from top of tree to the results
                file
            (port.join() of the two gives you the full path to the file,
                unless None was returned.)
        Return values will be in the format appropriate for the current
        platform (e.g., "\\" for path separators on Windows). If the results
        file is not found, then None will be returned for the directory,
        but the expected relative pathname will still be returned.

        This routine is generic but lives here since it is used in
        conjunction with the other baseline and filename routines that are
        platform specific.
    """
    baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
    baseline_search_path = self.baseline_search_path()
    baselines = []
    for platform_dir in baseline_search_path:
        if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
            baselines.append((platform_dir, baseline_filename))
        # Most callers only want the most specific baseline, so stop at the
        # first hit unless all_baselines was requested.
        if not all_baselines and baselines:
            return baselines
    # If it wasn't found in a platform directory, return the expected
    # result in the test directory, even if no such file actually exists.
    platform_dir = self.layout_tests_dir()
    if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
        baselines.append((platform_dir, baseline_filename))
    if baselines:
        return baselines
    # Nothing found on disk: report where the generic baseline would live.
    return [(None, baseline_filename)]
def expected_filename(self, test_name, suffix, return_default=True):
    """Given a test name, returns an absolute path to its expected results.

    If no expected results are found in any of the searched directories,
    the directory in which the test itself is located will be returned.
    The return value is in the format appropriate for the platform
    (e.g., "\\" for path separators on windows).

    Args:
        test_name: name of test file (usually a relative path under LayoutTests/)
        suffix: file suffix of the expected results, including dot; e.g. '.txt'
            or '.png'. This should not be None, but may be an empty string.
        return_default: if True, returns the path to the generic expectation if nothing
            else is found; if False, returns None.

    This routine is generic but is implemented here to live alongside
    the other baseline and filename manipulation routines.
    """
    # FIXME: The [0] here is very mysterious, as is the destructured return.
    platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
    if platform_dir:
        return self._filesystem.join(platform_dir, baseline_filename)
    # Virtual tests fall back to the baselines of their base test.
    actual_test_name = self.lookup_virtual_test_base(test_name)
    if actual_test_name:
        return self.expected_filename(actual_test_name, suffix)
    if return_default:
        return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
    return None
def expected_checksum(self, test_name):
    """Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
    png_path = self.expected_filename(test_name, '.png')
    if not self._filesystem.exists(png_path):
        return None
    with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
        return read_checksum_from_png.read_checksum(filehandle)

def expected_image(self, test_name):
    """Returns the image we expect the test to produce."""
    baseline_path = self.expected_filename(test_name, '.png')
    if self._filesystem.exists(baseline_path):
        return self._filesystem.read_binary_file(baseline_path)
    return None

def expected_audio(self, test_name):
    """Returns the audio baseline for the test, or None if there is none."""
    baseline_path = self.expected_filename(test_name, '.wav')
    if self._filesystem.exists(baseline_path):
        return self._filesystem.read_binary_file(baseline_path)
    return None

def expected_text(self, test_name):
    """Returns the text output we expect the test to produce, or None
    if we don't expect there to be any text output.
    End-of-line characters are normalized to '\n'."""
    # FIXME: DRT output is actually utf-8, but since we don't decode the
    # output from DRT (instead treating it as a binary string), we read the
    # baselines as a binary string, too.
    baseline_path = self.expected_filename(test_name, '.txt')
    if not self._filesystem.exists(baseline_path):
        return None
    return self._filesystem.read_binary_file(baseline_path).replace("\r\n", "\n")
def _get_reftest_list(self, test_name):
    """Parse (and cache) the reftest.list for the test's directory."""
    dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
    try:
        return self._reftest_list[dirname]
    except KeyError:
        parsed = Port._parse_reftest_list(self._filesystem, dirname)
        self._reftest_list[dirname] = parsed
        return parsed
@staticmethod
def _parse_reftest_list(filesystem, test_dirpath):
    """Parse reftest.list in test_dirpath into {test_path: [(expectation, ref_path), ...]}.

    Returns None when the directory has no reftest.list file."""
    reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
    if not filesystem.isfile(reftest_list_path):
        return None
    parsed_list = {}
    for line in filesystem.read_text_file(reftest_list_path).split('\n'):
        line = re.sub('#.+$', '', line)  # strip trailing comments
        split_line = line.split()
        if len(split_line) == 4:
            # FIXME: Probably one of mozilla's extensions in the reftest.list format. Do we need to support this?
            _log.warning("unsupported reftest.list line '%s' in %s" % (line, reftest_list_path))
            continue
        if len(split_line) < 3:
            continue
        expectation_type, test_file, ref_file = split_line
        key = filesystem.join(test_dirpath, test_file)
        parsed_list.setdefault(key, []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
    return parsed_list
def reference_files(self, test_name):
    """Return a list of expectation (== or !=) and filename pairs"""
    reftest_list = self._get_reftest_list(test_name)
    if reftest_list:
        return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), [])  # pylint: disable=E1103
    # No reftest.list: look for -expected / -expected-mismatch files
    # sitting next to the test itself.
    found = []
    for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
        for extension in Port._supported_file_extensions:
            candidate = self.expected_filename(test_name, prefix + extension)
            if self._filesystem.exists(candidate):
                found.append((expectation, candidate))
    return found
def tests(self, paths):
"""Return the list of tests found matching paths."""
tests = self._real_tests(paths)
suites = self.virtual_test_suites()
if paths:
tests.extend(self._virtual_tests_matching_paths(paths, suites))
else:
tests.extend(self._all_virtual_tests(suites))
return tests
def _real_tests(self, paths):
    """Find on-disk (non-virtual) tests matching paths."""
    # When collecting test cases, skip these directories
    skipped_directories = set(['.svn', '_svn', 'platform', 'resources',
                               'support', 'script-tests', 'reference', 'reftest'])
    found = find_files.find(self._filesystem, self.layout_tests_dir(), paths,
                            skipped_directories, Port.is_test_file, self.test_key)
    return [self.relative_test_filename(path) for path in found]
# When collecting test cases, we include any file with these extensions.
_supported_file_extensions = set(['.html', '.xml', '.xhtml', '.xht', '.pl',
'.htm', '.php', '.svg', '.mht', '.pdf'])
@staticmethod
# If any changes are made here be sure to update the isUsedInReftest method in old-run-webkit-tests as well.
def is_reference_html_file(filesystem, dirname, filename):
if filename.startswith('ref-') or filename.startswith('notref-'):
return True
filename_wihout_ext, unused = filesystem.splitext(filename)
for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
if filename_wihout_ext.endswith(suffix):
return True
return False
@staticmethod
def _has_supported_extension(filesystem, filename):
"""Return true if filename is one of the file extensions we want to run a test on."""
extension = filesystem.splitext(filename)[1]
return extension in Port._supported_file_extensions
@staticmethod
def is_test_file(filesystem, dirname, filename):
return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
ALL_TEST_TYPES = ['audio', 'harness', 'pixel', 'ref', 'text', 'unknown']
def test_type(self, test_name):
fs = self._filesystem
if fs.exists(self.expected_filename(test_name, '.png')):
return 'pixel'
if fs.exists(self.expected_filename(test_name, '.wav')):
return 'audio'
if self.reference_files(test_name):
return 'ref'
txt = self.expected_text(test_name)
if txt:
if 'layer at (0,0) size 800x600' in txt:
return 'pixel'
for line in txt.splitlines():
if line.startswith('FAIL') or line.startswith('TIMEOUT') or line.startswith('PASS'):
return 'harness'
return 'text'
return 'unknown'
def test_key(self, test_name):
"""Turns a test name into a list with two sublists, the natural key of the
dirname, and the natural key of the basename.
This can be used when sorting paths so that files in a directory.
directory are kept together rather than being mixed in with files in
subdirectories."""
dirname, basename = self.split_test(test_name)
return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR), self._natural_sort_key(basename))
def _natural_sort_key(self, string_to_split):
    """Turn a string into a list of string and number chunks: "z23a" -> ["z", 23, "a"].

    This can be used to implement "natural sort" order. See:
    http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
    http://nedbatchelder.com/blog/200712.html#e20071211T054956
    """
    def to_int_if_possible(chunk):
        try:
            return int(chunk)
        except ValueError:
            return chunk
    # The capturing group keeps the digit runs in the split result.
    return [to_int_if_possible(chunk) for chunk in re.split(r'(\d+)', string_to_split)]
def test_dirs(self):
"""Returns the list of top-level test directories."""
layout_tests_dir = self.layout_tests_dir()
return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
self._filesystem.listdir(layout_tests_dir))
@memoized
def test_isfile(self, test_name):
    """Return True if the test name refers to an existing test file,
    either directly or via its virtual-test base.

    (The previous docstring said "directory of tests", a copy/paste
    error from test_isdir below.)"""
    # Used by test_expectations.py to apply rules to whole directories.
    if self._filesystem.isfile(self.abspath_for_test(test_name)):
        return True
    base = self.lookup_virtual_test_base(test_name)
    return base and self._filesystem.isfile(self.abspath_for_test(base))

@memoized
def test_isdir(self, test_name):
    """Return True if the test name refers to a directory of tests."""
    # Used by test_expectations.py to apply rules to whole directories.
    if self._filesystem.isdir(self.abspath_for_test(test_name)):
        return True
    base = self.lookup_virtual_test_base(test_name)
    return base and self._filesystem.isdir(self.abspath_for_test(base))

@memoized
def test_exists(self, test_name):
    """Return True if the test name refers to an existing test or baseline."""
    # Used by test_expectations.py to determine if an entry refers to a
    # valid test and by printing.py to determine if baselines exist.
    return self.test_isfile(test_name) or self.test_isdir(test_name)
def split_test(self, test_name):
    """Splits a test name into the 'directory' part and the 'basename' part."""
    index = test_name.rfind(self.TEST_PATH_SEPARATOR)
    # No separator (or a leading one) means there is no directory part.
    if index < 1:
        return ('', test_name)
    # Note: the separator stays attached to the basename.
    return (test_name[:index], test_name[index:])

def normalize_test_name(self, test_name):
    """Returns a normalized version of the test name or test directory."""
    if test_name.endswith('/'):
        return test_name
    # Directories are normalized to carry a trailing slash.
    if self.test_isdir(test_name):
        return test_name + '/'
    return test_name
def driver_cmd_line(self):
    """Prints the DRT command line that will be used."""
    return self.create_driver(0).cmd_line(self.get_option('pixel_tests'), [])

def update_baseline(self, baseline_path, data):
    """Updates the baseline for a test.

    Args:
        baseline_path: the actual path to use for baseline, not the path to
            the test. This function is used to update either generic or
            platform-specific baselines, but we can't infer which here.
        data: contents of the baseline.
    """
    self._filesystem.write_binary_file(baseline_path, data)
# FIXME: update callers to create a finder and call it instead of these next five routines (which should be protected).
def webkit_base(self):
    """Delegates to WebKitFinder.webkit_base()."""
    return self._webkit_finder.webkit_base()

def path_from_webkit_base(self, *comps):
    """Delegates to WebKitFinder.path_from_webkit_base()."""
    return self._webkit_finder.path_from_webkit_base(*comps)

def path_from_chromium_base(self, *comps):
    """Delegates to WebKitFinder.path_from_chromium_base()."""
    return self._webkit_finder.path_from_chromium_base(*comps)

def path_to_script(self, script_name):
    """Delegates to WebKitFinder.path_to_script()."""
    return self._webkit_finder.path_to_script(script_name)

def layout_tests_dir(self):
    """Delegates to WebKitFinder.layout_tests_dir()."""
    return self._webkit_finder.layout_tests_dir()

def perf_tests_dir(self):
    """Delegates to WebKitFinder.perf_tests_dir()."""
    return self._webkit_finder.perf_tests_dir()
def skipped_layout_tests(self, test_list):
    """Returns tests skipped outside of the TestExpectations files."""
    skipped = set(self._skipped_tests_for_unsupported_features(test_list))
    # We explicitly skip any tests in LayoutTests/w3c if need be to avoid running any tests
    # left over from the old DEPS-pulled repos.
    # We also will warn at the end of the test run if these directories still exist.
    #
    # TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
    # Remove the check in controllers/manager.py as well.
    w3c_dir = self._filesystem.join(self.layout_tests_dir(), 'w3c')
    if self._filesystem.isdir(w3c_dir):
        skipped.add('w3c')
    return skipped
def _tests_from_skipped_file_contents(self, skipped_file_contents):
    """Parse a Skipped file's text into a list of test names."""
    tests_to_skip = []
    for line in skipped_file_contents.split('\n'):
        # Best to normalize directory names to not include the trailing slash.
        entry = line.strip().rstrip('/')
        if not entry or entry.startswith('#'):
            continue
        tests_to_skip.append(entry)
    return tests_to_skip
def _expectations_from_skipped_files(self, skipped_file_paths):
    """Collect test names from the Skipped files under the given search paths."""
    tests_to_skip = []
    for search_path in skipped_file_paths:
        filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
        if not self._filesystem.exists(filename):
            _log.debug("Skipped does not exist: %s" % filename)
            continue
        _log.debug("Using Skipped file: %s" % filename)
        contents = self._filesystem.read_text_file(filename)
        tests_to_skip.extend(self._tests_from_skipped_file_contents(contents))
    return tests_to_skip
@memoized
def skipped_perf_tests(self):
    """Tests listed in the perf tests' Skipped file (cached)."""
    return self._expectations_from_skipped_files([self.perf_tests_dir()])

def skips_perf_test(self, test_name):
    """True when test_name, or a skipped directory containing it, is listed."""
    for test_or_category in self.skipped_perf_tests():
        if test_or_category == test_name:
            return True
        category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
        if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
            return True
    return False
def is_chromium(self):
    """This is the Chromium (Blink) port."""
    return True

def name(self):
    """Returns a name that uniquely identifies this particular type of port
    (e.g., "mac-snowleopard" or "linux-x86_x64") and can be passed
    to factory.get() to instantiate the port."""
    return self._name

def operating_system(self):
    # Subclasses should override this default implementation.
    return 'mac'

def version(self):
    """Returns a string indicating the version of a given platform, e.g.
    'leopard' or 'xp'.

    This is used to help identify the exact port when parsing test
    expectations, determining search paths, and logging information."""
    return self._version

def architecture(self):
    """CPU architecture string, e.g. 'x86'."""
    return self._architecture

def get_option(self, name, default_value=None):
    """Read an option off the options object, falling back to a default."""
    return getattr(self._options, name, default_value)

def set_option_default(self, name, default_value):
    """Set an option only if it does not already have a value."""
    return self._options.ensure_value(name, default_value)
@memoized
def path_to_generic_test_expectations_file(self):
    """Absolute path of the version-independent TestExpectations file."""
    return self._filesystem.join(self.layout_tests_dir(), 'TestExpectations')

def relative_test_filename(self, filename):
    """Returns a test_name a relative unix-style path for a filename under the LayoutTests
    directory. Ports may legitimately return abspaths here if no relpath makes sense."""
    # Ports that run on windows need to override this method to deal with
    # filenames with backslashes in them.
    if not filename.startswith(self.layout_tests_dir()):
        return self.host.filesystem.abspath(filename)
    return self.host.filesystem.relpath(filename, self.layout_tests_dir())

@memoized
def abspath_for_test(self, test_name):
    """Returns the full path to the file for a given test name. This is the
    inverse of relative_test_filename()."""
    return self._filesystem.join(self.layout_tests_dir(), test_name)
def results_directory(self):
    """Absolute path to the place to store the test results (uses --results-directory)."""
    if not self._results_directory:
        chosen = self.get_option('results_directory') or self.default_results_directory()
        self._results_directory = self._filesystem.abspath(chosen)
    return self._results_directory

def perf_results_directory(self):
    """Perf results live in the build directory."""
    return self._build_path()

def inspector_build_directory(self):
    """Location of the built inspector resources."""
    return self._build_path('resources', 'inspector')

def default_results_directory(self):
    """Absolute path to the default place to store the test results."""
    try:
        return self.path_from_chromium_base('webkit', self.get_option('configuration'), 'layout-test-results')
    except AssertionError:
        # Not inside a chromium checkout; fall back to the build directory.
        return self._build_path('layout-test-results')
def setup_test_run(self):
    """Perform port-specific work at the beginning of a test run."""
    # Delete the disk cache if any to ensure a clean test run.
    driver_dir = self._filesystem.dirname(self._path_to_driver())
    cachedir = self._filesystem.join(driver_dir, "cache")
    if self._filesystem.exists(cachedir):
        self._filesystem.rmtree(cachedir)
    if self._dump_reader:
        self._filesystem.maybe_make_directory(self._dump_reader.crash_dumps_directory())
def num_workers(self, requested_num_workers):
    """Returns the number of available workers (possibly less than the number requested)."""
    return requested_num_workers

def clean_up_test_run(self):
    """Perform port-specific work at the end of a test run."""
    if self._image_differ:
        self._image_differ.stop()
        self._image_differ = None

# FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
def _value_or_default_from_environ(self, name, default=None):
    """os.environ[name], or default when the variable is unset."""
    return os.environ.get(name, default)

def _copy_value_from_environ_if_set(self, clean_env, name):
    """Copy one environment variable into clean_env when it is set."""
    if name in os.environ:
        clean_env[name] = os.environ[name]
def setup_environ_for_server(self, server_name=None):
    """Build the environment dict used to launch test servers and helpers.

    Only a deliberate whitelist of variables is copied from os.environ so
    test runs stay reproducible; platform-specific variables are appended
    per host OS, and --additional-env-var entries are applied last."""
    # We intentionally copy only a subset of os.environ when
    # launching subprocesses to ensure consistent test results.
    clean_env = {
        'LOCAL_RESOURCE_ROOT': self.layout_tests_dir(),  # FIXME: Is this used?
    }
    variables_to_copy = [
        'WEBKIT_TESTFONTS',  # FIXME: Is this still used?
        'WEBKITOUTPUTDIR',  # FIXME: Is this still used?
        'CHROME_DEVEL_SANDBOX',
        'CHROME_IPC_LOGGING',
        'ASAN_OPTIONS',
        'TSAN_OPTIONS',
        'MSAN_OPTIONS',
        'LSAN_OPTIONS',
        'UBSAN_OPTIONS',
        'VALGRIND_LIB',
        'VALGRIND_LIB_INNER',
    ]
    if self.host.platform.is_linux() or self.host.platform.is_freebsd():
        variables_to_copy += [
            'XAUTHORITY',
            'HOME',
            'LANG',
            'LD_LIBRARY_PATH',
            'DBUS_SESSION_BUS_ADDRESS',
            'XDG_DATA_DIRS',
        ]
        # Default to display :1, where the tests' Xvfb is expected to run.
        clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')
    if self.host.platform.is_mac():
        clean_env['DYLD_LIBRARY_PATH'] = self._build_path()
        clean_env['DYLD_FRAMEWORK_PATH'] = self._build_path()
        variables_to_copy += [
            'HOME',
        ]
    if self.host.platform.is_win():
        variables_to_copy += [
            'PATH',
            'GYP_DEFINES',  # Required to locate win sdk.
        ]
    if self.host.platform.is_cygwin():
        variables_to_copy += [
            'HOMEDRIVE',
            'HOMEPATH',
            '_NT_SYMBOL_PATH',
        ]
    for variable in variables_to_copy:
        self._copy_value_from_environ_if_set(clean_env, variable)
    # --additional-env-var entries have the form NAME=VALUE.
    for string_variable in self.get_option('additional_env_var', []):
        [name, value] = string_variable.split('=', 1)
        clean_env[name] = value
    return clean_env
def show_results_html_file(self, results_filename):
    """This routine should display the HTML file pointed at by
    results_filename in a users' browser."""
    return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))

def create_driver(self, worker_number, no_timeout=False):
    """Return a newly created Driver subclass for starting/stopping the test driver."""
    # _driver_class() returns the class object; it is instantiated immediately.
    return self._driver_class()(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)

def start_helper(self):
    """If a port needs to reconfigure graphics settings or do other
    things to ensure a known test configuration, it should override this
    method."""
    helper_path = self._path_to_helper()
    if helper_path:
        _log.debug("Starting layout helper %s" % helper_path)
        # Note: Not thread safe: http://bugs.python.org/issue2320
        self._helper = self._executive.popen([helper_path],
                                             stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None)
        # The helper prints a line starting with 'ready' when its setup is
        # done; block until then so tests start in a known configuration.
        is_ready = self._helper.stdout.readline()
        if not is_ready.startswith('ready'):
            _log.error("layout_test_helper failed to be ready")

def requires_http_server(self):
    """Does the port require an HTTP server for running tests? This could
    be the case when the tests aren't run on the host platform."""
    return False

def start_http_server(self, additional_dirs, number_of_drivers):
    """Start a web server. Raise an error if it can't start or is already running.

    Ports can stub this out if they don't need a web server to be running."""
    assert not self._http_server, 'Already running an http server.'
    # The Apache server pool is scaled with the number of drivers.
    server = apache_http.ApacheHTTP(self, self.results_directory(),
                                    additional_dirs=additional_dirs,
                                    number_of_servers=(number_of_drivers * 4))
    server.start()
    self._http_server = server

def start_websocket_server(self):
    """Start a web server. Raise an error if it can't start or is already running.

    Ports can stub this out if they don't need a websocket server to be running."""
    assert not self._websocket_server, 'Already running a websocket server.'
    server = pywebsocket.PyWebSocket(self, self.results_directory())
    server.start()
    self._websocket_server = server

def is_wpt_enabled(self):
    """Used as feature flag for WPT Serve feature."""
    return self._is_wpt_enabled

def is_wpt_test(self, test):
    """Whether this test is part of a web-platform-tests which require wptserve servers."""
    return "web-platform-tests" in test
def start_wptserve(self):
    """Start a WPT web server. Raise an error if it can't start or is already running.

    Ports can stub this out if they don't need a WPT web server to be running."""
    # NOTE(review): this assertion message says "http server" (copy/paste
    # from start_http_server); the field being guarded is the WPT server.
    assert not self._wpt_server, 'Already running an http server.'
    assert self.is_wpt_enabled(), 'Cannot start server if WPT is not enabled.'
    # We currently don't support any output mechanism for the WPT server.
    server = wptserve.WPTServe(self, self.results_directory())
    server.start()
    self._wpt_server = server

def stop_wptserve(self):
    """Shut down the WPT server if it is running. Do nothing if it isn't."""
    if self._wpt_server:
        self._wpt_server.stop()
        self._wpt_server = None
def http_server_supports_ipv6(self):
    """Whether the port's Apache server can serve over IPv6."""
    # Apache < 2.4 on win32 does not support IPv6, nor does cygwin apache.
    platform = self.host.platform
    return not (platform.is_cygwin() or platform.is_win())
def stop_helper(self):
    """Shut down the test helper if it is running. Do nothing if
    it isn't, or it isn't available. If a port overrides start_helper()
    it must override this routine as well."""
    if self._helper:
        _log.debug("Stopping layout test helper")
        try:
            self._helper.stdin.write("x\n")
            self._helper.stdin.close()
            self._helper.wait()
        # Fixed Python-2-only `except IOError, e:` syntax; the bound
        # exception was never used, so no name is bound now. Valid in
        # both Python 2.6+ and Python 3.
        except IOError:
            # Best-effort shutdown: the helper may already have exited
            # and closed its end of the pipe.
            pass
        finally:
            self._helper = None
def stop_http_server(self):
    """Shut down the http server if it is running. Do nothing if it isn't."""
    server = self._http_server
    if server:
        server.stop()
        self._http_server = None

def stop_websocket_server(self):
    """Shut down the websocket server if it is running. Do nothing if it isn't."""
    server = self._websocket_server
    if server:
        server.stop()
        self._websocket_server = None
#
# TEST EXPECTATION-RELATED METHODS
#

def test_configuration(self):
    """Returns the current TestConfiguration for the port (built lazily)."""
    if not self._test_configuration:
        self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
    return self._test_configuration

# FIXME: Belongs on a Platform object.
@memoized
def all_test_configurations(self):
    """Returns a list of TestConfiguration instances, representing all available
    test configurations for this port."""
    return self._generate_all_test_configurations()

# FIXME: Belongs on a Platform object.
def configuration_specifier_macros(self):
    """Ports may provide a way to abbreviate configuration specifiers to conveniently
    refer to them as one term or alias specific values to more generic ones. For example:

    (xp, vista, win7) -> win # Abbreviate all Windows versions into one namesake.
    (lucid) -> linux # Change specific name of the Linux distro to a more generic term.

    Returns a dictionary, each key representing a macro term ('win', for example),
    and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
    return self.CONFIGURATION_SPECIFIER_MACROS

def all_baseline_variants(self):
    """Returns a list of platform names sufficient to cover all the baselines.

    The list should be sorted so that a later platform will reuse
    an earlier platform's baselines if they are the same (e.g.,
    'snowleopard' should precede 'leopard')."""
    return self.ALL_BASELINE_VARIANTS

def _generate_all_test_configurations(self):
    """Returns a sequence of the TestConfigurations the port supports."""
    # By default, we assume we want to test every graphics type in
    # every configuration on every system.
    test_configurations = []
    for version, architecture in self.ALL_SYSTEMS:
        for build_type in self.ALL_BUILD_TYPES:
            test_configurations.append(TestConfiguration(version, architecture, build_type))
    return test_configurations

# Names of the layout-test try builders.
try_builder_names = frozenset([
    'linux_layout',
    'mac_layout',
    'win_layout',
    'linux_layout_rel',
    'mac_layout_rel',
    'win_layout_rel',
])
def warn_if_bug_missing_in_test_expectations(self):
return True
def _port_specific_expectations_files(self):
paths = []
paths.append(self.path_from_chromium_base('skia', 'skia_test_expectations.txt'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'NeverFixTests'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'StaleTestExpectations'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'SlowTests'))
paths.append(self._filesystem.join(self.layout_tests_dir(), 'FlakyTests'))
return paths
def expectations_dict(self):
    """Returns an OrderedDict of name -> expectations strings.

    The names are expected to be (but not required to be) paths in the filesystem.
    If the name is a path, the file can be considered updatable for things like rebaselining,
    so don't use names that are paths if they're not paths.

    Generally speaking the ordering should be files in the filesystem in cascade order
    (TestExpectations followed by Skipped, if the port honors both formats),
    then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
    # FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
    expectations = OrderedDict()
    for path in self.expectations_files():
        if self._filesystem.exists(path):
            expectations[path] = self._filesystem.read_text_file(path)
    for path in self.get_option('additional_expectations', []):
        # ~ expansion only happens for user-supplied paths, not built-in ones.
        expanded_path = self._filesystem.expanduser(path)
        if self._filesystem.exists(expanded_path):
            _log.debug("reading additional_expectations from path '%s'" % path)
            # Keyed by the unexpanded path, but read from the expanded one.
            expectations[path] = self._filesystem.read_text_file(expanded_path)
        else:
            _log.warning("additional_expectations path '%s' does not exist" % path)
    return expectations

def bot_expectations(self):
    """Return flakiness expectations from the bots, or {} unless --ignore-flaky-tests is set."""
    if not self.get_option('ignore_flaky_tests'):
        return {}
    full_port_name = self.determine_full_port_name(self.host, self._options, self.port_name)
    builder_category = self.get_option('ignore_builder_category', 'layout')
    factory = BotTestExpectationsFactory()
    # FIXME: This only grabs release builder's flakiness data. If we're running debug,
    # then we should grab the debug builder's data.
    expectations = factory.expectations_for_port(full_port_name, builder_category)
    if not expectations:
        return {}
    ignore_mode = self.get_option('ignore_flaky_tests')
    if ignore_mode == 'very-flaky' or ignore_mode == 'maybe-flaky':
        return expectations.flakes_by_path(ignore_mode == 'very-flaky')
    if ignore_mode == 'unexpected':
        return expectations.unexpected_results_by_path()
    _log.warning("Unexpected ignore mode: '%s'." % ignore_mode)
    return {}
def expectations_files(self):
    """All expectation files: the generic one first, then port-specific ones."""
    files = [self.path_to_generic_test_expectations_file()]
    files.extend(self._port_specific_expectations_files())
    return files

def repository_paths(self):
    """Returns a list of (repository_name, repository_path) tuples of its depending code base."""
    return [
        ('blink', self.layout_tests_dir()),
        ('chromium', self.path_from_chromium_base('build')),
    ]
# Sentinel markers wdiff is told to wrap deletions/insertions in;
# _format_wdiff_output_as_html() rewrites them into HTML spans.
_WDIFF_DEL = '##WDIFF_DEL##'
_WDIFF_ADD = '##WDIFF_ADD##'
_WDIFF_END = '##WDIFF_END##'

def _format_wdiff_output_as_html(self, wdiff):
    """Convert marker-tagged wdiff output into a styled HTML snippet."""
    # NOTE(review): cgi.escape is deprecated and removed in Python 3.13;
    # html.escape(..., quote=False) is the equivalent once this file is Python 3.
    # Escape first, then substitute markers, so the injected tags survive.
    wdiff = cgi.escape(wdiff)
    wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
    wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
    wdiff = wdiff.replace(self._WDIFF_END, "</span>")
    html = "<head><style>.del { background: #faa; } "
    html += ".add { background: #afa; }</style></head>"
    html += "<pre>%s</pre>" % wdiff
    return html
def _wdiff_command(self, actual_filename, expected_filename):
executable = self._path_to_wdiff()
return [executable,
"--start-delete=%s" % self._WDIFF_DEL,
"--end-delete=%s" % self._WDIFF_END,
"--start-insert=%s" % self._WDIFF_ADD,
"--end-insert=%s" % self._WDIFF_END,
actual_filename,
expected_filename]
@staticmethod
def _handle_wdiff_error(script_error):
# Exit 1 means the files differed, any other exit code is an error.
if script_error.exit_code != 1:
raise script_error
def _run_wdiff(self, actual_filename, expected_filename):
    """Runs wdiff and may throw exceptions.

    This is mostly a hook for unit testing."""
    # Diffs are treated as binary as they may include multiple files
    # with conflicting encodings. Thus we do not decode the output.
    command = self._wdiff_command(actual_filename, expected_filename)
    wdiff = self._executive.run_command(command, decode_output=False,
                                        error_handler=self._handle_wdiff_error)
    return self._format_wdiff_output_as_html(wdiff)

# Shown to the user in place of a diff when wdiff fails.
_wdiff_error_html = "Failed to run wdiff, see error log."

def wdiff_text(self, actual_filename, expected_filename):
    """Returns a string of HTML indicating the word-level diff of the
    contents of the two filenames. Returns an empty string if word-level
    diffing isn't available."""
    if not self.wdiff_available():
        return ""
    try:
        # It's possible to raise a ScriptError if we pass wdiff invalid paths.
        return self._run_wdiff(actual_filename, expected_filename)
    except OSError as e:
        if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
            # Silently ignore cases where wdiff is missing.
            self._wdiff_available = False
            return ""
        raise
    except ScriptError as e:
        _log.error("Failed to run wdiff: %s" % e)
        self._wdiff_available = False
        return self._wdiff_error_html
# This is a class variable so we can test error output easily.
_pretty_patch_error_html = "Failed to run PrettyPatch, see error log."

def pretty_patch_text(self, diff_path):
    """Return pretty-printed HTML for the diff file at diff_path.

    Falls back to _pretty_patch_error_html (and disables further
    attempts) when ruby or PrettyPatch is unavailable or fails.
    """
    if self._pretty_patch_available is None:
        self._pretty_patch_available = self.check_pretty_patch(logging=False)
    if not self._pretty_patch_available:
        return self._pretty_patch_error_html
    command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
               self._pretty_patch_path, diff_path)
    try:
        # Diffs are treated as binary (we pass decode_output=False) as they
        # may contain multiple files of conflicting encodings.
        return self._executive.run_command(command, decode_output=False)
    # Fixed Python-2-only `except X, e:` syntax (this file's wdiff_text
    # already uses the portable `except X as e:` form).
    except OSError as e:
        # If the system is missing ruby log the error and stop trying.
        self._pretty_patch_available = False
        _log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
        return self._pretty_patch_error_html
    except ScriptError as e:
        # If ruby failed to run for some reason, log the command
        # output and stop trying.
        self._pretty_patch_available = False
        _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
        return self._pretty_patch_error_html
def default_configuration(self):
    """Return the build configuration from the port's config object."""
    return self._config.default_configuration()

def clobber_old_port_specific_results(self):
    # No-op by default; ports may delete stale port-specific results here.
    pass

# FIXME: This does not belong on the port object.
@memoized
def path_to_apache(self):
    """Returns the full path to the apache binary.

    This is needed only by ports that use the apache_http_server module."""
    raise NotImplementedError('Port.path_to_apache')

def path_to_apache_config_file(self):
    """Returns the full path to the apache configuration file.

    If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
    contents will be used instead.

    This is needed only by ports that use the apache_http_server module."""
    config_file_from_env = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
    if config_file_from_env:
        if not self._filesystem.exists(config_file_from_env):
            raise IOError('%s was not found on the system' % config_file_from_env)
        return config_file_from_env
    # Fall back to the config file bundled with the layout tests.
    config_file_name = self._apache_config_file_name_for_platform()
    return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
#
# PROTECTED ROUTINES
#
# The routines below should only be called by routines in this class
# or any of its subclasses.
#
def _apache_version(self):
config = self._executive.run_command([self.path_to_apache(), '-v'])
return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*', r'\1', config)
def _apache_config_file_name_for_platform(self):
if self.host.platform.is_cygwin():
return 'cygwin-httpd.conf' # CYGWIN is the only platform to still use Apache 1.3.
if self.host.platform.is_linux():
distribution = self.host.platform.linux_distribution()
custom_configuration_distributions = ['arch', 'debian', 'redhat']
if distribution in custom_configuration_distributions:
return "%s-httpd-%s.conf" % (distribution, self._apache_version())
return 'apache2-httpd-' + self._apache_version() + '.conf'
def _path_to_driver(self, configuration=None):
    """Returns the full path to the test driver."""
    # configuration is accepted for subclass overrides; unused here.
    return self._build_path(self.driver_name())

def _path_to_webcore_library(self):
    """Returns the full path to a built copy of WebCore."""
    return None

def _path_to_helper(self):
    """Returns the full path to the layout_test_helper binary, which
    is used to help configure the system for the test run, or None
    if no helper is needed.

    This is likely only used by start/stop_helper()."""
    return None

def _path_to_image_diff(self):
    """Returns the full path to the image_diff binary, or None if it is not available.

    This is likely used only by diff_image()"""
    return self._build_path('image_diff')

@memoized
def _path_to_wdiff(self):
    """Returns the full path to the wdiff binary, or None if it is not available.

    This is likely used only by wdiff_text()"""
    # Prefer classic wdiff; fall back to dwdiff.
    for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
        if self._filesystem.exists(path):
            return path
    return None
def _webkit_baseline_path(self, platform):
"""Return the full path to the top of the baseline tree for a
given platform."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
def _driver_class(self):
"""Returns the port's driver implementation."""
return driver.Driver
def output_contains_sanitizer_messages(self, output):
if not output:
return None
if 'AddressSanitizer' in output:
return 'AddressSanitizer'
if 'MemorySanitizer' in output:
return 'MemorySanitizer'
return None
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
    """Assemble a textual crash log from a crashed driver's stdout/stderr.

    Returns (possibly-symbolized stderr bytes, formatted log string).
    newer_than is unused in this base implementation.
    """
    if self.output_contains_sanitizer_messages(stderr):
        # Running the symbolizer script can take a lot of memory, so we need to
        # serialize access to it across all the concurrently running drivers.
        llvm_symbolizer_path = self.path_from_chromium_base('third_party', 'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer')
        if self._filesystem.exists(llvm_symbolizer_path):
            env = os.environ.copy()
            env['LLVM_SYMBOLIZER_PATH'] = llvm_symbolizer_path
        else:
            env = None
        sanitizer_filter_path = self.path_from_chromium_base('tools', 'valgrind', 'asan', 'asan_symbolize.py')
        sanitizer_strip_path_prefix = 'Release/../../'
        if self._filesystem.exists(sanitizer_filter_path):
            # 'flock' serializes the symbolizer runs (see memory note above).
            stderr = self._executive.run_command(['flock', sys.executable, sanitizer_filter_path, sanitizer_strip_path_prefix], input=stderr, decode_output=False, env=env)
    name_str = name or '<unknown process name>'
    pid_str = str(pid or '<unknown>')
    # We require stdout and stderr to be bytestrings, not character strings.
    # NOTE(review): isinstance(x, str) followed by x.decode() is Python 2
    # semantics (str == bytes); under Python 3 this pair would be wrong.
    if stdout:
        assert isinstance(stdout, str)
        stdout_lines = stdout.decode('utf8', 'replace').splitlines()
    else:
        stdout_lines = [u'<empty>']
    if stderr:
        assert isinstance(stderr, str)
        stderr_lines = stderr.decode('utf8', 'replace').splitlines()
    else:
        stderr_lines = [u'<empty>']
    return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
                                                              '\n'.join(('STDOUT: ' + l) for l in stdout_lines),
                                                              '\n'.join(('STDERR: ' + l) for l in stderr_lines)))

def look_for_new_crash_logs(self, crashed_processes, start_time):
    # Hook for ports with OS-level crash reporters; no-op by default.
    pass

def look_for_new_samples(self, unresponsive_processes, start_time):
    # Hook for ports that can sample hung processes; no-op by default.
    pass

def sample_process(self, name, pid):
    # Hook for ports that can sample a single process; no-op by default.
    pass

def physical_test_suites(self):
    """On-disk test suites run with extra driver flags (none by default)."""
    return [
        # For example, to turn on force-compositing-mode in the svg/ directory:
        # PhysicalTestSuite('svg', ['--force-compositing-mode']),
    ]

def virtual_test_suites(self):
    """Lazily load LayoutTests/VirtualTestSuites (a JSON list of suite dicts)."""
    if self._virtual_test_suites is None:
        path_to_virtual_test_suites = self._filesystem.join(self.layout_tests_dir(), 'VirtualTestSuites')
        assert self._filesystem.exists(path_to_virtual_test_suites), 'LayoutTests/VirtualTestSuites not found'
        try:
            test_suite_json = json.loads(self._filesystem.read_text_file(path_to_virtual_test_suites))
            self._virtual_test_suites = [VirtualTestSuite(**d) for d in test_suite_json]
        except ValueError as e:
            # Re-raise with the file name so the user knows what to fix.
            raise ValueError("LayoutTests/VirtualTestSuites is not a valid JSON file: %s" % str(e))
    return self._virtual_test_suites
def _all_virtual_tests(self, suites):
tests = []
for suite in suites:
self._populate_virtual_suite(suite)
tests.extend(suite.tests.keys())
return tests
def _virtual_tests_matching_paths(self, paths, suites):
tests = []
for suite in suites:
if any(p.startswith(suite.name) for p in paths):
self._populate_virtual_suite(suite)
for test in suite.tests:
if any(test.startswith(p) for p in paths):
tests.append(test)
return tests
def _populate_virtual_suite(self, suite):
if not suite.tests:
base_tests = self._real_tests([suite.base])
suite.tests = {}
for test in base_tests:
suite.tests[test.replace(suite.base, suite.name, 1)] = test
def is_virtual_test(self, test_name):
    """True when *test_name* lives under some virtual suite's prefix."""
    return bool(self.lookup_virtual_suite(test_name))

def lookup_virtual_suite(self, test_name):
    """Return the virtual suite whose name prefixes *test_name*, else None."""
    return next((suite for suite in self.virtual_test_suites()
                 if test_name.startswith(suite.name)), None)

def lookup_virtual_test_base(self, test_name):
    """Map a virtual test name back to its underlying real test name, or None."""
    suite = self.lookup_virtual_suite(test_name)
    return test_name.replace(suite.name, suite.base, 1) if suite else None
def lookup_virtual_test_args(self, test_name):
for suite in self.virtual_test_suites():
if test_name.startswith(suite.name):
return suite.args
return []
def lookup_virtual_reference_args(self, test_name):
for suite in self.virtual_test_suites():
if test_name.startswith(suite.name):
return suite.reference_args
return []
def lookup_physical_test_args(self, test_name):
for suite in self.physical_test_suites():
if test_name.startswith(suite.name):
return suite.args
return []
def lookup_physical_reference_args(self, test_name):
for suite in self.physical_test_suites():
if test_name.startswith(suite.name):
return suite.reference_args
return []
def should_run_as_pixel_test(self, test_input):
if not self._options.pixel_tests:
return False
if self._options.pixel_test_directories:
return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
# TODO(burnik): Make sure this is the right way to do it.
if self.is_wpt_enabled() and self.is_wpt_test(test_input.test_name):
return False
return True
def _modules_to_search_for_symbols(self):
path = self._path_to_webcore_library()
if path:
return [path]
return []
def _symbols_string(self):
symbols = ''
for path_to_module in self._modules_to_search_for_symbols():
try:
symbols += self._executive.run_command(['nm', path_to_module], error_handler=self._executive.ignore_error)
except OSError, e:
_log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
return symbols
# Ports which use compile-time feature detection should define this method and return
# a dictionary mapping from symbol substrings to possibly disabled test directories.
# When the symbol substrings are not matched, the directories will be skipped.
# If ports don't ever enable certain features, then those directories can just be
# in the Skipped list instead of compile-time-checked here.
def _missing_symbol_to_skipped_tests(self):
if self.PORT_HAS_AUDIO_CODECS_BUILT_IN:
return {}
else:
return {
"ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
"ff_aac_decoder": ["webaudio/codec-tests/aac"],
}
def _has_test_in_directories(self, directory_lists, test_list):
if not test_list:
return False
directories = itertools.chain.from_iterable(directory_lists)
for directory, test in itertools.product(directories, test_list):
if test.startswith(directory):
return True
return False
def _skipped_tests_for_unsupported_features(self, test_list):
    """Return the test directories to skip because required symbols are absent."""
    # Only check the symbols if there are tests in the test_list that might get skipped.
    # This is a performance optimization to avoid calling nm.
    # Runtime feature detection not supported, fallback to static detection:
    # Disable any tests for symbols missing from the executable or libraries.
    if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
        symbols_string = self._symbols_string()
        if symbols_string is not None:
            # Flatten the directory lists of every unmatched symbol.
            # NOTE(review): reduce is a builtin in Python 2 only; Python 3
            # would need functools.reduce.
            return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
    return []
def _convert_path(self, path):
"""Handles filename conversion for subprocess command line args."""
# See note above in diff_image() for why we need this.
if sys.platform == 'cygwin':
return cygpath(path)
return path
def _build_path(self, *comps):
return self._build_path_with_configuration(None, *comps)
def _build_path_with_configuration(self, configuration, *comps):
# Note that we don't do the option caching that the
# base class does, because finding the right directory is relatively
# fast.
configuration = configuration or self.get_option('configuration')
return self._static_build_path(self._filesystem, self.get_option('build_directory'),
self.path_from_chromium_base(), configuration, comps)
def _check_driver_build_up_to_date(self, configuration):
    """Warn when the other configuration's driver binary is newer than the one in use."""
    if configuration in ('Debug', 'Release'):
        try:
            debug_path = self._path_to_driver('Debug')
            release_path = self._path_to_driver('Release')
            debug_mtime = self._filesystem.mtime(debug_path)
            release_mtime = self._filesystem.mtime(release_path)
            # 'and' binds tighter than 'or': warn when the binary NOT being
            # run has a newer mtime than the selected one.
            if (debug_mtime > release_mtime and configuration == 'Release' or
                    release_mtime > debug_mtime and configuration == 'Debug'):
                most_recent_binary = 'Release' if configuration == 'Debug' else 'Debug'
                _log.warning('You are running the %s binary. However the %s binary appears to be more recent. '
                             'Please pass --%s.', configuration, most_recent_binary, most_recent_binary.lower())
                _log.warning('')
        # This will fail if we don't have both a debug and release binary.
        # That's fine because, in this case, we must already be running the
        # most up-to-date one.
        except OSError:
            pass
    # The check only warns; it always reports the build as usable.
    return True

def _chromium_baseline_path(self, platform):
    """Path to the LayoutTests/platform/<platform> baseline directory."""
    if platform is None:
        platform = self.name()
    return self.path_from_webkit_base('LayoutTests', 'platform', platform)
class VirtualTestSuite(object):
    """A suite that re-runs the tests under *base* with extra driver args,
    exposed under the 'virtual/<prefix>/<base>' name space."""

    def __init__(self, prefix=None, base=None, args=None, references_use_default_args=False):
        assert base
        assert args
        assert '/' not in prefix, "Virtual test suites prefixes cannot contain /'s: %s" % prefix
        self.base = base
        self.args = args
        self.name = '/'.join(['virtual', prefix, base])
        # Reference tests may opt out of the extra args.
        self.reference_args = [] if references_use_default_args else args
        # virtual test name -> underlying real test name; filled in
        # lazily by Port._populate_virtual_suite().
        self.tests = {}

    def __repr__(self):
        return "VirtualTestSuite('%s', '%s', %s, %s)" % (self.name, self.base, self.args, self.reference_args)
class PhysicalTestSuite(object):
    """A plain on-disk test suite run with extra driver args."""

    def __init__(self, base, args, reference_args=None):
        self.name = base
        self.base = base
        self.args = args
        # Reference tests inherit *args* unless explicitly overridden.
        if reference_args is None:
            reference_args = args
        self.reference_args = reference_args
        self.tests = set()

    def __repr__(self):
        return "PhysicalTestSuite('%s', '%s', %s, %s)" % (self.name, self.base, self.args, self.reference_args)
| {
"content_hash": "24b30a8b9242404e536844fa00192173",
"timestamp": "",
"source": "github",
"line_count": 1773,
"max_line_length": 189,
"avg_line_length": 43.51494641849972,
"alnum_prop": 0.6244037743674824,
"repo_name": "crosswalk-project/blink-crosswalk",
"id": "f62dd5e8c630d27738b184ae6a50ad9abd9d2104",
"size": "78674",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Tools/Scripts/webkitpy/layout_tests/port/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
import argparse
import textwrap
from pandashells.lib import parallel_lib
def main():
    """Read shell commands from stdin and execute them in parallel.

    The description below doubles as the --help epilog. (The original
    assigned a short summary to msg and then immediately overwrote it;
    that dead assignment has been removed, and the "parrallel" typo in
    the user-visible help text is fixed.)
    """
    msg = textwrap.dedent(
        """
        Read a list of commands from stdin and execute them in parallel.
        -----------------------------------------------------------------------
        Examples:
        * This line generates commands that will be used in the examples.
        time seq 10 \\
        | p.format -t 'sleep 1; echo done {n}' --names n -i noheader
        * Execute the commands one at a time, no parallelism
        time seq 10 \\
        | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
        | p.parallel -n 1
        * Execute all commands in parallel
        time seq 10 \\
        | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
        | p.parallel -n 10
        * Suppress stdout from processes and echo commands
        time seq 10 \\
        | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
        | p.parallel -n 10 -c -s stdout
        * Make a histogram of how long the individual jobs took
        time seq 100 \\
        | p.format -t 'sleep 1; echo done {n}' --names n -i noheader \\
        | p.parallel -n 50 -v \\
        | grep __job__ \\
        | p.df 'df.dropna()' 'df.duration_sec.hist(bins=20)'
        -----------------------------------------------------------------------
        """
    )
    # read command line arguments
    # Note: --njobs and --suppress use nargs=1, so the parsed values are
    # single-element lists (hence args.njobs[0] and the 'in' tests below).
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)
    msg = "The number of jobs to run in parallel. If not supplied, will "
    msg += "default to the number of detected cores."
    parser.add_argument('--njobs', '-n', dest='njobs', default=[None],
                        nargs=1, type=int, help=msg)
    parser.add_argument("-v", "--verbose", action="store_true", default=False,
                        help="Enable verbose output")
    parser.add_argument("-c", "--show_commands", action="store_true",
                        default=False, help="Print commands to stdout")
    msg = "Suppress stdout, stderr, or both for all running jobs"
    parser.add_argument("-s", "--suppress",
                        choices=['stdout', 'stderr', 'both'], default=[None],
                        nargs=1, help=msg)
    # parse arguments
    args = parser.parse_args()
    # get the commands from stdin
    cmd_list = sys.stdin.readlines()
    # get suppression vars from args
    suppress_stdout = 'stdout' in args.suppress or 'both' in args.suppress
    suppress_stderr = 'stderr' in args.suppress or 'both' in args.suppress
    # run the commands
    parallel_lib.parallel(
        cmd_list,
        njobs=args.njobs[0],
        verbose=args.verbose,
        suppress_cmd=(not args.show_commands),
        suppress_stdout=suppress_stdout,
        suppress_stderr=suppress_stderr,
        assume_hyperthread=True)


if __name__ == '__main__':  # pragma: no cover
    main()
| {
"content_hash": "06650b612a690943873600b97ca8cf4a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 37.03409090909091,
"alnum_prop": 0.5372813746548021,
"repo_name": "iiSeymour/pandashells",
"id": "8814bc9bdbb8e6cfdbf7885a65df3572e9d95d61",
"size": "3283",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pandashells/bin/p_parallel.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2646"
},
{
"name": "Python",
"bytes": "180184"
},
{
"name": "Shell",
"bytes": "6872"
}
],
"symlink_target": ""
} |
import math
from datetime import datetime
from datetime import timedelta
"""
Gets current starting time with flooding delay
"""
def get_cst_and_fd(nw_size_estimate, overlapping_bits):
hr_in_ms = 3600000.0
flood_delay_in_ms = (hr_in_ms/2) - (hr_in_ms/math.pi) * (math.atan(overlapping_bits - nw_size_estimate))
starttime = datetime.utcnow().replace(minute = 0, second = 0, microsecond = 0)
ct_plus_fd = starttime + timedelta(milliseconds = flood_delay_in_ms)
return ct_plus_fd
"""
Computes processing delay of messages
"""
def compute_process_delay(send_time):
time_difference = send_time - datetime.utcnow()
(delay_in_min, delay_in_sec) = divmod(time_difference.total_seconds(), 60)
delay_min_to_sec = delay_in_min * 60
computed_time_sec = delay_min_to_sec + delay_in_sec
return computed_time_sec
if __name__ == "__main__":
current_time = datetime.utcnow().replace(minute = 0, second = 0, microsecond = 0)
print(current_time) | {
"content_hash": "37709a6be5f88e1a44986f45ddce479b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 108,
"avg_line_length": 35.214285714285715,
"alnum_prop": 0.6825557809330629,
"repo_name": "ansin218/p2p-voidphone-nse",
"id": "6cc7d7bc3e29a0110d68c8099eb68e883f8c5179",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/compute_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41843"
}
],
"symlink_target": ""
} |
from datautil import findFile
from datautil import filterEvents
from datautil import toTxt
import json
def printKeys(dict):
    """Print the keys of *dict*; print a notice when it has no .keys().

    The parameter name shadows the builtin `dict`; kept for interface
    compatibility.
    """
    try:
        print(dict.keys())
    # The original bare `except:` also swallowed unrelated errors
    # (including KeyboardInterrupt); only a missing .keys() attribute
    # means the argument is not dict-like.
    except AttributeError:
        print("Not a Dict Object")
def printList(list, indent = 0):
    """Recursively print the elements of *list*, one per line, indenting
    nested lists/tuples by one extra space per level.

    Bug fixes vs. the original:
    * the recursive call was `printList(list)` — recursing on the WHOLE
      list instead of the element, which never terminated and dropped
      the indent;
    * `elem[0]` was used to detect sublists, which also treated strings
      (and any other subscriptable) as lists.
    The parameter name shadows the builtin `list` (kept for interface
    compatibility), so `type([])` is used instead of isinstance checks.
    """
    spacing = " " * indent
    for elem in list:
        if type(elem) in (type([]), tuple):
            printList(elem, indent + 1)
        else:
            print(spacing + str(elem))
# --- Script entry: inspect the CpuProfile event of a Chrome trace dump ---
manual = True
# findFile() lets the user pick the trace interactively; the fallback is a
# developer-specific hard-coded path (NOTE(review): machine-local path).
if(manual): data_path = findFile()
else: data_path = 'C:/Users/Miguel Guerrero/git/Charcoal/RealAppAnalysis/sample traces/test page/simple action.json'
with open(data_path, "r") as json_file:
    data = json.load(json_file)
# Keep only trace events named "CpuProfile", then drill into its node list.
cpu_profile = filterEvents(data, "name", "CpuProfile")
print("Length of in: %d" % len(cpu_profile))
focus = cpu_profile[0]["args"]["data"]["cpuProfile"]["nodes"]
print("Length: %d" % len(focus))
#print(focus)
| {
"content_hash": "39b87d518ceb5ace0ece8b7dc7129593",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 122,
"avg_line_length": 24.58974358974359,
"alnum_prop": 0.5964546402502607,
"repo_name": "benjaminy/Charcoal",
"id": "014bf917dbef0595b06ffe5db229d3c6a3ca80d7",
"size": "959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RealAppAnalysis/test dump/browsers/cpuprofile_from_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "168860"
},
{
"name": "C++",
"bytes": "342986"
},
{
"name": "CSS",
"bytes": "2466"
},
{
"name": "Go",
"bytes": "1373"
},
{
"name": "HTML",
"bytes": "5520"
},
{
"name": "Haskell",
"bytes": "2836"
},
{
"name": "Java",
"bytes": "3893"
},
{
"name": "JavaScript",
"bytes": "47815"
},
{
"name": "Makefile",
"bytes": "15619"
},
{
"name": "Python",
"bytes": "195186"
},
{
"name": "Shell",
"bytes": "8963"
},
{
"name": "Standard ML",
"bytes": "1765"
},
{
"name": "TeX",
"bytes": "1114333"
}
],
"symlink_target": ""
} |
from office365.runtime.client_value import ClientValue
class ShiftAvailability(ClientValue):
    """Availability of the user to be scheduled for a shift and its
    recurrence pattern.

    A plain value type: all behavior comes from the ClientValue base class.
    """
| {
"content_hash": "7b468980766601c6c3f5d8baa91df06b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 90,
"avg_line_length": 32.5,
"alnum_prop": 0.7743589743589744,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "ba73b882a9713f27e06845351eb094ec231b6c04",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/teams/shifts/availability.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
from collections import Counter
from typing import Tuple
import numpy as np
import sklearn.metrics
from numpy.typing import ArrayLike
from sklearn.base import BaseEstimator
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection._split import _CVIterableWrapper
from .types import CalibrationMethod
def det_curve(y_true: ArrayLike, scores: ArrayLike, distances: bool = False) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
    """Compute the detection error tradeoff (DET) curve.

    Parameters
    ----------
    y_true : (n_samples, ) array-like
        Boolean reference.
    scores : (n_samples, ) array-like
        Predicted score.
    distances : boolean, optional
        When True, indicate that `scores` are actually `distances`

    Returns
    -------
    fpr : numpy array
        False alarm rate
    fnr : numpy array
        False rejection rate
    thresholds : numpy array
        Corresponding thresholds
    eer : float
        Equal error rate
    """
    # Distances rank in the opposite direction of scores: negate so that
    # "higher is better" holds for roc_curve.
    if distances:
        scores = -scores
    fpr, tpr, thresholds = sklearn.metrics.roc_curve(
        y_true, scores, pos_label=True)
    fnr = 1 - tpr
    # Map thresholds back into the original distance domain.
    if distances:
        thresholds = -thresholds
    # The EER sits where the false-alarm curve first rises above the
    # false-rejection curve; average the four surrounding values.
    crossing = np.where(fpr > fnr)[0][0]
    eer = (fpr[crossing - 1] + fpr[crossing]
           + fnr[crossing - 1] + fnr[crossing]) / 4
    return fpr, fnr, thresholds, eer
def precision_recall_curve(y_true: ArrayLike,
                           scores: ArrayLike,
                           distances: bool = False) \
        -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
    """Precision-recall curve

    Parameters
    ----------
    y_true : (n_samples, ) array-like
        Boolean reference.
    scores : (n_samples, ) array-like
        Predicted score.
    distances : boolean, optional
        When True, indicate that `scores` are actually `distances`

    Returns
    -------
    precision : numpy array
        Precision
    recall : numpy array
        Recall
    thresholds : numpy array
        Corresponding thresholds
    auc : float
        Area under curve
    """
    if distances:
        scores = -scores
    precision, recall, thresholds = sklearn.metrics.precision_recall_curve(
        y_true, scores, pos_label=True)
    if distances:
        thresholds = -thresholds
    # BUG FIX: `sklearn.metrics.auc` lost its `reorder` parameter in
    # scikit-learn 0.22, so the old `auc(precision, recall, reorder=True)`
    # raises TypeError. The PR-AUC integrates precision over recall, so
    # recall (monotonic output of precision_recall_curve) is the x-axis.
    auc = sklearn.metrics.auc(recall, precision)
    return precision, recall, thresholds, auc
class _Passthrough(BaseEstimator):
    """Dummy binary classifier used by score Calibration class.

    Implements just enough of the scikit-learn estimator API for
    `CalibratedClassifierCV`: `fit` is a no-op and `decision_function`
    returns the raw scores unchanged.
    """
    def __init__(self):
        super().__init__()
        # BUG FIX: `np.bool` was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24 (raises AttributeError). The builtin `bool` maps to
        # the same `np.bool_` dtype.
        self.classes_ = np.array([False, True], dtype=bool)
    def fit(self, scores, y_true):
        # Nothing to learn; present only to satisfy the estimator API.
        return self
    def decision_function(self, scores: ArrayLike):
        """Returns the input scores unchanged"""
        return scores
class Calibration:
    """Probability calibration for binary classification tasks
    Parameters
    ----------
    method : {'isotonic', 'sigmoid'}, optional
        See `CalibratedClassifierCV`. Defaults to 'isotonic'.
    equal_priors : bool, optional
        Set to True to force equal priors. Default behavior is to estimate
        priors from the data itself.
    Usage
    -----
    >>> calibration = Calibration()
    >>> calibration.fit(train_score, train_y)
    >>> test_probability = calibration.transform(test_score)
    See also
    --------
    CalibratedClassifierCV
    """
    def __init__(self, equal_priors: bool = False,
                 method: CalibrationMethod = 'isotonic'):
        self.method = method
        self.equal_priors = equal_priors
    def fit(self, scores: ArrayLike, y_true: ArrayLike):
        """Train calibration
        Parameters
        ----------
        scores : (n_samples, ) array-like
            Uncalibrated scores.
        y_true : (n_samples, ) array-like
            True labels (dtype=bool).
        """
        # to force equal priors, randomly select (and average over)
        # up to fifty balanced (i.e. #true == #false) calibration sets.
        if self.equal_priors:
            counter = Counter(y_true)
            positive, negative = counter[True], counter[False]
            # Determine which class dominates so we can subsample it.
            if positive > negative:
                majority, minority = True, False
                n_majority, n_minority = positive, negative
            else:
                majority, minority = False, True
                n_majority, n_minority = negative, positive
            # Enough splits to cover the majority class, capped at 50.
            n_splits = min(50, n_majority // n_minority + 1)
            minority_index = np.where(y_true == minority)[0]
            majority_index = np.where(y_true == majority)[0]
            cv = []
            for _ in range(n_splits):
                # Each "split" trains on nothing ([]) and calibrates on a
                # balanced subset: n_minority majority samples (randomly
                # drawn, NOT deterministic) plus all minority samples.
                test_index = np.hstack([
                    np.random.choice(majority_index,
                                     size=n_minority,
                                     replace=False),
                    minority_index])
                cv.append(([], test_index))
            # Wrap the explicit splits in sklearn's (private) CV adapter.
            cv = _CVIterableWrapper(cv)
        # to estimate priors from the data itself, use the whole set
        else:
            cv = 'prefit'
        # NOTE(review): `base_estimator` was renamed to `estimator` in
        # scikit-learn 1.2 and removed in 1.4 -- confirm the pinned
        # scikit-learn version supports this keyword.
        self.calibration_ = CalibratedClassifierCV(
            base_estimator=_Passthrough(), method=self.method, cv=cv)
        self.calibration_.fit(scores.reshape(-1, 1), y_true)
        return self
    def transform(self, scores: ArrayLike):
        """Calibrate scores into probabilities
        Parameters
        ----------
        scores : (n_samples, ) array-like
            Uncalibrated scores.
        Returns
        -------
        probabilities : (n_samples, ) array-like
            Calibrated scores (i.e. probabilities)
        """
        return self.calibration_.predict_proba(scores.reshape(-1, 1))[:, 1]
| {
"content_hash": "cc0dcc385320e139e9126270f36ab93f",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 78,
"avg_line_length": 28.57766990291262,
"alnum_prop": 0.5853575675216579,
"repo_name": "pyannote/pyannote-metrics",
"id": "1c80738026bfe32a6a747359475d07a6bc390252",
"size": "7091",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyannote/metrics/binary_classification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "48601"
},
{
"name": "Python",
"bytes": "265388"
}
],
"symlink_target": ""
} |
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateFriendship(Choreography):
    # Generated Temboo wrapper for the Twitter "create friendship"
    # (follow a user) Choreo.
    def __init__(self, temboo_session):
        """
        Create a new instance of the CreateFriendship Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CreateFriendship, self).__init__(temboo_session, '/Library/Twitter/FriendsAndFollowers/CreateFriendship')
    def new_input_set(self):
        # Fresh, empty input set for the caller to populate.
        return CreateFriendshipInputSet()
    def _make_result_set(self, result, path):
        # Factory hook used by the Choreography base class.
        return CreateFriendshipResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Factory hook used by the Choreography base class.
        return CreateFriendshipChoreographyExecution(session, exec_id, path)
class CreateFriendshipInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CreateFriendship
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Generated code: each setter forwards a named input to the base class.
    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
        """
        super(CreateFriendshipInputSet, self)._set_input('AccessTokenSecret', value)
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
        """
        super(CreateFriendshipInputSet, self)._set_input('AccessToken', value)
    def set_ConsumerKey(self, value):
        """
        Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
        """
        super(CreateFriendshipInputSet, self)._set_input('ConsumerKey', value)
    def set_ConsumerSecret(self, value):
        """
        Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
        """
        super(CreateFriendshipInputSet, self)._set_input('ConsumerSecret', value)
    def set_Follow(self, value):
        """
        Set the value of the Follow input for this Choreo. ((optional, boolean) A boolean flag that enables notifications for the target user when set to true.)
        """
        super(CreateFriendshipInputSet, self)._set_input('Follow', value)
    def set_ScreenName(self, value):
        """
        Set the value of the ScreenName input for this Choreo. ((conditional, string) The screen name for the friend you want to create a friendship with. Required if UserId isn't specified.)
        """
        super(CreateFriendshipInputSet, self)._set_input('ScreenName', value)
    def set_UserId(self, value):
        """
        Set the value of the UserId input for this Choreo. ((conditional, string) The user id for the friend you want to create a friendship with. Required if ScreenName isn't specified.)
        """
        super(CreateFriendshipInputSet, self)._set_input('UserId', value)
class CreateFriendshipResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CreateFriendship Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter shadows the builtin `str`; kept as-is
        # because this is generated code and callers may pass it by keyword.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
        """
        return self._output.get('Response', None)
class CreateFriendshipChoreographyExecution(ChoreographyExecution):
    # Generated code: ties an execution back to its typed ResultSet.
    def _make_result_set(self, response, path):
        return CreateFriendshipResultSet(response, path)
| {
"content_hash": "9bb79fc7d45ccb6c133727ce193c3916",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 191,
"avg_line_length": 46.395348837209305,
"alnum_prop": 0.700250626566416,
"repo_name": "jordanemedlock/psychtruths",
"id": "d12231bdbf7e7383526b04c883ca0fd812e3849c",
"size": "4901",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Twitter/FriendsAndFollowers/CreateFriendship.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
} |
__author__ = 'sohje'
__version__ = 0.02
import pymongo
from flask import Flask, g
from flask.ext.socketio import SocketIO
from fabric import main as fab_main
import config
from fabfile import env
def connect(host, port):
    """Create a MongoDB client and authenticate when credentials are set.

    Returns the connected client, or False when the connection attempt
    fails (callers must check for this).
    """
    try:
        client = pymongo.MongoClient(host, port)
    # BUG FIX: the original (Python-2-only `except X, e` syntax) printed the
    # error and kept going with client=False, which then crashed below on
    # client['admin']. Bail out early instead; the `as` form also works on
    # both Python 2.6+ and Python 3.
    except pymongo.errors.ConnectionFailure as messages:
        print(messages)
        return False
    if config.MONGO_USER and config.MONGO_PW:
        auth = client['admin']
        try:
            auth.authenticate(config.MONGO_USER, config.MONGO_PW)
        except KeyError:
            # NOTE(review): pymongo auth failures usually raise
            # OperationFailure, not KeyError -- confirm intent.
            raise Exception('KeyError: Not authenticating!')
    return client
socketio = SocketIO()  # shared SocketIO instance; bound to the app in create_app()
db = connect(config.MONGO_HOST, config.MONGO_PORT)  # module-level Mongo client (False on failure)
docstring, fab_tasks = fab_main.load_fabfile(config.fabfile)[:2]  # parse the fabfile once at import time
def create_app(debug=False):
    """Application factory: build and configure the Flask app."""
    app = Flask(__name__)
    @app.before_request
    def before_request():
        # Re-load the fabfile and re-connect to Mongo on every request so
        # edits are picked up without restarting -- presumably a dev-time
        # convenience; TODO confirm this is acceptable in production.
        g.docstring, g.tasks = fab_main.load_fabfile(config.fabfile)[:2]
        g.env, g.env.user, g.env.key_filename = env, config.user, config.key_filename
        g.db = connect(config.MONGO_HOST, config.MONGO_PORT)
    app.debug = debug
    # Imported here, not at module top -- presumably to avoid a circular
    # import at load time.
    from main import main as main_blueprint
    from task import tasks as task_blueprint
    app.register_blueprint(main_blueprint)
    app.register_blueprint(task_blueprint)
    socketio.init_app(app)
    return app
"content_hash": "be91d343e90d5dc60d2fc8c3df7c4154",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 29.866666666666667,
"alnum_prop": 0.6837797619047619,
"repo_name": "sohje/Nipsy",
"id": "1dd582034325356e491444050e2a2a1d2b21d09f",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Nipsy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "292"
},
{
"name": "JavaScript",
"bytes": "3046"
},
{
"name": "Python",
"bytes": "9143"
}
],
"symlink_target": ""
} |
"""Tests for sparse ops."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import sparse
def _assert_sparse_tensor_value(test_case_instance, expected, actual):
test_case_instance.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case_instance.assertAllEqual(expected.indices, actual.indices)
test_case_instance.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case_instance.assertAllEqual(expected.values, actual.values)
test_case_instance.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case_instance.assertAllEqual(expected.dense_shape, actual.dense_shape)
@test_util.test_all_tf_execution_regimes
class SparseTest(test_util.TestCase):
  # Copied (with modifications) from:
  # tensorflow/contrib/layers/python/ops/sparse_ops.py.
  # Default behavior under test: zero-valued entries are treated as the
  # ignore value and dropped; indices and dense_shape come back as int64.
  def test_dense_to_sparse_1d(self):
    st = sparse.dense_to_sparse([1, 0, 2, 0])
    result = self.evaluate(st)
    self.assertEqual(result.indices.dtype, np.int64)
    self.assertEqual(result.values.dtype, np.int32)
    self.assertEqual(result.dense_shape.dtype, np.int64)
    self.assertAllEqual([[0], [2]], result.indices)
    self.assertAllEqual([1, 2], result.values)
    self.assertAllEqual([4], result.dense_shape)
  def test_dense_to_sparse_1d_float(self):
    st = sparse.dense_to_sparse([1.5, 0.0, 2.3, 0.0])
    result = self.evaluate(st)
    self.assertEqual(result.indices.dtype, np.int64)
    self.assertEqual(result.values.dtype, np.float32)
    self.assertEqual(result.dense_shape.dtype, np.int64)
    self.assertAllEqual([[0], [2]], result.indices)
    self.assertAllClose([1.5, 2.3], result.values)
    self.assertAllEqual([4], result.dense_shape)
  def test_dense_to_sparse_1d_bool(self):
    # For bools the default ignore value drops False entries.
    st = sparse.dense_to_sparse([True, False, True, False])
    result = self.evaluate(st)
    self.assertEqual(result.indices.dtype, np.int64)
    self.assertEqual(result.values.dtype, np.bool_)
    self.assertEqual(result.dense_shape.dtype, np.int64)
    self.assertAllEqual([[0], [2]], result.indices)
    self.assertAllEqual([True, True], result.values)
    self.assertAllEqual([4], result.dense_shape)
  def test_dense_to_sparse_1d_str(self):
    # For strings the default ignore value drops empty strings.
    st = sparse.dense_to_sparse([b'qwe', b'', b'ewq', b''])
    result = self.evaluate(st)
    self.assertEqual(result.indices.dtype, np.int64)
    self.assertEqual(result.values.dtype, np.object_)
    self.assertEqual(result.dense_shape.dtype, np.int64)
    self.assertAllEqual([[0], [2]], result.indices)
    self.assertAllEqual([b'qwe', b'ewq'], result.values)
    self.assertAllEqual([4], result.dense_shape)
  def test_dense_to_sparse_1d_str_special_ignore(self):
    # An explicit ignore_value overrides the default: here b'qwe' is
    # dropped while empty strings are kept.
    st = sparse.dense_to_sparse([b'qwe', b'', b'ewq', b''], ignore_value=b'qwe')
    result = self.evaluate(st)
    self.assertEqual(result.indices.dtype, np.int64)
    self.assertEqual(result.values.dtype, np.object_)
    self.assertEqual(result.dense_shape.dtype, np.int64)
    self.assertAllEqual([[1], [2], [3]], result.indices)
    self.assertAllEqual([b'', b'ewq', b''], result.values)
    self.assertAllEqual([4], result.dense_shape)
  def test_dense_to_sparse_2d(self):
    st = sparse.dense_to_sparse([[1, 2, 0, 0], [3, 4, 5, 0]])
    result = self.evaluate(st)
    self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
                        result.indices)
    self.assertAllEqual([1, 2, 3, 4, 5], result.values)
    self.assertAllEqual([2, 4], result.dense_shape)
  def test_dense_to_sparse_3d(self):
    st = sparse.dense_to_sparse([[[1, 2, 0, 0], [3, 4, 5, 0]],
                                 [[7, 8, 0, 0], [9, 0, 0, 0]]])
    result = self.evaluate(st)
    self.assertAllEqual(
        [[0, 0, 0],
         [0, 0, 1],
         [0, 1, 0],
         [0, 1, 1],
         [0, 1, 2],
         [1, 0, 0],
         [1, 0, 1],
         [1, 1, 0]],
        result.indices)
    self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
    self.assertAllEqual([2, 2, 4], result.dense_shape)
  # Shape-inference edge cases: partially and fully unknown static shapes
  # are exercised via placeholder_with_default.
  def test_dense_to_sparse_unknown_1d_shape(self):
    tensor = tf1.placeholder_with_default(
        np.array([0, 100, 0, 3], np.int32), shape=[None])
    st = sparse.dense_to_sparse(tensor)
    result = self.evaluate(st)
    self.assertAllEqual([[1], [3]], result.indices)
    self.assertAllEqual([100, 3], result.values)
    self.assertAllEqual([4], result.dense_shape)
  def test_dense_to_sparse_unknown_3d_shape(self):
    tensor = tf1.placeholder_with_default(
        np.array([[[1, 2, 0, 0], [3, 4, 5, 0]], [[7, 8, 0, 0], [9, 0, 0, 0]]],
                 np.int32),
        shape=[None, None, None])
    st = sparse.dense_to_sparse(tensor)
    result = self.evaluate(st)
    self.assertAllEqual(
        [[0, 0, 0],
         [0, 0, 1],
         [0, 1, 0],
         [0, 1, 1],
         [0, 1, 2],
         [1, 0, 0],
         [1, 0, 1],
         [1, 1, 0]],
        result.indices)
    self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
    self.assertAllEqual([2, 2, 4], result.dense_shape)
  def test_dense_to_sparse_unknown_rank(self):
    ph = tf1.placeholder_with_default(
        np.array([[1, 2, 0, 0], [3, 4, 5, 0]], np.int32), shape=None)
    st = sparse.dense_to_sparse(ph)
    result = self.evaluate(st)
    self.assertAllEqual(
        [[0, 0],
         [0, 1],
         [1, 0],
         [1, 1],
         [1, 2]],
        result.indices)
    self.assertAllEqual([1, 2, 3, 4, 5], result.values)
    self.assertAllEqual([2, 4], result.dense_shape)
# Allow running this test module directly.
if __name__ == '__main__':
  test_util.main()
| {
"content_hash": "32a85a90b864945564b78362551cad3a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 80,
"avg_line_length": 37.43333333333333,
"alnum_prop": 0.6258236865538735,
"repo_name": "tensorflow/probability",
"id": "af3d4030bee4875f39eedf54005f64aa0a88c8a6",
"size": "6293",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/math/sparse_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
} |
from django.urls import path
from django.utils.translation import pgettext_lazy
from .views import follow, embed_follow, confirm_follow, unfollow_by_link
urlpatterns = [
    # Follow / embedded-follow a FOI request identified by its primary key.
    path("<int:pk>/follow/", follow, name="foirequestfollower-follow"),
    path(
        "<int:pk>/follow/embed/", embed_follow, name="foirequestfollower-follow_embed"
    ),
    # Confirm/unfollow links carry a check token; the URL prefix is
    # translatable via pgettext_lazy.
    path(
        pgettext_lazy("url part", "confirm/<int:follow_id>/<str:check>/"),
        confirm_follow,
        name="foirequestfollower-confirm_follow",
    ),
    path(
        pgettext_lazy("url part", "unfollow/<int:follow_id>/<str:check>/"),
        unfollow_by_link,
        name="foirequestfollower-confirm_unfollow",
    ),
]
| {
"content_hash": "0a47853f498fc2551dcc3f910737d4f7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 86,
"avg_line_length": 32.714285714285715,
"alnum_prop": 0.653566229985444,
"repo_name": "fin/froide",
"id": "5759cfc630e7e2ddefe2f1ed5ec4b339357ed2b6",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/foirequestfollower/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
} |
''' This file serves db_content.py '''
import json
from sqlalchemy.orm import sessionmaker, scoped_session
# this are for parse_skill_name()
from atod.db import engine
from atod.db_models.hero import HeroModel
from atod.utils import dictionary
session = scoped_session(sessionmaker(bind=engine))
# FIXME: write heroes_base_info table to fix note of parse_skill_name().
def heroes_to_rows(heroes_dict, schema):
    ''' Yields one database row per hero found in the parsed heroes file.

    The json file contains much more information than the database needs,
    so only the fields listed in `schema` (plus the name fields) are kept.

    Args:
        heroes_dict (dict) : parsed to dict npc_heroes.txt
        schema (list of str): fields what should be extracted from file

    Yields:
        dict: where schema elements are keys
    '''
    heroes = heroes_dict['DOTAHeroes']
    for game_name, info in heroes.items():
        # Skip the file version entry, base-hero templates and entries
        # without a human-readable name.
        # TODO: check if this needed in the new file format
        if (game_name == 'Version' or 'hero_base' in game_name
                or 'workshop_guide_name' not in info):
            continue
        row = dict()
        row['in_game_name'] = game_name.split('npc_dota_hero_')[1]
        row['name'] = info['workshop_guide_name']
        del info['workshop_guide_name']
        # Optional alternative names.
        row['aliases'] = info.get('NameAliases')
        flat_info = dictionary.make_flat_dict(info)
        # Fill the remaining schema fields from the flattened description.
        for field in schema:
            if field in row:
                continue
            try:
                row[field] = flat_info[field]
            # hero_base doesn't have some fields
            except KeyError:
                row[field] = None
            # there are some non hero fields causes this
            except TypeError:
                pass
        if row:
            yield row
# DEPRECATED IN CURRENT VERSION
def items_file_to_rows(filename, scheme):
''' Get rows for items table. '''
with open(filename, 'r') as fp:
data = json.load(fp)
global_key = list(data.keys())[0]
data = data[global_key]
for in_game_name, description in data.items():
if in_game_name == 'Version':
continue
tmp = dict()
tmp['in_game_name'] = in_game_name.split('item_')[1]
tmp['name'] = tmp['in_game_name'].replace('_', ' ').title()
if 'ItemAliases' in description:
tmp['aliases'] = description['ItemAliases']
else:
tmp['aliases'] = None
for key in scheme.keys():
try:
tmp[key] = description[key]
except KeyError:
tmp[key] = None
# there are some non hero fields causes this
except TypeError:
pass
# extract specials
try:
specials = description['AbilitySpecial']
for key, value in specials.items():
for k, v in value.items():
if k != 'var_type':
tmp[k] = v
except TypeError:
print(description)
except KeyError:
pass
for kk in tmp.keys():
if kk not in scheme.keys():
print('retard alert')
if len(tmp) > 0:
yield tmp
def get_types(abilities):
    ''' Maps AbilitySpecial field names to the type of each field.

    Args:
        abilities (dict) - DOTAAbilities from items.json or npc_abilities

    Returns:
        fields_types (dict) - mapping of field to its type
    '''
    fields_types = {}
    for ability, properties in abilities.items():
        try:
            for key, value in properties['AbilitySpecial'].items():
                for k, v in value.items():
                    # BUG FIX: the original tested the *outer* `key` (the
                    # special's index, e.g. "01") instead of the inner `k`,
                    # so 'var_type' itself leaked into the result mapping.
                    if k != 'var_type' and k:
                        fields_types[k] = value['var_type']
        # all the recipies will fall there
        except KeyError:
            pass
        except TypeError:
            pass
    return fields_types
def ability_to_row(description, schema):
    ''' Transforms ability description to database row.

    Produces one row per ability level: scalar properties are repeated on
    every level, list-valued properties are indexed by level (the last
    value is reused when a list is shorter than the deepest one).

    Args:
        description (dict): mapping property of ability to its values
        schema (list of strings): fields that should be extracted from
            the description

    Yields:
        dict: mapping of properties to its values on some level. Level is
            added to result (as 'lvl' key).
    '''
    scalars = dict()
    leveled = dict()
    # Separate per-level (list) values from constant ones.
    for prop, value in description.items():
        if isinstance(value, list):
            leveled[prop] = value
        else:
            scalars[prop] = value
    # Every schema column must be present; missing ones become None.
    for column in schema:
        if column not in scalars and column not in leveled:
            scalars[column] = None
    if not leveled:
        scalars['lvl'] = 1
        yield scalars
        return
    max_lvl = max(len(values) for values in leveled.values())
    for lvl in range(max_lvl):
        row = scalars.copy()
        for prop, values in leveled.items():
            # Reuse the last value when this list is shorter than max_lvl.
            row[prop] = values[lvl] if lvl < len(values) else values[-1]
        row['lvl'] = lvl + 1
        yield row
def parse_skill_name(skill):
    ''' Splits skill name from game to hero name and skill name.

    In-game files store skills names as <hero>_<skill>; this matches the
    known hero names (from the heroes table) against the prefix.

    Notes:
        For this function to work heroes table for current version should
        be created.

    Args:
        skill (str): skills names from in-game files

    Returns:
        parsed (hero, skill_name): or an empty tuple when no hero matches
    '''
    for hero_name in (h[0] for h in session.query(HeroModel.in_game_name).all()):
        prefix, matched, remainder = skill.partition(hero_name)
        # A match only counts when the hero name is the leading prefix.
        if hero_name in skill and prefix == '':
            return matched, remainder.lstrip('_')
    return ()
def binarize_labels(labels, schema):
    ''' Binarize abilities labels.

    Maps every element of `schema` to 1 when its 1-based position occurs
    in `labels`, otherwise to 0.

    Args:
        labels (iterable): numbers of marked labels
        schema (iterable): keys in result dictionary

    Returns:
        binary (dict): mapping of schema to labels
    '''
    return {label: (1 if position + 1 in labels else 0)
            for position, label in enumerate(schema)}
| {
"content_hash": "45eccbbf2d292eff10897fa3136e9baa",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 78,
"avg_line_length": 28.326446280991735,
"alnum_prop": 0.5571115973741795,
"repo_name": "gasabr/AtoD",
"id": "5f31a88387c33b1ce8607890eae18198cfe37f6a",
"size": "6855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atod/utils/json2rows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10271"
},
{
"name": "Python",
"bytes": "125304"
}
],
"symlink_target": ""
} |
import os
import pathlib
import json
import time
import shutil
from filelock import Timeout
import pytest
import responses
import torch
from allennlp.common.file_utils import (
FileLock,
_resource_to_filename,
filename_to_url,
cached_path,
open_compressed,
CacheFile,
_Meta,
_find_entries,
inspect_cache,
remove_cache_entries,
LocalCacheResource,
TensorCache,
)
from allennlp.common import Params
from allennlp.modules.token_embedders import ElmoTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
from allennlp.predictors import Predictor
def set_up_glove(url: str, byt: bytes, change_etag_every: int = 1000):
    """Register mock HTTP responses serving `byt` as a fake glove file.

    GET requests to `url` return `byt` as a gzip payload. HEAD requests
    return an ETag header whose value is bumped every `change_etag_every`
    requests, which simulates a new version of the remote file appearing.
    """
    responses.add(
        responses.GET,
        url,
        body=byt,
        status=200,
        content_type="application/gzip",
        stream=True,
        headers={"Content-Length": str(len(byt))},
    )
    remaining = change_etag_every
    current_etag = "0"
    def head_callback(_):
        """
        Writing this as a callback allows different responses to different HEAD requests.
        In our case, we're going to change the ETag header every `change_etag_every`
        requests, which will allow us to simulate having a new version of the file.
        """
        nonlocal remaining, current_etag
        headers = {"ETag": current_etag}
        # Count down; once the budget is spent, bump the ETag and reset.
        remaining -= 1
        if remaining <= 0:
            remaining = change_etag_every
            current_etag = str(int(current_etag) + 1)
        return (200, headers, "")
    responses.add_callback(responses.HEAD, url, callback=head_callback)
class TestFileLock(AllenNlpTestCase):
    def setup_method(self):
        super().setup_method()
        # Set up a regular lock and a read-only lock.
        open(self.TEST_DIR / "lock", "a").close()
        open(self.TEST_DIR / "read_only_lock", "a").close()
        # 0o555 = read+execute only, no write permission for anyone.
        os.chmod(self.TEST_DIR / "read_only_lock", 0o555)
        # Also set up a read-only directory.
        os.mkdir(self.TEST_DIR / "read_only_dir", 0o555)
    def test_locking(self):
        with FileLock(self.TEST_DIR / "lock"):
            # Trying to acquire the lock again should fail.
            with pytest.raises(Timeout):
                with FileLock(self.TEST_DIR / "lock", timeout=0.1):
                    pass
        # Trying to acquire a lock when lacking write permissions on the file should fail.
        with pytest.raises(PermissionError):
            with FileLock(self.TEST_DIR / "read_only_lock"):
                pass
        # But this should only issue a warning if we set the `read_only_ok` flag to `True`.
        with pytest.warns(UserWarning, match="Lacking permissions"):
            with FileLock(self.TEST_DIR / "read_only_lock", read_only_ok=True):
                pass
        # However this should always fail when we lack write permissions and the file lock
        # doesn't exist yet.
        with pytest.raises(PermissionError):
            with FileLock(self.TEST_DIR / "read_only_dir" / "lock", read_only_ok=True):
                pass
class TestFileUtils(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.glove_file = self.FIXTURES_ROOT / "embeddings/glove.6B.100d.sample.txt.gz"
with open(self.glove_file, "rb") as glove:
self.glove_bytes = glove.read()
def test_resource_to_filename(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
"https://allennlp.s3.amazonaws.com" + "/long" * 20 + "/url",
]:
filename = _resource_to_filename(url)
assert "http" not in filename
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
with pytest.raises(FileNotFoundError):
filename_to_url(filename, cache_dir=self.TEST_DIR)
json.dump(
{"url": url, "etag": None},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag is None
def test_resource_to_filename_with_etags(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = _resource_to_filename(url, etag="mytag")
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
baseurl = "http://allenai.org/"
assert _resource_to_filename(baseurl + "1") != _resource_to_filename(baseurl, etag="1")
def test_resource_to_filename_with_etags_eliminates_quotes(self):
for url in [
"http://allenai.org",
"http://allennlp.org",
"https://www.google.com",
"http://pytorch.org",
]:
filename = _resource_to_filename(url, etag='"mytag"')
assert "http" not in filename
pathlib.Path(os.path.join(self.TEST_DIR, filename)).touch()
json.dump(
{"url": url, "etag": "mytag"},
open(os.path.join(self.TEST_DIR, filename + ".json"), "w"),
)
back_to_url, etag = filename_to_url(filename, cache_dir=self.TEST_DIR)
assert back_to_url == url
assert etag == "mytag"
@responses.activate
def test_cached_path(self):
url = "http://fake.datastore.com/glove.txt.gz"
set_up_glove(url, self.glove_bytes)
# non-existent file
with pytest.raises(FileNotFoundError):
filename = cached_path(self.FIXTURES_ROOT / "does_not_exist" / "fake_file.tar.gz")
# unparsable URI
with pytest.raises(ValueError):
filename = cached_path("fakescheme://path/to/fake/file.tar.gz")
# existing file as path
assert cached_path(self.glove_file) == str(self.glove_file)
# caches urls
# filename = cached_path(url, cache_dir=self.TEST_DIR)
# assert len(responses.calls) == 2
# assert filename == os.path.join(self.TEST_DIR, _resource_to_filename(url, etag="0"))
# with open(filename, "rb") as cached_file:
# assert cached_file.read() == self.glove_bytes
# archives
filename = cached_path(
self.FIXTURES_ROOT / "common" / "quote.tar.gz!quote.txt",
extract_archive=True,
cache_dir=self.TEST_DIR,
)
with open(filename, "r") as f:
assert f.read().startswith("I mean, ")
@responses.activate
def test_cached_path_http_err_handling(self):
url_404 = "http://fake.datastore.com/does-not-exist"
byt = b"Does not exist"
for method in (responses.GET, responses.HEAD):
responses.add(
method,
url_404,
body=byt,
status=404,
headers={"Content-Length": str(len(byt))},
)
with pytest.raises(FileNotFoundError):
cached_path(url_404, cache_dir=self.TEST_DIR)
def test_extract_with_external_symlink(self):
dangerous_file = self.FIXTURES_ROOT / "common" / "external_symlink.tar.gz"
with pytest.raises(ValueError):
cached_path(dangerous_file, extract_archive=True)
@pytest.mark.parametrize("suffix", ["bz2", "gz", "xz"])
def test_open_compressed(self, suffix: str):
uncompressed_file = self.FIXTURES_ROOT / "embeddings/fake_embeddings.5d.txt"
with open_compressed(uncompressed_file) as f:
uncompressed_lines = [line.strip() for line in f]
compressed_file = f"{uncompressed_file}.{suffix}"
with open_compressed(compressed_file) as f:
compressed_lines = [line.strip() for line in f]
assert compressed_lines == uncompressed_lines
def test_meta_backwards_compatible(self):
url = "http://fake.datastore.com/glove.txt.gz"
etag = "some-fake-etag"
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url, etag))
with open(filename, "wb") as f:
f.write(self.glove_bytes)
with open(filename + ".json", "w") as meta_file:
json.dump({"url": url, "etag": etag}, meta_file)
meta = _Meta.from_path(filename + ".json")
assert meta.resource == url
assert meta.etag == etag
assert meta.creation_time is not None
assert meta.size == len(self.glove_bytes)
def create_cache_entry(self, url: str, etag: str, as_extraction_dir: bool = False):
filename = os.path.join(self.TEST_DIR, _resource_to_filename(url, etag))
cache_path = filename
if as_extraction_dir:
cache_path = filename + "-extracted"
filename = filename + "-extracted/glove.txt"
os.mkdir(cache_path)
with open(filename, "wb") as f:
f.write(self.glove_bytes)
open(cache_path + ".lock", "a").close()
meta = _Meta(
resource=url,
cached_path=cache_path,
etag=etag,
creation_time=time.time(),
size=len(self.glove_bytes),
extraction_dir=as_extraction_dir,
)
meta.to_file()
def test_inspect(self, capsys):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry(
"http://fake.datastore.com/glove.txt.gz", "etag-3", as_extraction_dir=True
)
inspect_cache(cache_dir=self.TEST_DIR)
captured = capsys.readouterr()
assert "http://fake.datastore.com/glove.txt.gz" in captured.out
assert "2 versions cached" in captured.out
assert "1 version extracted" in captured.out
def test_inspect_with_patterns(self, capsys):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry("http://other.fake.datastore.com/glove.txt.gz", "etag-4")
inspect_cache(cache_dir=self.TEST_DIR, patterns=["http://fake.*"])
captured = capsys.readouterr()
assert "http://fake.datastore.com/glove.txt.gz" in captured.out
assert "2 versions" in captured.out
assert "http://other.fake.datastore.com/glove.txt.gz" not in captured.out
def test_remove_entries(self):
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-1")
self.create_cache_entry("http://fake.datastore.com/glove.txt.gz", "etag-2")
self.create_cache_entry(
"http://fake.datastore.com/glove.txt.gz", "etag-3", as_extraction_dir=True
)
self.create_cache_entry("http://other.fake.datastore.com/glove.txt.gz", "etag-4")
self.create_cache_entry(
"http://other.fake.datastore.com/glove.txt.gz", "etag-5", as_extraction_dir=True
)
reclaimed_space = remove_cache_entries(["http://fake.*"], cache_dir=self.TEST_DIR)
assert reclaimed_space == 3 * len(self.glove_bytes)
size_left, entries_left = _find_entries(cache_dir=self.TEST_DIR)
assert size_left == 2 * len(self.glove_bytes)
assert len(entries_left) == 1
entry_left = list(entries_left.values())[0]
# one regular cache file and one extraction dir
assert len(entry_left[0]) == 1
assert len(entry_left[1]) == 1
# Now remove everything.
remove_cache_entries(["*"], cache_dir=self.TEST_DIR)
assert len(os.listdir(self.TEST_DIR)) == 0
class TestCachedPathWithArchive(AllenNlpTestCase):
    """Tests for cached_path when the resource is a local or remote archive."""

    def setup_method(self):
        super().setup_method()
        # Copy the fixture archives into the scratch directory for each test.
        self.tar_file = self.TEST_DIR / "utf-8.tar.gz"
        shutil.copyfile(
            self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.tar.gz", self.tar_file
        )
        self.zip_file = self.TEST_DIR / "utf-8.zip"
        shutil.copyfile(
            self.FIXTURES_ROOT / "utf-8_sample" / "archives" / "utf-8.zip", self.zip_file
        )

    def check_extracted(self, extracted: str):
        """Sanity-check the layout of an extraction directory."""
        assert os.path.isdir(extracted)
        assert pathlib.Path(extracted).parent == self.TEST_DIR
        for member in ("dummy.txt", "folder/utf-8_sample.txt"):
            assert os.path.exists(os.path.join(extracted, member))
        # The metadata file lives right next to the extraction directory.
        assert os.path.exists(extracted + ".json")

    def _mock_archive_endpoint(self, url, archive_file, content_type):
        """Register GET/HEAD responses serving ``archive_file`` at ``url``."""
        with open(archive_file, "rb") as fh:
            payload = fh.read()
        responses.add(
            responses.GET,
            url,
            body=payload,
            status=200,
            content_type=content_type,
            stream=True,
            headers={"Content-Length": str(len(payload))},
        )
        responses.add(
            responses.HEAD,
            url,
            status=200,
            headers={"ETag": "fake-etag"},
        )

    def test_cached_path_extract_local_tar(self):
        self.check_extracted(
            cached_path(self.tar_file, cache_dir=self.TEST_DIR, extract_archive=True)
        )

    def test_cached_path_extract_local_zip(self):
        self.check_extracted(
            cached_path(self.zip_file, cache_dir=self.TEST_DIR, extract_archive=True)
        )

    @responses.activate
    @pytest.mark.skip(reason="until cached-path/rich versions are resolved")
    def test_cached_path_extract_remote_tar(self):
        url = "http://fake.datastore.com/utf-8.tar.gz"
        self._mock_archive_endpoint(url, self.tar_file, "application/tar+gzip")
        extracted = cached_path(url, cache_dir=self.TEST_DIR, extract_archive=True)
        assert extracted.endswith("-extracted")
        self.check_extracted(extracted)

    @responses.activate
    @pytest.mark.skip(reason="until cached-path/rich versions are resolved")
    def test_cached_path_extract_remote_zip(self):
        url = "http://fake.datastore.com/utf-8.zip"
        self._mock_archive_endpoint(url, self.zip_file, "application/zip")
        extracted = cached_path(url, cache_dir=self.TEST_DIR, extract_archive=True)
        assert extracted.endswith("-extracted")
        self.check_extracted(extracted)
class TestCacheFile(AllenNlpTestCase):
    """Tests for the CacheFile context manager."""

    def test_temp_file_removed_on_error(self):
        """If the body raises, neither the temp file nor the final file may remain."""
        target = self.TEST_DIR / "cache_file"
        with pytest.raises(IOError, match="I made this up"):
            with CacheFile(target) as handle:
                raise IOError("I made this up")
        # Both the temporary file and the destination must have been cleaned up.
        assert not os.path.exists(handle.name)
        assert not os.path.exists(target)
class TestLocalCacheResource(AllenNlpTestCase):
    """Tests for the LocalCacheResource helper."""

    def test_local_cache_resource(self):
        """A result cached under (name, version) is visible to later lookups."""
        with LocalCacheResource("some-computation", "version-1", cache_dir=self.TEST_DIR) as cache:
            assert not cache.cached()
            with cache.writer() as sink:
                json.dump({"a": 1}, sink)
        # A second resource with the same name/version sees the cached result.
        with LocalCacheResource("some-computation", "version-1", cache_dir=self.TEST_DIR) as cache:
            assert cache.cached()
            with cache.reader() as source:
                assert json.load(source)["a"] == 1
class TestTensorCache(AllenNlpTestCase):
    """Tests for TensorCache read/write and read-only semantics."""

    def test_tensor_cache(self):
        # A freshly created cache is writable.
        cache = TensorCache(self.TEST_DIR / "cache")
        assert not cache.read_only
        cache["a"] = torch.tensor([1, 2, 3])
        del cache  # close it

        # Re-open explicitly read-only: writes are rejected, reads still work.
        cache = TensorCache(self.TEST_DIR / "cache", read_only=True)
        assert cache.read_only
        with pytest.raises(ValueError, match="cannot write"):
            cache["b"] = torch.tensor([1, 2, 3])
        assert cache["a"].shape == (3,)
        del cache  # close it

        # With the underlying files made read-only at the OS level, opening
        # the cache should fall back to read-only mode with a warning.
        os.chmod(self.TEST_DIR / "cache", 0o444)
        os.chmod(self.TEST_DIR / "cache-lock", 0o444)
        with pytest.warns(UserWarning, match="cache will be read-only"):
            cache = TensorCache(self.TEST_DIR / "cache")
            assert cache.read_only

    def test_tensor_cache_open_twice(self):
        """Opening the same path twice yields the very same cache object."""
        first = TensorCache(self.TEST_DIR / "multicache")
        first["foo"] = torch.tensor([1, 2, 3])
        second = TensorCache(self.TEST_DIR / "multicache")
        assert first is second

    def test_tensor_cache_upgrade(self):
        """A later writable open upgrades an earlier read-only handle."""
        initial = TensorCache(self.TEST_DIR / "upcache")
        initial["foo"] = torch.tensor([1, 2, 3])
        del initial
        readonly_handle = TensorCache(self.TEST_DIR / "upcache", read_only=True)
        writable_handle = TensorCache(self.TEST_DIR / "upcache")
        # Both handles end up writable once the second open succeeds.
        assert not readonly_handle.read_only
        assert not writable_handle.read_only
        assert torch.allclose(readonly_handle["foo"], torch.tensor([1, 2, 3]))
        writable_handle["bar"] = torch.tensor([2, 3, 4])
        assert torch.allclose(readonly_handle["bar"], writable_handle["bar"])
class TestHFHubDownload(AllenNlpTestCase):
    """Tests for downloading resources from the HuggingFace Hub (``hf://`` scheme)."""

    def test_cached_download(self):
        params = Params(
            {
                "options_file": "hf://lysandre/test-elmo-tiny/options.json",
                "weight_file": "hf://lysandre/test-elmo-tiny/lm_weights.hdf5",
            }
        )
        embedding_layer = ElmoTokenEmbedder.from_params(vocab=None, params=params)
        failure = "Embedding layer badly instantiated from HF Hub."
        assert isinstance(embedding_layer, ElmoTokenEmbedder), failure
        assert embedding_layer.get_output_dim() == 32, failure

    def test_snapshot_download(self):
        predictor = Predictor.from_path("hf://lysandre/test-simple-tagger-tiny")
        assert predictor._dataset_reader._token_indexers["tokens"].namespace == "test_tokens"

    def test_cached_download_no_user_or_org(self):
        path = cached_path("hf://t5-small/config.json", cache_dir=self.TEST_DIR)
        assert os.path.isfile(path)
        assert pathlib.Path(os.path.dirname(path)) == self.TEST_DIR
        # Every download gets an accompanying ".json" metadata file.
        assert os.path.isfile(path + ".json")
        meta = _Meta.from_path(path + ".json")
        assert meta.etag is not None
        assert meta.resource == "hf://t5-small/config.json"

    def test_snapshot_download_no_user_or_org(self):
        # This is the smallest snapshot I could find that is not associated with a user / org.
        model_name = "distilbert-base-german-cased"
        snapshot_path = cached_path(f"hf://{model_name}")
        assert os.path.isdir(snapshot_path)
        assert os.path.isfile(snapshot_path + ".json")
        assert _Meta.from_path(snapshot_path + ".json").resource == f"hf://{model_name}"
| {
"content_hash": "70bd2abf77d2c9037ab5c7de737dd66f",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 99,
"avg_line_length": 38.036468330134355,
"alnum_prop": 0.5955492758742493,
"repo_name": "allenai/allennlp",
"id": "14c61a2291a26e71383357faa5ecd9bc6198167b",
"size": "19817",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/common/file_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
} |
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUBootstrap(NURESTObject):
    """Python wrapper for the VSD ``bootstrap`` REST entity.

    A bootstrap object carries the gateway bootstrap details.
    """

    __rest_name__ = "bootstrap"
    __resource_name__ = "bootstraps"

    # -- Constants ---------------------------------------------------------

    CONST_ZFB_MATCH_ATTRIBUTE_NONE = "NONE"

    CONST_STATUS_INACTIVE = "INACTIVE"

    CONST_ZFB_MATCH_ATTRIBUTE_NSGATEWAY_ID = "NSGATEWAY_ID"

    CONST_ZFB_MATCH_ATTRIBUTE_UUID = "UUID"

    CONST_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT"

    CONST_ZFB_MATCH_ATTRIBUTE_HOSTNAME = "HOSTNAME"

    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"

    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    CONST_ZFB_MATCH_ATTRIBUTE_MAC_ADDRESS = "MAC_ADDRESS"

    CONST_ZFB_MATCH_ATTRIBUTE_IP_ADDRESS = "IP_ADDRESS"

    CONST_ZFB_MATCH_ATTRIBUTE_SERIAL_NUMBER = "SERIAL_NUMBER"

    CONST_STATUS_ACTIVE = "ACTIVE"

    CONST_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK"

    CONST_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED"

    def __init__(self, **kwargs):
        """Build a NUBootstrap instance.

        Every exposed attribute may be passed as a keyword argument, or a
        whole object can be loaded at once through the special ``data``
        keyword, which takes a plain Python dictionary.

        Examples:
            >>> bootstrap = NUBootstrap(id=u'xxxx-xxx-xxx-xxx', name=u'Bootstrap')
            >>> bootstrap = NUBootstrap(data=my_dict)
        """
        super(NUBootstrap, self).__init__()

        # Backing fields for the read/write attributes exposed below.
        self._zfb_info = None
        self._zfb_match_attribute = None
        self._zfb_match_value = None
        self._last_updated_by = None
        self._installer_id = None
        self._entity_scope = None
        self._associated_entity_type = None
        self._status = None
        self._external_id = None

        # Declare each attribute to the bambou REST machinery, mapping the
        # pythonic local name onto the VSD API remote name.
        self.expose_attribute(local_name="zfb_info", remote_name="ZFBInfo", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="zfb_match_attribute", remote_name="ZFBMatchAttribute", attribute_type=str, is_required=False, is_unique=False, choices=[u'HOSTNAME', u'IP_ADDRESS', u'MAC_ADDRESS', u'NONE', u'NSGATEWAY_ID', u'SERIAL_NUMBER', u'UUID'])
        self.expose_attribute(local_name="zfb_match_value", remote_name="ZFBMatchValue", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="installer_id", remote_name="installerID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT'])
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers for child objects.
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # -- Properties --------------------------------------------------------

    @property
    def zfb_info(self):
        """Base64 encoded JSON string of NSG ZFB attribute value pairs (``ZFBInfo`` in the VSD API)."""
        return self._zfb_info

    @zfb_info.setter
    def zfb_info(self, value):
        self._zfb_info = value

    @property
    def zfb_match_attribute(self):
        """Attribute to auto match on (``ZFBMatchAttribute`` in the VSD API)."""
        return self._zfb_match_attribute

    @zfb_match_attribute.setter
    def zfb_match_attribute(self, value):
        self._zfb_match_attribute = value

    @property
    def zfb_match_value(self):
        """Attribute value to auto match on (``ZFBMatchValue`` in the VSD API)."""
        return self._zfb_match_value

    @zfb_match_value.setter
    def zfb_match_value(self, value):
        self._zfb_match_value = value

    @property
    def last_updated_by(self):
        """ID of the user who last updated the object (``lastUpdatedBy`` in the VSD API)."""
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        self._last_updated_by = value

    @property
    def installer_id(self):
        """The Installer ID (``installerID`` in the VSD API)."""
        return self._installer_id

    @installer_id.setter
    def installer_id(self, value):
        self._installer_id = value

    @property
    def entity_scope(self):
        """Whether the entity is scoped to the data center or the enterprise (``entityScope`` in the VSD API)."""
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        self._entity_scope = value

    @property
    def associated_entity_type(self):
        """Object type of the associated entity (``associatedEntityType`` in the VSD API)."""
        return self._associated_entity_type

    @associated_entity_type.setter
    def associated_entity_type(self, value):
        self._associated_entity_type = value

    @property
    def status(self):
        """Bootstrap status."""
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def external_id(self):
        """External object ID, used for integration with third party systems (``externalID`` in the VSD API)."""
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        self._external_id = value
| {
"content_hash": "02bec313d7667657d73cbd461a31e945",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 259,
"avg_line_length": 27.941690962099127,
"alnum_prop": 0.5547787979966611,
"repo_name": "nuagenetworks/vspk-python",
"id": "2f1a1d8d394d3f2456a81b3724c617728b00e002",
"size": "11197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vspk/v5_0/nubootstrap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12909327"
}
],
"symlink_target": ""
} |
"""Create the asset."""
import argparse
import subprocess
# Remember to also update the go_win asset when this is updated.
GO_URL = "https://dl.google.com/go/go1.13.5.linux-amd64.tar.gz"


def create_asset(target_dir):
    """Create the asset.

    Streams the Go toolchain tarball from GO_URL through a pipe into
    ``tar``, extracting it directly into ``target_dir``.

    Args:
        target_dir: Directory the archive contents are extracted into.
    """
    downloader = subprocess.Popen(["curl", GO_URL], stdout=subprocess.PIPE)
    # BUG FIX: the original wrote `"-xzf" "-"` (adjacent string literals,
    # no comma), which Python concatenates into the single argument
    # "-xzf-". Pass "-xzf" and "-" (read the archive from stdin) as two
    # separate argv entries, as intended.
    extractor = subprocess.Popen(
        ["tar", "-C", target_dir, "-xzf", "-"], stdin=downloader.stdout
    )
    downloader.stdout.close()  # Allow curl to receive a SIGPIPE if tar exits.
    extractor.communicate()
def main():
    """Parse command-line flags and create the asset."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--target_dir', '-t', required=True)
    options = parser.parse_args()
    create_asset(options.target_dir)


if __name__ == '__main__':
    main()
| {
"content_hash": "f620b511e7a15e3dde7a13baa5745e06",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 24.821428571428573,
"alnum_prop": 0.6575539568345323,
"repo_name": "endlessm/chromium-browser",
"id": "57a307aba633d11708e59005b04ee68cf4b55ec2",
"size": "854",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/skia/infra/bots/assets/go/create.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Tests Face interface compliance of the gRPC Python Beta API."""
import collections
import unittest
from grpc._adapter import _intermediary_low
from grpc.beta import beta
from grpc_test import resources
from grpc_test import test_common as grpc_test_common
from grpc_test.beta import test_utilities
from grpc_test.framework.common import test_constants
from grpc_test.framework.interfaces.face import test_cases
from grpc_test.framework.interfaces.face import test_interfaces
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
class _SerializationBehaviors(
collections.namedtuple(
'_SerializationBehaviors',
('request_serializers', 'request_deserializers', 'response_serializers',
'response_deserializers',))):
pass
def _serialization_behaviors_from_test_methods(test_methods):
request_serializers = {}
request_deserializers = {}
response_serializers = {}
response_deserializers = {}
for (group, method), test_method in test_methods.iteritems():
request_serializers[group, method] = test_method.serialize_request
request_deserializers[group, method] = test_method.deserialize_request
response_serializers[group, method] = test_method.serialize_response
response_deserializers[group, method] = test_method.deserialize_response
return _SerializationBehaviors(
request_serializers, request_deserializers, response_serializers,
response_deserializers)
class _Implementation(test_interfaces.Implementation):
    """Face-interface test Implementation backed by the gRPC Python Beta API."""

    def instantiate(
        self, methods, method_implementations, multi_method_implementation):
        # Build the per-method (de)serializer maps from the supplied test methods.
        serialization_behaviors = _serialization_behaviors_from_test_methods(
            methods)
        # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest.
        service = next(iter(methods))[0]
        # TODO(nathaniel): Add a "cardinalities_by_group" attribute to
        # _digest.TestServiceDigest.
        cardinalities = {
            method: method_object.cardinality()
            for (group, method), method_object in methods.iteritems()}
        # Stand up a TLS-secured server on an OS-assigned port.
        server_options = beta.server_options(
            request_deserializers=serialization_behaviors.request_deserializers,
            response_serializers=serialization_behaviors.response_serializers,
            thread_pool_size=test_constants.POOL_SIZE)
        server = beta.server(method_implementations, options=server_options)
        server_credentials = beta.ssl_server_credentials(
            [(resources.private_key(), resources.certificate_chain(),),])
        port = server.add_secure_port('[::]:0', server_credentials)
        server.start()
        # Connect a client channel that overrides the target host name so the
        # test certificate validates against _SERVER_HOST_OVERRIDE.
        client_credentials = beta.ssl_client_credentials(
            resources.test_root_certificates(), None, None)
        channel = test_utilities.create_not_really_secure_channel(
            'localhost', port, client_credentials, _SERVER_HOST_OVERRIDE)
        stub_options = beta.stub_options(
            request_serializers=serialization_behaviors.request_serializers,
            response_deserializers=serialization_behaviors.response_deserializers,
            thread_pool_size=test_constants.POOL_SIZE)
        generic_stub = beta.generic_stub(channel, options=stub_options)
        dynamic_stub = beta.dynamic_stub(
            channel, service, cardinalities, options=stub_options)
        return generic_stub, {service: dynamic_stub}, server

    def destantiate(self, memo):
        # ``memo`` is the server returned by instantiate; stop it and wait for
        # the shutdown to complete.
        memo.stop(test_constants.SHORT_TIMEOUT).wait()

    def invocation_metadata(self):
        return grpc_test_common.INVOCATION_INITIAL_METADATA

    def initial_metadata(self):
        return grpc_test_common.SERVICE_INITIAL_METADATA

    def terminal_metadata(self):
        return grpc_test_common.SERVICE_TERMINAL_METADATA

    def code(self):
        return _intermediary_low.Code.OK

    def details(self):
        return grpc_test_common.DETAILS

    def metadata_transmitted(self, original_metadata, transmitted_metadata):
        # ``None`` original metadata counts as trivially transmitted.
        return original_metadata is None or grpc_test_common.metadata_transmitted(
            original_metadata, transmitted_metadata)
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` hook: build a suite over every Face test case."""
    suite_cases = tuple(
        loader.loadTestsFromTestCase(test_case_class)
        for test_case_class in test_cases.test_cases(_Implementation()))
    return unittest.TestSuite(tests=suite_cases)


if __name__ == '__main__':
    unittest.main(verbosity=2)
| {
"content_hash": "bbd16400d4dbf56bbda0fa5b23c55099",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 38.80555555555556,
"alnum_prop": 0.739203054163684,
"repo_name": "cgvarela/grpc",
"id": "ce4c59c0eea6be3043000ce39782907ed68f2d0c",
"size": "5720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio_test/grpc_test/beta/_face_interface_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3959"
},
{
"name": "C",
"bytes": "3684372"
},
{
"name": "C#",
"bytes": "660363"
},
{
"name": "C++",
"bytes": "1022025"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "199086"
},
{
"name": "Makefile",
"bytes": "2033933"
},
{
"name": "Objective-C",
"bytes": "243434"
},
{
"name": "PHP",
"bytes": "66795"
},
{
"name": "Protocol Buffer",
"bytes": "100612"
},
{
"name": "Python",
"bytes": "1460679"
},
{
"name": "Ruby",
"bytes": "350560"
},
{
"name": "Shell",
"bytes": "25857"
}
],
"symlink_target": ""
} |
import time
from subprocess import call, Popen, PIPE
import sys
import os
#--------------- GUI Modules -------------------------
from Tkinter import *
import ttk
import tkFileDialog
from PIL import Image, ImageTk # need to install: sudo apt-get install python-imaging-tk
#--------------- Representation related --------------
import xml.etree.ElementTree as ET # XML parser
from Module import *
from Connection import *
from AssociateJoint import *
from GaitEntry import *
from Section import *
#--------------- Communiation related ----------------
sys.path.append("../../Util/python_util")
from gait_recorder_message_pb2 import *
# import eventlet # need to install: $:sudo pip install eventlet
# from pygazebo import * #need to install: $: sudo pip install pygazebo
# from gztopic import *
from gztopic_multithreading import *
#--------------- Mathematic related ------------------
import numpy as np
#--------------- Debuggin Tools ----------------------
import pdb
#------------- Window Size Settings ------------------
## Main window width, in pixels
window_width = 800
## Main window height, in pixels
window_height = 520
## Window left and right padding, in pixels
Border_width = 20
## Window top and bottom padding, in pixels
#  NOTE(review): name is misspelled ("hieht" -> "height") but is kept
#  as-is because other code in this file references it by this name.
Border_hieht = 40
## Dialog window width, in pixels
DIALOG_WIDTH = 400
## Dialog window height, in pixels
DIALOG_HEIGHT = 260
## PI used throughout this module (alias of numpy.pi)
PI = np.pi
## This is the class of gait recorder python gui application
class GaitRecorder(Frame):
## Constructor
# @param self Object pointer
# @param parent Parent of this app, which is tk root
# @param flag Specifies the state of the this python program,
# 0 for normal mode, 1 for gui debug mode, 2 for python only mode
    def __init__(self, parent, flag):
        """Initialize the gait recorder application.

        Args:
            parent: Parent of this app, a Tk root.
            flag: Program mode -- 0 normal (launches the simulator and
                connects to it), 1 GUI-only debug mode (no simulator, no
                connection), 2 python-only mode (connects to an
                already-running simulator; see the flag checks below).
        """
        Frame.__init__(self, parent)
        #------------ Variables Initialization ---------------
        ## Parent of the App class, which is a tk root
        self.parent = parent
        ## Variable storing the model-name string shown in the widget
        self.modelname = StringVar()
        #----------- Common Command Entry Section ------------
        # Tk variables backing the "Common Command Entry" widgets.
        self.jointSelection = IntVar()
        self.commandType = IntVar()
        self.typeSelection = IntVar()
        self.associatedJointsList = StringVar()
        self.nodeSelect1 = IntVar()
        self.nodeSelect2 = IntVar()
        self.module2Select = StringVar()
        # Tracks joint-angle deltas for the four joints.
        self.jointAngleDifferenceTracking = [0]*4
        ## Variable recording the value entered next to the scroll bar
        self.valueInBox = DoubleVar()
        #----------- Extra Information Section --------------
        self.condition = StringVar()
        self.dependency = StringVar()
        self.elapstime = DoubleVar()
        self.elapsedTimeType = StringVar()
        #----------- Save Path Section -----------------------
        self.savepath = StringVar()
        #----------- Manually Update Section -----------------
        self.selectedcommand = StringVar()
        #------------ Command Recorder -----------------------
        self.selectedkeyFrame = StringVar()
        #------------ Save Path Selection --------------------
        self.savePath = "~/"
        #------------ Joint Associated Joints ----------------
        # Per-joint lists of associated joints (see AssociateJoint).
        self.frontWheelAssociates = []
        self.lftWheelAssociates = []
        self.rgtWheelAssociates = []
        self.centralBendAssociates = []
        self.currentAssociates = []
        #------------ Gait History Related ------------------
        self.commandhis = StringVar()
        self.frameListForGaitRecord = []
        self.frameButtonList = []
        self.frameListList = []
        self.keyFrames = []
        self.keyFrameList = []
        self.modulenames = []
        # self.FrameList = []
        self.CurrentFrameRec = []
        self.CurrentFrameHis = []
        self.DependencyList = []
        self.ModuleList = []
        self.ConnectionList = []
        self.ConnectableModules = []
        self.DisconnectableModules = []
        # Mode flag (0/1/2), kept for later checks by other methods.
        self.initflag = flag
        #-------------- File Definition ---------------------------
        # Options for the open/save file dialogs (tkFileDialog).
        self.file_opt = options = {}
        # options['defaultextension'] = '.txt'
        options['filetypes'] = [('all files', '*'), ('text files', '.txt')]
        options['initialdir'] = '~/'
        # options['initialfile'] = 'myfile.txt'
        options['parent'] = parent
        options['title'] = 'Open Configuration File'
        #------------ Run Simulation and GUI ---------------------
        # Normal mode: launch the Gazebo simulation, then give it a second
        # to come up before connecting.
        if flag == 0:
            self.rungzserver = Popen(['sh', 'RunSimulation.sh'], stdout=PIPE)
            time.sleep(1)
            # self.rungzclient = Popen(['gzclient'], stdout=PIPE)
            # time.sleep(1)
        #-------------- Establish Connection With Simulator -------
        # Modes 0 and 2 talk to the simulator over a gztopic publisher.
        if flag == 0 or flag == 2:
            self.communicator = GzCommunicator()
            self.communicator.start()
            self.gaitPub = self.communicator.CreatePulisher(
                "/gazebo/GaitRecorder/gaitSubscriber",'gait_recorder_message.GaitRecMessage')
        self.initUI()
        # self.SaveCurrentPose()
        self.SelectCommonCommand()
## Initializes the UI of the current python app
# @param self Object pointer
    def initUI(self):
        """Build the whole GUI: a three-tab notebook.

        Tabs: f1 'Record New Gaits' (command entry), f3 'Gait Records'
        (per-module command columns), f2 'Manage Gait Table' (history
        editing).  Geometry depends on the module-level constants
        window_width/window_height and Border_width/Border_hieht
        (pre-existing misspelling kept for consistency).
        """
        self.parent.title("Gait Table Recorder")
        self.pack(fill=BOTH, expand=1)
        # okButton = Button(self, text="OK")
        # okButton.pack(side=RIGHT)
        n = ttk.Notebook(self)
        f1 = Frame(n,height=window_height-Border_hieht,width=window_width-Border_width,relief=RAISED); # first page, which would get widgets gridded into it
        f3 = Frame(n,height=window_height-Border_hieht,width=window_width-Border_width,relief=RAISED)
        f2 = Frame(n,height=window_height-Border_hieht,width=window_width-Border_width,relief=RAISED); # second page
        n.add(f1, text='Record New Gaits')
        n.add(f3, text='Gait Records')
        n.add(f2, text='Manage Gait Table')
        n.pack()
        # --------------- Close Button ------------------------------
        closeButton = Button(f1, text="Close")
        closeButton["command"] = self.CloseWindow
        closeButton.place(x = window_width-Border_width-5, y = window_height-Border_hieht-5, anchor = SE)
        closeButton2 = Button(f2, text="Close")
        closeButton2["command"] = self.CloseWindow
        closeButton2.place(x = window_width-Border_width-5, y = window_height-Border_hieht-5, anchor = SE)
        #---------------- Model Name ---------------------------------
        label = Label(f1, text='Select Model: ')
        label.place(x = 10, y = 10)
        # Populated later by ReadInConfiguration via self.name['values']
        self.name = ttk.Combobox(f1, textvariable=self.modelname, width = 10) #, command = self.checkConnectivity
        self.name['values'] = ()
        self.name.bind('<<ComboboxSelected>>',self.UpdateJoint)
        self.name.place(x = 100, y = 10)
        #--------------- Joint Angle Modification -------------------
        JointModSec = ttk.Labelframe(f1, text='Common Command Entry ', width = 760, height = 250)
        JointModSec.place(x = 10, y = 40)
        self.selectCommonCommand = Radiobutton(JointModSec, text='Add Joint Update Command', variable=self.commandType, value=0, command = self.SelectCommonCommand)
        self.selectCommonCommand.place(x = 10, y = 0)
        label2 = Label(JointModSec, text='1. Select Joint ')
        label2.place(x = 10, y = 25)
        # Decorative module picture; keep a reference on the label so the
        # PhotoImage is not garbage-collected.
        bard = Image.open("SmallSmores.png")
        bardejov = ImageTk.PhotoImage(bard)
        label3 = Label(JointModSec, image=bardejov)
        label3.image = bardejov
        label3.place(x=90, y=70)
        # Joint radio values match the joint indices used elsewhere:
        # 0 front wheel, 1 left wheel, 2 right wheel, 3 central bend.
        self.bend_joint = Radiobutton(JointModSec, text='Central Bending', variable=self.jointSelection, value=3, command = self.UpdateJointValue)
        self.left_joint = Radiobutton(JointModSec, text='Lft Wheel', variable=self.jointSelection, value=1, command = self.UpdateJointValue)
        self.right_joint = Radiobutton(JointModSec, text='Rgt Wheel', variable=self.jointSelection, value=2, command = self.UpdateJointValue)
        self.front_joint = Radiobutton(JointModSec, text='Front Wheel', variable=self.jointSelection, value=0, command = self.UpdateJointValue)
        self.front_joint.select()
        self.bend_joint.place(x= 125, y = 55,anchor = CENTER)
        self.front_joint.place(x= 125, y = 165,anchor = CENTER)
        self.right_joint.place(x= 45, y = 120,anchor = CENTER)
        self.left_joint.place(x= 215, y = 120,anchor = CENTER)
        label4 = Label(JointModSec, text='2. Select Type ')
        label4.place(x = 280, y = 25)
        # Only position (angle) control is enabled; speed/torque are
        # placeholders kept DISABLED.
        self.modeAngle = Radiobutton(JointModSec, text='Angle', variable=self.typeSelection, value=0, command = self.UpdateJointValue)
        self.modeSpeed = Radiobutton(JointModSec, text='Speed', variable=self.typeSelection, value=1, state=DISABLED, command = self.UpdateJointValue)
        self.modeTorque = Radiobutton(JointModSec, text='Torque', variable=self.typeSelection, value=2, state=DISABLED, command = self.UpdateJointValue)
        self.modeAngle.select()
        self.modeAngle.place(x = 280, y = 60)
        self.modeSpeed.place(x = 280, y = 90)
        self.modeTorque.place(x = 280, y = 120)
        label5 = Label(JointModSec, text='4. Select Value ')
        label5.place(x = 10, y = 175)
        # Slider and entry box are two views of the same value.
        self.valueSetting = Scale(JointModSec, from_=-180, to=180, orient=HORIZONTAL,length = 320, resolution = 5, state = NORMAL, command = self.DynamicUpdate)
        self.valueSetting.place(x = 10, y = 190)
        self.valueSettingBox = Entry(JointModSec, width=6, textvariable=self.valueInBox)
        self.valueSettingBox.bind('<Return>',self.UpdateFromValueBox)
        self.valueSettingBox.place(x = 340, y = 205)
        label6 = Label(JointModSec, text='3. Select Associated Joints ')
        label6.place(x = 400, y = 0)
        self.associatedJoints = Listbox(JointModSec, width=40, height=10,listvariable = self.associatedJointsList)
        self.associatedJoints.place(x = 400, y = 60)
        joint_scroller = Scrollbar(JointModSec, command=self.associatedJoints.yview)
        self.associatedJoints.config(yscrollcommand=joint_scroller.set)
        joint_scroller.place(x = 723, y = 60, height = 165)
        self.ascModify = Button(JointModSec, text='Add', width = 8, command = self.AddAssociates)
        self.ascModify.place(x = 400, y = 30)
        self.ascDelete = Button(JointModSec, text='Delete', width = 8, command = self.DeleteAnAssociate)
        self.ascDelete.place(x = 645, y = 30)
        #---------------- Connection & Disconnection ----------------
        # NOTE(review): 'Sepcial' is a pre-existing typo in a user-visible
        # label; left unchanged here since this is a doc-only pass.
        SpecialCommandSec = ttk.Labelframe(f1, text='Sepcial Command Entry ', width = 350, height = 140)
        SpecialCommandSec.place(x = 10, y = 300)
        label_sc1 = Label(SpecialCommandSec, text = "Add")
        label_sc1.place(x = 10,y = 0)
        self.connectionCommand = Radiobutton(SpecialCommandSec, text='Connection', variable=self.commandType, value=1, command = self.SelectConnection)
        self.connectionCommand.place(x = 40, y = 0)
        label_sc2 = Label(SpecialCommandSec, text = "or")
        label_sc2.place(x = 150, y = 0)
        self.disconnectionCommand = Radiobutton(SpecialCommandSec, text='Disconnection', variable=self.commandType, value=2, command = self.SelectDisconnection)
        self.disconnectionCommand.place(x = 170, y = 0)
        label_sc3 = Label(SpecialCommandSec, text = "Node of current module")
        label_sc3.place(x = 10, y = 30)
        self.node1Selection = ttk.Combobox(SpecialCommandSec, textvariable=self.nodeSelect1, width = 10)
        self.node1Selection['values'] = ()
        self.node1Selection.bind('<<ComboboxSelected>>',self.SelectNode1)
        self.node1Selection.place(x = 170, y = 30)
        label_sc4 = Label(SpecialCommandSec, text = "Module 2")
        label_sc4.place(x = 10, y = 60)
        self.module2Selection = ttk.Combobox(SpecialCommandSec, textvariable=self.module2Select, width = 10)
        self.module2Selection['values'] = ()
        self.module2Selection.bind('<<ComboboxSelected>>',self.SelectSecondModule)
        self.module2Selection.place(x = 90, y = 60)
        label_sc5 = Label(SpecialCommandSec, text = "Node of module 2")
        label_sc5.place(x = 10, y = 90)
        self.node2Selection = ttk.Combobox(SpecialCommandSec, textvariable=self.nodeSelect2, width = 10)
        self.node2Selection['values'] = ()
        self.node2Selection.place(x = 130, y = 90)
        #---------------- Extra Information -------------------------
        ExtraInfoSec = ttk.Labelframe(f1, text='Extra Information ', width = 400, height = 90)
        ExtraInfoSec.place(x = 370, y = 300)
        label7 = Label(ExtraInfoSec, text='Condition ')
        label7.place(x = 10, y = 10)
        Condition = Entry(ExtraInfoSec, textvariable=self.condition, width = 12)
        Condition.place(x = 10, y = 35)
        label8 = Label(ExtraInfoSec, text='Dependency ')
        label8.place(x = 120, y = 10)
        # Choices refreshed by RefreshDependencyList().
        self.Dependency = ttk.Combobox(ExtraInfoSec, textvariable=self.dependency, width = 12) #, command = self.checkConnectivity
        self.Dependency['values'] = ()
        # self.Dependency.bind('<<ComboboxSelected>>',self.UpdateJoint)
        self.Dependency.place(x = 120, y = 35)
        label9 = Label(ExtraInfoSec, text='Expected elapsed time ')
        label9.place(x = 240, y = 10)
        ElapsTime = Entry(ExtraInfoSec, textvariable=self.elapstime, width = 10)
        ElapsTime.place(x = 240, y = 35)
        # Unit selector for the elapsed time; interpreted in AddGaitEntry.
        Timeoption = ttk.Combobox(ExtraInfoSec, textvariable = self.elapsedTimeType, width =5)
        Timeoption['values'] = ('none','sec','msec')
        Timeoption.set('none')
        Timeoption.place(x = 330, y = 35)
        #--------------- Command Records ---------------------------
        label_f3_0 = Label(f3, text = "Select section: ")
        label_f3_0.place(x = 10, y = 5)
        self.keyFrame = ttk.Combobox(f3, textvariable=self.selectedkeyFrame, width = 10)
        self.keyFrame.place(x = 105, y = 5)
        self.keyFrame.bind('<<ComboboxSelected>>',self.UpdateFrameWindows)
        # One pane per module; panes are filled in AddGaitToCurrentFrame.
        self.panedWindow = PanedWindow(f3)
        self.panedWindow.place(x = 0, y = 25, relheight = 0.95, width = window_width - 30)
        self.scollForCommands = Scrollbar(f3)
        self.scollForCommands.place(x = window_width - 30, y = 50, relheight = 0.90)
        #---------------- Save Path -------------------------------
        SavePathSec = ttk.Labelframe(f1, text='Save Path ', width = 400, height = 50)
        SavePathSec.place(x = 370, y = 390)
        # self.savepath is the StringVar bound to the entry;
        # self.savePath is the plain-string default directory.
        Savepath = Entry(SavePathSec, textvariable=self.savepath, width = 38)
        self.savepath.set(self.savePath)
        Savepath.place(x = 10, y = 3)
        selectPath = Button(SavePathSec, text = 'Select', command = self.SelectSavePath)
        selectPath.place(x = 325, y = 0)
        #----------------- Command History --------------------------
        self.CommandHis = Listbox(f2, width=44, height=24,listvariable = self.commandhis)
        self.CommandHis.bind('<<ListboxSelect>>', self.ModifyHistory)
        CommandHisScroller = Scrollbar(f2, command=self.CommandHis.yview)
        self.CommandHis.config(yscrollcommand=CommandHisScroller.set)
        CommandHisScroller.place(x = 361, y = 40, height = 390)
        self.CommandHis.place(x = 10, y = 40)
        #----------------- Update Command --------------------------
        UpdateCommandSec = ttk.Labelframe(f2, text='Update Command ', width = 370, height = 90)
        UpdateCommandSec.place(x = 390, y = 40)
        CommandEntry = Entry(UpdateCommandSec, textvariable=self.selectedcommand, width = 42)
        CommandEntry.place(x = 10, y = 5)
        # Enabled only after a history row is selected (see ModifyHistory).
        self.CommandUpdateBtn = Button(UpdateCommandSec, text = "Update", command = self.UpdateSingleGaitEntry, state = DISABLED)
        self.CommandUpdateBtn.place(x = 10, y = 35)
        self.CommandDeleteBtn = Button(UpdateCommandSec, text = "Delete", command = self.DeleteSingleGait, state = DISABLED)
        self.CommandDeleteBtn.place(x = 290, y = 35)
        #---------------- Frame Based Operation --------------------
        FrameOpSec = ttk.Labelframe(f2, text='Section Based Commands ', width = 370, height = 65)
        FrameOpSec.place(x = 390, y = 140)
        FramePlay = Button(FrameOpSec, text = "Play Current Section", state = DISABLED)
        FramePlay.place(x = 5, y = 10)
        self.FrameDelete = Button(FrameOpSec, text = "Delete All After Current Section", state = DISABLED)
        self.FrameDelete.place(x = 152, y = 10)
        # WarningLabel = Label(FrameOpSec, text = "Warning: Delete current frame will delte all the frames after current frame") # , font={"family":"Times", "size":8, "weight":"BOLD"}
        # WarningLabel.place(x = 10, y = 40)
        #--------------- Play All Button --------------------------
        PlayallButton = Button(f2, text = "Play all the sections")
        PlayallButton.place(x = window_width-Border_width-130, y = window_height-Border_hieht-5, anchor = SE)
        #---------------- Open File Button ---------------------------
        Openfile = Button(f1, text = "Open Configuration", command = self.AskOpenFile)
        Openfile.place(x = window_width-Border_width-130, y = window_height-Border_hieht-5, anchor = SE)
        #---------------- Open Existing Gait ---------------------------
        self.OpenGait = Button(f1, text = "Open Gait File", command = self.OpenGaitFile, state = DISABLED)
        self.OpenGait.place(x = window_width-Border_width-270, y = window_height-Border_hieht-5, anchor = SE)
        #---------------- Save Button --------------------------------
        self.saveButton = Button(f1, text="Save", command = self.SaveGaitTable, state = DISABLED)
        self.saveButton.place(x = window_width-Border_width-65, y = window_height-Border_hieht-5, anchor = SE)
        self.saveButton2 = Button(f2, text="Save", command = self.SaveGaitTable, state = DISABLED)
        self.saveButton2.place(x = window_width-Border_width-65, y = window_height-Border_hieht-5, anchor = SE)
        #---------------- Play Frame --------------------------------
        self.Playframe = Button(f1, text = "Play Section", command = self.PlayFrame) #, state = DISABLED)
        self.Playframe.place(x = 125, y = window_height-Border_hieht-5, anchor = SW)
        #---------------- Reset Frame -------------------------------
        self.Resetframe = Button(f1,text = "Reset", command = self.Reset)
        self.Resetframe.place(x = 225, y = window_height-Border_hieht-5, anchor = SW)
        #---------------- Add Frame --------------------------------
        self.Addframe = Button(f1,text = "Save Section", command = self.SaveFrame, state = DISABLED)
        self.Addframe.place(x = 285, y = window_height-Border_hieht-5, anchor = SW)
        #----------------- Add Current Command ---------------------
        self.Addcommand = Button(f1,text = "Add Command", command = self.AddGaitEntry, state = DISABLED, width = 11)
        self.Addcommand.place(x = 5, y = window_height-Border_hieht-5, anchor = SW)
## Callback for close button
# @param self object pointer
def CloseWindow(self):
if self.initflag==0 or self.initflag==2:
self.communicator.Close()
if self.initflag==0:
self.rungzserver.terminate()
try:
self.rungzclient.terminate()
except Exception, e:
pass
call(["pkill", "gzserver"])
call(["pkill", "gzclient"])
self.quit()
## Initializaed the section, don't be fooled by the name
# @param self Object pointer
def InitialFrame(self):
self.currentFrame = Section("Section_0")
self.currentFrame.RecordCurrentPosition(self.ModuleList)
self.keyFrames = []
self.keyFrames.append(self.currentFrame)
self.DeleteAllWidgetsOnHisWindow()
self.keyFrameList.append(self.currentFrame.FrameID)
self.keyFrame["values"] = tuple(self.keyFrameList)
self.selectedkeyFrame.set("Section_0")
self.allCommands = []
#---------------- Open Configurations ----------------------
## Callback of Open Configuration button
# @param self Object pointer
    def AskOpenFile(self):
        # Pops a file-open dialog, remembers the chosen directory as the
        # default save path, parses the configuration and enables the
        # "Open Gait File" button.  Returns silently when the dialog is
        # cancelled.
        self.file_opt['title'] = 'Open Configuration File'
        filename = tkFileDialog.askopenfilename(**self.file_opt)
        if filename == "":
            return
        # filename = "/home/edward/.gazebo/models/SMORES8Jack/InitialConfiguration"
        # open file on your own
        if filename:
            print "Filename is : ",filename
            # configFile = open(filename, 'r')
            # Everything up to the last '/' is kept as the save directory.
            self.savePath = filename[0:filename.rfind("/")]
            self.savepath.set(self.savePath)
            self.ReadInConfiguration(filename)
            self.OpenGait["state"] = NORMAL
## Parse the configuration file and build the configuration tree
# @param self Objetc pointer
# @param filename Name of the configuration file
    def ReadInConfiguration(self, filename):
        # Parses the XML configuration file, rebuilds the module and
        # connection lists, forwards the configuration to the simulator
        # and (when running the full simulation) restarts gzclient.
        # @param filename Name of the configuration file
        self.ModuleList = []
        self.modulenames = []
        self.ConnectionList = []
        self.tree = ET.parse(filename)
        root = self.tree.getroot()
        print "Root is: ",root.tag
        # Expected schema: <modules><module><name/><joints/><position/>...
        # followed by <connections><connection><module1/><module2/>
        # <node1/><node2/>...  -- inferred from the lookups below.
        modules = root.find("modules")
        # self.modulenames = []
        for eachmodule in modules.findall('module') :
            modelname = eachmodule.find('name').text
            print "Module name: ",modelname
            jointanglestr = eachmodule.find('joints').text
            print "Joint angle: ",self.StringToList(jointanglestr)
            positionstr = eachmodule.find('position').text
            newmodule = Module(modelname, self.StringToList(jointanglestr),self.StringToList(positionstr))
            self.ModuleList.append(newmodule)
            self.modulenames.append(modelname)
        connections = root.find("connections")
        for eachconnection in connections.findall('connection') :
            model1name = eachconnection.find("module1").text
            model2name = eachconnection.find("module2").text
            node1 = int(eachconnection.find("node1").text)
            node2 = int(eachconnection.find("node2").text)
            newconnection = Connection(self.GetModuleByName(model1name),self.GetModuleByName(model2name),node1,node2)
            self.ConnectionList.append(newconnection)
            # Register the new connection on both endpoint modules.
            self.GetModuleByName(model1name).connection(node1,self.ConnectionList[-1])
            self.GetModuleByName(model2name).connection(node2,self.ConnectionList[-1])
        self.name['values'] = tuple(self.modulenames)
        if self.initflag==0 or self.initflag==2:
            self.SendLoadConfigurationMessage(filename)
        if self.initflag==0:
            try:
                self.rungzclient.terminate()
            except Exception, e:
                # gzclient may not be running yet; ignore
                pass
            # Give gzserver time to load the new world before reattaching
            # the client.
            time.sleep(2)
            self.rungzclient = Popen(['gzclient'], stdout=PIPE)
        self.InitialFrame()
## Creates a open configuration request and publish to worldplugin
# @param self Object pointer
# @param configure_path Path of the configuration file
def SendLoadConfigurationMessage(self,configure_path):
newmessage = GaitRecMessage()
newmessage.ModelName = "Module_0"
newmessage.NewFrame = False
newmessage.PlayStatus = False
newmessage.LoadConfiguration = True
newmessage.ExtrInfo = configure_path
if self.initflag==0 or self.initflag==2:
self.gaitPub.Publish(newmessage)
#---------------- Module Selection ----------------------
## Callback of Select Model Combobox
# @param self Object pointer
# @param args Other arguments
def UpdateJoint(self,*args):
modelname = self.modelname.get()
moduleObj = self.GetModuleByName(modelname)
self.UpdateJointValue()
# self.elapstime.set(0.0)
self.jointAngleDifferenceTracking = [0]*4
self.Addcommand["state"] = NORMAL
self.ResetAssociateWindow()
# self.Disconnect["state"] = NORMAL
#---------------- Utility functions --------------------
## Get module object by specifying the name
# @param self Object pointer
# @param modelname String: Name of the model
# @return if there is a model return the model object, otherwise return False
def GetModuleByName(self,modelname):
for eachmodule in self.ModuleList:
if eachmodule.ModelName == modelname:
return eachmodule
return False
## Refresh the dependency list
# @param self Object pointer
def RefreshDependencyList(self):
self.Dependency['values'] = tuple(self.DependencyList)
## Convert a string that has multiple values to tuple
# @param self Object pointer
# @param anglestring A string that has values separated by spaces
# @return Tuple of those values
def StringToTuple(self, anglestring):
jointangles = [float(x) for x in anglestring.split()]
return tuple(jointangles)
## Convert a string that has multiple values to list
# @param self Object pointer
# @param anglestring A string that has values separated by spaces
# @return List of those values
def StringToList(self, anglestring):
jointangles = [float(x) for x in anglestring.split()]
return jointangles
#--------------- Add Gait Table ------------------------
## Callback for Add Command Button
# @param self Object pointer
def AddGaitEntry(self):
if self.elapsedTimeType.get() == "sec" :
currenttimer = int(self.elapstime.get()*1000)
elif self.elapsedTimeType.get() == "msec" :
currenttimer = int(self.elapstime.get())
elif self.elapsedTimeType.get() == "none" :
currenttimer = 0
if not self.condition.get() in self.DependencyList and \
len(self.condition.get()) > 0:
self.DependencyList.append(self.condition.get())
print "Dependency list: ",self.DependencyList
if self.commandType.get() == 0:
module_list_of_gait = []
module_list_of_gait.append(self.modelname.get())
for each_associate in self.frontWheelAssociates:
if not each_associate.ModuleName in module_list_of_gait:
module_list_of_gait.append(each_associate.ModuleName)
for each_associate in self.lftWheelAssociates:
if not each_associate.ModuleName in module_list_of_gait:
module_list_of_gait.append(each_associate.ModuleName)
for each_associate in self.rgtWheelAssociates:
if not each_associate.ModuleName in module_list_of_gait:
module_list_of_gait.append(each_associate.ModuleName)
for each_associate in self.centralBendAssociates:
if not each_associate.ModuleName in module_list_of_gait:
module_list_of_gait.append(each_associate.ModuleName)
#moduleObj = self.GetModuleByName(module_id)
for each_module_name in module_list_of_gait:
module_obj = self.GetModuleByName(each_module_name)
jointsflags = [self.typeSelection.get()]*4
newgaits = GaitEntry(each_module_name,module_obj.JointAngle[:],currenttimer, \
self.dependency.get(),self.condition.get(),False,jointsflags)
# self.CurrentFrameRec.append(newgaits)
self.currentFrame.AddGaitToSection(newgaits)
self.allCommands.append(newgaits.GaitToStr())
self.commandhis.set(tuple(self.allCommands))
self.AddGaitToCurrentFrame(newgaits)
elif self.commandType.get() == 1:
self.ConnectSend(currenttimer)
elif self.commandType.get() == 2:
self.DisconnectSend(currenttimer)
self.RefreshDependencyList()
#self.RefreshGaitRecorder()
self.jointAngleDifferenceTracking = [0]*4
self.name["state"] = NORMAL
self.saveButton["state"] = NORMAL
self.saveButton2["state"] = NORMAL
self.Addframe["state"] = NORMAL
self.Addcommand["state"] = DISABLED
self.Resetframe["state"] = NORMAL
## Add gait to current section and display them in the Manage Gait Table tag
# @param self Object pointer
# @param gait_obj A gait object
    def AddGaitToCurrentFrame(self,gait_obj):
        # Adds a gait to the current section and displays it in the
        # per-module columns of the "Gait Records" tab.  A new column
        # (pane + header button + listbox) is created the first time a
        # module appears in the section; later gaits for the same module
        # are appended to its existing listbox.
        # @param gait_obj A GaitEntry object
        if not gait_obj.ModuleName in self.currentFrame.ModulesInThisFrame:
            self.currentFrame.AddNewChangedModuleToFrame(gait_obj)
            new_rec_frame = Frame(self.panedWindow)
            # Replace the trailing placeholder pane (added at the bottom
            # of this branch) with the real pane, up to 8 columns.
            if len(self.frameListForGaitRecord)>0 and \
                len(self.frameListForGaitRecord)<=8 and \
                len(self.frameListForGaitRecord)>len(self.frameButtonList):
                self.frameListForGaitRecord[-1].destroy()
            self.frameListForGaitRecord.append(new_rec_frame)
            self.panedWindow.add(self.frameListForGaitRecord[-1], minsize = 10, width = 100)
            new_button = Button(self.frameListForGaitRecord[-1], \
                text = gait_obj.ModuleName, height = 1, \
                command = lambda: self.ResizePanelSize(len(self.frameButtonList)+1))
            new_button.place(x = 0, y = 0, relwidth = 1)
            self.frameButtonList.append(new_button)
            list_var =StringVar()
            self.frameListVars.append(list_var)
            new_list = Listbox(self.frameListForGaitRecord[-1], \
                listvariable=self.frameListVars[-1])
            new_list.place(x = 0, y = 25, relwidth = 1, height = 460)
            new_list.config(yscrollcommand=self.scollForCommands.set)
            # NOTE(review): this check runs after
            # AddNewChangedModuleToFrame was called above; whether the
            # container can still be empty here depends on that method's
            # implementation (not visible in this file section) — confirm.
            if len(self.currentFrame.ModulesInThisFrame) == 0:
                self.scollForCommands["command"] = new_list.yview
            self.frameListList.append(new_list)
            self.currentFrame.AddGaitToModule(gait_obj)
            self.frameListVars[-1].set(tuple(self.currentFrame.GaitStrListOfModule[-1]))
            # self.currentFrame.GaitStrListOfModule.append([gait_obj.GaitToStr()])
            # Append a fresh placeholder pane for the next module column.
            if len(self.frameListForGaitRecord)<8:
                occupied_frame = Frame(self.panedWindow)
                self.frameListForGaitRecord.append(occupied_frame)
                self.panedWindow.add(self.frameListForGaitRecord[-1], minsize = 10)
        else:
            # Module already has a column: append the gait to its list.
            # NOTE(review): AddNewChangedModuleToFrame is also called on
            # this already-present path — presumably it tolerates
            # duplicates; verify against the Section implementation.
            self.currentFrame.AddNewChangedModuleToFrame(gait_obj)
            list_idx = self.currentFrame.ModulesInThisFrame.index(gait_obj.ModuleName)
            # self.currentFrame.GaitStrListOfModule[list_idx].append(gait_obj.GaitToStr())
            self.currentFrame.AddGaitToModule(gait_obj,list_idx)
            gait_list = self.currentFrame.GaitStrListOfModule[list_idx]
            self.frameListVars[list_idx].set(tuple(gait_list))
## Send disconnect message to worldplugin
# @param self Object pointer
# @param time_interval How long this command will be executed
    def DisconnectSend(self, time_interval):
        # Builds and publishes a disconnect command ("$ - &mod1 &mod2 ;")
        # to the world plugin, removes the connection from the local
        # bookkeeping and records the command as a special gait entry.
        # @param time_interval How long this command will be executed (ms)
        newmessage = GaitRecMessage()
        newmessage.ModelName = self.modelname.get()
        newmessage.NewFrame = False
        newmessage.PlayStatus = True
        the_connection = self.GetModuleByName(self.modelname.get()).nodes[self.nodeSelect1.get()]
        connection_idx = self.ConnectionList.index(the_connection)
        del self.ConnectionList[connection_idx]
        # NOTE(review): module1 uses attribute 'nodes' but module2 uses
        # 'node' (also in ConnectSend) — looks like a typo unless Module
        # really defines both; confirm against the Module class.
        self.GetModuleByName(self.modelname.get()).nodes[self.nodeSelect1.get()] = []
        self.GetModuleByName(self.module2Select.get()).node[self.nodeSelect2.get()] = []
        if len(self.module2Select.get())>0:
            newmessage.ExtrInfo = "$ - &"+self.modelname.get()+" &"+self.module2Select.get()
        else:
            # "&X" marks an unspecified partner module.
            newmessage.ExtrInfo = "$ - &"+self.modelname.get()+" "+"&X"
        # Optional suffixes: [timer] {condition} (dependency)
        if time_interval != 0 :
            newmessage.ExtrInfo += " [" + str(time_interval) +"]"
        if self.condition.get() != "" :
            newmessage.ExtrInfo += " {" + self.condition.get() +"}"
        if self.dependency.get() != "" :
            newmessage.ExtrInfo += " (" + self.dependency.get() +")"
        newmessage.ExtrInfo += " ;"
        if self.initflag==0 or self.initflag==2:
            self.gaitPub.Publish(newmessage)
        # NOTE(review): the GaitEntry gets the raw elapstime value while
        # the published message used the converted time_interval —
        # possibly an inconsistency; confirm intended units.
        newgaits = GaitEntry(self.modelname.get(),[0,0,0,0],self.elapstime.get(),self.dependency.get(),self.condition.get())
        newgaits.AddExtraInfo(newmessage.ExtrInfo)
        newgaits.SpecialEntry = True
        self.AddGaitToCurrentFrame(newgaits)
## Send connect message to worldplugin
# @param self Object pointer
# @param time_interval How long this command will be executed
    def ConnectSend(self, time_interval):
        # Builds and publishes a connect command
        # ("$ + &mod1 #node1 &mod2 #node2 ;") to the world plugin,
        # registers the new connection locally and records it as a
        # special gait entry.  Silently does nothing unless both modules
        # and both nodes are selected.
        # @param time_interval How long this command will be executed (ms)
        if len(self.modelname.get())>0 and len(self.module2Select.get())>0 and len(self.nodeSelect1.get())>0 and len(self.nodeSelect2.get())>0:
            newmessage = GaitRecMessage()
            newmessage.ModelName = self.modelname.get()
            newmessage.NewFrame = False
            newmessage.PlayStatus = True
            new_connection = Connection(self.GetModuleByName(self.modelname.get()), \
                self.GetModuleByName(self.module2Select.get()),self.nodeSelect1.get(), \
                self.nodeSelect2.get())
            self.ConnectionList.append(new_connection)
            # NOTE(review): 'nodes' for module1 vs 'node' for module2
            # (same pattern as DisconnectSend) — confirm against Module.
            self.GetModuleByName(self.modelname.get()).nodes[self.nodeSelect1.get()] = self.ConnectionList[-1]
            self.GetModuleByName(self.module2Select.get()).node[self.nodeSelect2.get()] = self.ConnectionList[-1]
            newmessage.ExtrInfo = "$ + &"+self.modelname.get()+" #"+self.nodeSelect1.get()+" &"+self.module2Select.get()+" #"+self.nodeSelect2.get()
            # Optional suffixes: [timer] {condition} (dependency)
            if time_interval != 0 :
                newmessage.ExtrInfo += " [" + str(time_interval) +"]"
            if self.condition.get() != "" :
                newmessage.ExtrInfo += " {" + self.condition.get() +"}"
            if self.dependency.get() != "" :
                newmessage.ExtrInfo += " (" + self.dependency.get() +")"
            newmessage.ExtrInfo += " ;"
            if self.initflag==0 or self.initflag==2:
                self.gaitPub.Publish(newmessage)
            # NOTE(review): Timer receives raw elapstime, not the
            # converted time_interval used in the message — confirm.
            newgaits = GaitEntry(self.modelname.get(),[0,0,0,0],self.elapstime.get(),self.dependency.get(),self.condition.get())
            newgaits.AddExtraInfo(newmessage.ExtrInfo)
            newgaits.SpecialEntry = True
            self.AddGaitToCurrentFrame(newgaits)
## Delets all the widgets on the second tag page
# @param self Object pointer
def DeleteAllWidgetsOnHisWindow(self):
for each_widget in self.frameListForGaitRecord:
each_widget.destroy()
for each_widget in self.frameButtonList:
each_widget.destroy()
for each_widget in self.frameListList:
each_widget.destroy()
self.frameListVars = []
self.scollForCommands["command"] = ()
## Click the button on the top to resize the panel size
# It doesn't work
# @param self Object pointer
# @param button_idx Integer: index of the panel
    def ResizePanelSize(self, button_idx):
        # Column-header button callback: tries to force the clicked pane
        # back to its default width.  (The original author notes above
        # that this does not actually work.)
        # @param button_idx Integer: index of the panel
        print "Button index is ", button_idx
        self.panedWindow.paneconfigure(self.frameListForGaitRecord[button_idx], \
            width = 100)
## Converts gait object to string
# @param self Object pointer
# @param gaittableobj A GaitEntry object
# @return Gait string
def GaitObjToStr(self,gaittableobj):
if gaittableobj.SpecialEntry :
gaitstr = gaittableobj.ModuleName
return gaitstr
else:
gaitstr = ""
gaitstr += gaittableobj.ModuleName+" "
for i in xrange(4):
if gaittableobj.AngleFlags[i] == 0:
gaitstr+= "p"
elif gaittableobj.AngleFlags[i] == 1:
gaitstr+= "s"
elif gaittableobj.AngleFlags[i] == 2:
gaitstr+= "t"
gaitstr+= str(gaittableobj.Joints[i])+" "
if gaittableobj.Timer > 0:
gaitstr+= "["+str(gaittableobj.Timer)+"] "
if len(gaittableobj.condition_id) > 0 :
gaitstr+= "{"+gaittableobj.condition_id+"} "
if len(gaittableobj.dependency_id) > 0 :
gaitstr+= "("+gaittableobj.dependency_id+") "
gaitstr+=";"
return gaitstr
#---------------- Save & Reset & Play Frames ---------------
## Saves current section, save section button callback
# @param self Object pointer
    def SaveFrame(self):
        # "Save Section" button callback: closes the current section and
        # opens a fresh one.  Picks the next unused "Section_N" name,
        # stamps the section ID as the condition of the last gait of each
        # involved module, then resets the widgets for the new section.
        # @param self Object pointer
        framenum = len(self.keyFrameList)
        # Skip over section names already used as dependencies.
        while True:
            if not "Section_"+str(framenum) in self.DependencyList:
                break
            else:
                framenum += 1
        # For every module touched in this section, tag its *last* gait
        # (hence the reversed scan) with the section ID as condition,
        # unless the user already set one.
        for each_indep_module in self.currentFrame.ModulesInThisFrame:
            for each_gait in reversed(self.currentFrame.GaitEntries):
                if each_gait.ModuleName == each_indep_module:
                    if each_gait.condition_id == "":
                        each_gait.condition_id = self.currentFrame.FrameID
                    break
        # Open the next section and refresh all dependent widgets.
        self.currentFrame = Section("Section_"+str(framenum))
        self.currentFrame.RecordCurrentPosition(self.ModuleList)
        self.keyFrames.append(self.currentFrame)
        self.DeleteAllWidgetsOnHisWindow()
        self.keyFrameList.append(self.currentFrame.FrameID)
        self.keyFrame["values"] = tuple(self.keyFrameList)
        self.selectedkeyFrame.set("Section_"+str(framenum))
        # The previous section becomes an offerable dependency and the
        # default dependency of the new one.
        self.DependencyList.append("Section_"+str(framenum-1))
        self.RefreshDependencyList()
        self.dependency.set("Section_"+str(framenum-1))
        self.Resetframe["state"] = DISABLED
        # self.keyFrameList.append(self.CurrentFrameRec)
        # self.CurrentFrameRec = []
        # self.RefreshGaitRecorder()
        self.UpdateFrameBox()
        self.SaveCurrentPose()
## Updates the section selction ComboBox
# @param self Object pointer
def UpdateFrameBox(self):
self.allCommands = []
for each_section in self.keyFrames:
for each_entry in each_section.GaitEntries:
self.allCommands.append(each_entry.GaitToStr())
# self.Framename['values'] = tuple(frameliststr)
self.commandhis.set(tuple(self.allCommands))
## Resets the current simulation world to the begin of the section
# @param self Object pointer
def Reset(self):
newmessage = GaitRecMessage()
if self.currentFrame.GaitEntries:
newmessage.ModelName = self.currentFrame.GaitEntries[0].ModuleName
else:
newmessage.ModelName = "Module_0"
newmessage.NewFrame = False
newmessage.PlayStatus = True
newmessage.ResetFlag = True
if self.initflag==0 or self.initflag==2:
self.gaitPub.Publish(newmessage)
self.Playframe["state"] = NORMAL
# self.Resetframe["state"] = DISABLED
if self.commandType.get() == 0:
self.GetModuleByName(self.modelname.get()).JointAngle[0] \
-= self.jointAngleDifferenceTracking[0]
for each_associate in self.frontWheelAssociates:
self.GetModuleByName(each_associate.ModuleName).JointAngle[each_associate.Node] \
-= self.jointAngleDifferenceTracking[0]*each_associate.Ratio*self.InterpretCorrelation(each_associate.Correlation)
self.GetModuleByName(self.modelname.get()).JointAngle[1] \
-= self.jointAngleDifferenceTracking[1]
for each_associate in self.lftWheelAssociates:
self.GetModuleByName(each_associate.ModuleName).JointAngle[each_associate.Node] \
-= self.jointAngleDifferenceTracking[1]*each_associate.Ratio*self.InterpretCorrelation(each_associate.Correlation)
self.GetModuleByName(self.modelname.get()).JointAngle[2] \
-= self.jointAngleDifferenceTracking[2]
for each_associate in self.rgtWheelAssociates:
self.GetModuleByName(each_associate.ModuleName).JointAngle[each_associate.Node] \
-= self.jointAngleDifferenceTracking[2]*each_associate.Ratio*self.InterpretCorrelation(each_associate.Correlation)
self.GetModuleByName(self.modelname.get()).JointAngle[3] \
-= self.jointAngleDifferenceTracking[3]
for each_associate in self.centralBendAssociates:
self.GetModuleByName(each_associate.ModuleName).JointAngle[each_associate.Node] \
-= self.jointAngleDifferenceTracking[3]*each_associate.Ratio*self.InterpretCorrelation(each_associate.Correlation)
self.UpdateJointValue()
print "Reset message sent"
## Plays the current section
# @param self Object pointer
def PlayFrame(self):
for eachgait in self.currentFrame.GaitEntries :
if not eachgait.SpecialEntry:
self.PublishMessage(eachgait,True)
else:
self.PublishMessageSpecial(eachgait,True)
# self.Playframe["state"] = DISABLED
print "All information published"
self.Resetframe["state"] = NORMAL
## Generates and publish save current section command to worldplugin
# @param self Object pointer
def SaveCurrentPose(self):
newmessage = GaitRecMessage()
newmessage.ModelName = "SaveFrame"
newmessage.NewFrame = True
newmessage.PlayStatus = True
if self.initflag==0 or self.initflag==2:
self.gaitPub.Publish(newmessage)
## Updates the panel window after selecting a section
# @param self Object pointer
# @param args Other variables
    def UpdateFrameWindows(self,*args):
        # Section-combobox callback: switches the "Gait Records" tab to
        # the selected section by rebuilding its per-module columns from
        # scratch.
        # @param args Tk event arguments (unused)
        print "Runs update frame window"
        self.DeleteAllWidgetsOnHisWindow()
        frame_id = self.selectedkeyFrame.get()
        idx = self.keyFrameList.index(frame_id)
        print "Section index: ",idx
        self.currentFrame = self.keyFrames[idx]
        # Clear the display bookkeeping, then re-add every gait so the
        # columns are rebuilt.
        self.currentFrame.EmptyTheContainer()
        for each_gait in self.keyFrames[idx].GaitEntries:
            print "gait takes forever"
            self.AddGaitToCurrentFrame(each_gait)
#----------- Manually Update Section -----------------
## Callback when you click a gait entry in the third tag ("Gait Records")
# @param self Object pointer
# @param args Other variables
    def ModifyHistory(self,*args):
        # History-listbox selection callback: loads the clicked command
        # into the edit entry and enables the Update/Delete buttons.
        # @param args Tk event arguments (unused)
        self.historyidx = int(self.CommandHis.curselection()[0])
        print "Select item: ",self.historyidx
        self.selectedcommand.set(self.allCommands[self.historyidx])
        self.CommandDeleteBtn["state"] = NORMAL
        self.CommandUpdateBtn["state"] = NORMAL
## Update a GaitEntry object when you finish editing
# Callback of the Update button in the third tag ("Gait Records")
# @param self Object pointer
def UpdateSingleGaitEntry(self):
newsinglegait = self.selectedcommand.get()
self.allCommands[self.historyidx] = newsinglegait
updatedgait = self.InterpretGaitString(newsinglegait)
# self.CurrentFrameHis[self.historyidx] = updatedgait
(modified_gait,gaits,idx) = self.FindGaitEntryBasedOnIdx(self.historyidx)
gaits[idx] = updatedgait
self.UpdateFrameBox()
self.saveButton["state"] = NORMAL
self.saveButton2["state"] = NORMAL
## Delete a GaitEntry object after you hit the delete button
# Callback of the Delete button in the third tag ("Gait Records")
# @param self Object pointer
def DeleteSingleGait(self):
del self.allCommands[self.historyidx]
(modified_gait,gaits,idx) = self.FindGaitEntryBasedOnIdx(self.historyidx)
del gaits[idx]
self.selectedcommand.set("")
self.UpdateFrameBox()
self.saveButton["state"] = NORMAL
self.saveButton2["state"] = NORMAL
self.CommandDeleteBtn["state"] = DISABLED
self.CommandUpdateBtn["state"] = DISABLED
## Finds the GaitEntry object by index
# for update and delete gait in the Gait Records window
# @param self Object pointer
# @param gait_idx Integer, the index number of the gait
# @return (the GaitEntry object, Section GaitEntry List, And Section GaitEntry index)
def FindGaitEntryBasedOnIdx(self,gait_idx):
before_substr = gait_idx
for each_section in self.keyFrames:
after_substr = before_substr-len(each_section.GaitEntries)
if before_substr>=0 and after_substr<0:
return (each_section.GaitEntries[before_substr],each_section.GaitEntries, \
before_substr)
else:
before_substr = after_substr
# def CommandRecModify(self):
# self.recidx = int(self.CommandRec.curselection()[0])
# gaitentryobj = self.CurrentFrameRec[self.recidx]
# if gaitentryobj.ModuleName[0] == "-" or gaitentryobj.ModuleName[0] == "+" :
# tmpstring = gaitentryobj.ModuleName[2:]
# self.modelname.set(tmpstring[0:tmpstring.find(" ")])
# tmpstring = gaitentryobj.ModuleName[tmpstring.find(" ")+1:]
# self.othermodelname.set(tmpstring[0:tmpstring.find(" ")])
# else:
# self.Joint3.set(gaitentryobj.Joints[3]/PI*180)
# # self.front_angle.set(gaitentryobj.Joints[0]/PI*180)
# # self.left_angle.set(gaitentryobj.Joints[1]/PI*180)
# # self.right_angle.set(gaitentryobj.Joints[2]/PI*180)
# # self.group.set(gaitentryobj.Group)
# self.elapstime.set(gaitentryobj.Timer/1000.0)
# self.modelname.set(gaitentryobj.ModuleName)
# if gaitentryobj.AngleFlags[0] == 0:
# self.frontModA.select()
# self.frontangle.set(gaitentryobj.Joints[0]/PI*180)
# self.frontspeed.set(0)
# else:
# self.frontspeed.set(gaitentryobj.Joints[0])
# self.frontangle.set(0)
# self.frontModS.select()
# if gaitentryobj.AngleFlags[1] == 0:
# self.WheelModA.select()
# self.left_angle.set(gaitentryobj.Joints[1]/PI*180)
# self.right_angle.set(gaitentryobj.Joints[2]/PI*180)
# self.leftspeed.set(0)
# self.rightspeed.set(0)
# else:
# self.left_angle.set(0)
# self.right_angle.set(0)
# self.leftspeed.set(gaitentryobj.Joints[1])
# self.rightspeed.set(gaitentryobj.Joints[2])
# self.WheelModS.select()
# self.saveButton["state"] = NORMAL
# self.saveButton2["state"] = NORMAL
# def CommandRecSave(self):
# gaitentryobj = self.CurrentFrameRec[self.recidx]
# if gaitentryobj.ModuleName[0] == "-" or gaitentryobj.ModuleName[0] == "+" :
# tmpstring = gaitentryobj.ModuleName[2:]
# namestring1 = gaitentryobj.ModuleName[2:tmpstring.find(" ")]
# tmpstring = gaitentryobj.ModuleName[tmpstring.find(" ")+1:]
# tmpstring = gaitentryobj.ModuleName[tmpstring.find(" ")+1:]
# gaitentryobj.ModuleName == gaitentryobj.ModuleName[0:2]+namestring1+" "+self.othermodelname.get()+" "+tmpstring
# else:
# gaitentryobj.Joints[3] = self.Joint3.get()/180.0*PI
# if self.frontmode.get() == 0 :
# gaitentryobj.AngleFlags[0] = 0
# gaitentryobj.Joints[0] = self.frontangle.get()/180.0*PI
# else:
# gaitentryobj.AngleFlags[0] = 1
# gaitentryobj.Joints[0] = self.frontspeed.get()
# if self.wheelmode.get() == 0 :
# gaitentryobj.AngleFlags[1] = 0
# gaitentryobj.AngleFlags[2] = 0
# gaitentryobj.Joints[1] = self.left_angle.get()/180.0*PI
# gaitentryobj.Joints[2] = self.right_angle.get()/180.0*PI
# else:
# gaitentryobj.AngleFlags[1] = 1
# gaitentryobj.AngleFlags[2] = 1
# gaitentryobj.Joints[1] = self.leftspeed.get()
# gaitentryobj.Joints[2] = self.rightspeed.get()
# # gaitentryobj.GroupIncr = self.group.get() - gaitentryobj.Group + gaitentryobj.GroupIncr
# # gaitentryobj.Group = self.group.get()
# gaitentryobj.Timer = int(self.elapstime.get()*1000)
# # self.RefreshGaitRecorder()
# def CommandRecDelete(self):
# self.recidx = int(self.CommandRec.curselection()[0])
# del self.CurrentFrameRec[self.recidx]
# # self.RefreshGaitRecorder()
# self.saveButton["state"] = NORMAL
# self.saveButton2["state"] = NORMAL
# if len(self.CurrentFrameRec)>0:
# self.Addframe["state"] = NORMAL
# else:
# self.Addframe["state"] = DISABLED
    ## Build and publish a common gait command message.
    # Sends the four joint values and their mode flags for one module.
    # @param self Object Pointer
    # @param eachgaittable A GaitEntry object
    # @param playstate Bool, True: for playmode, update model state using gait string;
    # False: update model state immediately
    def PublishMessage(self,eachgaittable,playstate):
        newmessage = GaitRecMessage()
        newmessage.ModelName = eachgaittable.ModuleName
        newmessage.NewFrame = False
        newmessage.PlayStatus = playstate
        # Copy all four joint values.
        for i in xrange(4):
            newmessage.JointAngles.append(eachgaittable.Joints[i])
        newmessage.Timer = eachgaittable.Timer
        newmessage.Condition = eachgaittable.condition_id
        newmessage.Dependency = eachgaittable.dependency_id
        # Flags select how each joint value is interpreted (see InterpretGaitString).
        for i in xrange(4):
            newmessage.Flags.append(eachgaittable.AngleFlags[i])
        # Publish only in modes 0 and 2 -- presumably the modes with a live
        # publisher; mode 1 is GUI-debug per main(). TODO confirm semantics.
        if self.initflag==0 or self.initflag==2:
            self.gaitPub.Publish(newmessage)
        print "Information published"
    ## Publish a special command message (connect/disconnect).
    # Unlike PublishMessage, joint angles and flags are not sent; the raw
    # command string travels in the ExtrInfo field instead.
    # @param self Object Pointer
    # @param eachgaittable A GaitEntry object
    # @param playstate Bool, True: for playmode, update model state using gait string;
    # False: update model state immediately
    def PublishMessageSpecial(self,eachgaittable,playstate):
        newmessage = GaitRecMessage()
        newmessage.ModelName = eachgaittable.ModuleName
        newmessage.NewFrame = False
        newmessage.PlayStatus = playstate
        newmessage.Timer = eachgaittable.Timer
        newmessage.Condition = eachgaittable.condition_id
        newmessage.Dependency = eachgaittable.dependency_id
        # NOTE(review): "ExtrInfo" (no 'a') is presumably the field name as
        # declared in the GaitRecMessage proto -- confirm before "fixing".
        newmessage.ExtrInfo = eachgaittable.ExtraInfo
        # Publish only when a simulator connection exists (modes 0 and 2).
        if self.initflag==0 or self.initflag==2:
            self.gaitPub.Publish(newmessage)
        print "Information published"
#----------------- Save & Load Gait Table -------------------
## Write recorded gait table to file
# @param self Object Pointer
def SaveGaitTable(self):
# Need a regular expression
# commandpath = self.savepath.get()
# if commandpath[-1] != "/":
# f = open(commandpath, 'w')
# else:
# f = open(commandpath+"Commands", 'w')
if not os.path.isdir(self.savepath.get()):
os.system("mkdir "+self.savepath.get())
if os.path.isdir(self.savepath.get()):
self.savePath = self.savepath.get()
print "path is ",self.savePath
command_file_opt = {}
command_file_opt['filetypes'] = [('smart gait table', '.gait'),('text files', '.txt'),('all files', '*')]
command_file_opt["defaultextension"] = ".gait"
command_file_opt['initialdir'] = self.savePath
command_file_opt['parent'] = self.parent
command_file_opt['title'] = 'Save Gait Command'
f = tkFileDialog.asksaveasfile(mode='w', **command_file_opt)
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
GaitStringList = []
for eachframe in self.keyFrames:
for eachentry in eachframe.GaitEntries:
GaitStringList.append(eachentry.GaitToStr()+'\n')
f.writelines(GaitStringList)
f.close()
print "Gait saved"
self.saveButton["state"] = DISABLED
self.saveButton2["state"] = DISABLED
## Open and read in a gait file
# @param self Object Pointer
def OpenGaitFile(self):
self.file_opt['title'] = 'Open Gait Command'
filename = tkFileDialog.askopenfilename(**self.file_opt)
if filename == '':
return
# filename = "/home/edward/Simulation Plugins/GaitRecorder/pythonGUI/Commands"
gaitfile = open(filename, 'r')
# print gaitfile.readlines()
self.currentFrame = Section("Section_0")
self.currentFrame.RecordCurrentPosition(self.ModuleList)
self.keyFrames = []
self.keyFrames.append(self.currentFrame)
self.DeleteAllWidgetsOnHisWindow()
for eachlines in gaitfile.readlines():
# print eachlines[0:-1]
if eachlines.find("//") != -1:
eachlines = eachlines[0:eachlines.find("//")]
semicolon_index = eachlines.find(";")
while semicolon_index != -1:
newgait = self.InterpretGaitString(eachlines[0:semicolon_index])
eachlines = eachlines[semicolon_index+1:]
semicolon_index = eachlines.find(";")
newcondition = newgait.condition_id
if not newcondition in self.DependencyList and len(newcondition)>0:
self.DependencyList.append(newcondition)
self.currentFrame.AddGaitToSection(newgait)
self.allCommands.append(newgait.GaitToStr())
self.commandhis.set(tuple(self.allCommands))
self.AddGaitToCurrentFrame(newgait)
# print "Dependency list: ",self.DependencyList
framenum = len(self.keyFrameList)
while True:
if not "Section_"+str(framenum) in self.DependencyList:
break
else:
framenum += 1
self.currentFrame.FrameID = "Section_"+str(framenum)
self.keyFrameList = [self.currentFrame.FrameID]
self.keyFrame["values"] = tuple(self.keyFrameList)
self.selectedkeyFrame.set("Section_"+str(framenum))
self.RefreshDependencyList()
# self.RefreshGaitRecorder()
self.saveButton["state"] = NORMAL
self.saveButton2["state"] = NORMAL
self.Addframe["state"] = NORMAL
    ## Convert a gait string to a GaitEntry object.
    # Grammar (one command): "[timer] {condition} (dependency) name v0 v1 v2 v3"
    # where each value is a mode-prefix letter followed by an optional number;
    # a "$" anywhere marks a special (connect/disconnect) command whose module
    # name follows "&".
    # @param self Object Pointer
    # @param gaitstring A gait command string
    # @return A GaitEntry object
    def InterpretGaitString(self,gaitstring):
        # Optional "[...]" holds the integer timer -- unit presumably ms,
        # SaveGaitTable round-trips it unchanged; TODO confirm.
        if gaitstring.find("[") != -1:
            timer = int(gaitstring[gaitstring.find("[")+1:gaitstring.find("]")])
        else:
            timer = 0
        # Optional "{...}" holds the condition id.
        if gaitstring.find("{") != -1:
            condition = gaitstring[gaitstring.find("{")+1:gaitstring.find("}")]
        else:
            condition = ""
        # Optional "(...)" holds the dependency id.
        if gaitstring.find("(") != -1:
            dependency = gaitstring[gaitstring.find("(")+1:gaitstring.find(")")]
        else:
            dependency = ""
        if gaitstring.find("$") != -1 :
            # Special command: the module name sits between "&" and the next
            # space; the raw string is preserved for the plugin to interpret.
            and_idx = gaitstring.find("&")
            module_name = gaitstring[and_idx+1:gaitstring.find(" ",and_idx)]
            return GaitEntry(module_id = module_name,jointangles = [0,0,0,0], \
                timer = 0,dependency = dependency, condition = condition, \
                special = True, extra_info = gaitstring)
        else:
            gaitstring = gaitstring.strip()
            fields = gaitstring.split()
            modelname = fields[0]
            joints = []
            jointsflags = []
            # Fields 1..4 are the four joint values.  The prefix letter maps to
            # a mode flag: p->0, s->1, t->2, i->3, c->4, d->5 (presumably
            # position/speed/torque/...; flag semantics live in the plugin --
            # TODO confirm).
            for i in xrange(1,5):
                value_candidate = fields[i]
                if value_candidate[0] == "p":
                    jointsflags.append(0)
                elif value_candidate[0] == "s" :
                    jointsflags.append(1)
                elif value_candidate[0] == "t" :
                    jointsflags.append(2)
                elif value_candidate[0] == "i" :
                    jointsflags.append(3)
                elif value_candidate[0] == "c" :
                    jointsflags.append(4)
                elif value_candidate[0] == "d" :
                    jointsflags.append(5)
                # NOTE(review): an unrecognised prefix appends a joint value
                # below without appending a flag, misaligning the two lists.
                # A missing number after the prefix means 0.0.
                if value_candidate[1:] == "":
                    joints.append(0.0)
                else:
                    joints.append(float(value_candidate[1:]))
            return GaitEntry(modelname,joints,timer,dependency,condition, \
                False,jointsflags[0:4])
#----------------- Common Command Related -------------------
    ## Callback for scale-bar movement: live-update joint angles in simulation.
    # Computes the angle delta of the selected joint, applies it to the module
    # and to every associated joint (scaled by ratio and correlation sign),
    # queues one message per affected module and publishes the queue.  Finally
    # re-centres the scale range so wheel joints can rotate past +/-180 deg.
    # NOTE(review): near-duplicate of UpdateFromValueBox -- candidates for a
    # shared helper.
    # @param self Object pointer
    # @param args Extra Tkinter callback arguments (unused)
    def DynamicUpdate(self, *args):
        if len(self.modelname.get()) > 0 :
            message_queue = []
            newmessage = GaitRecMessage()
            newmessage.ModelName = self.modelname.get()
            newmessage.NewFrame = False
            newmessage.PlayStatus = False
            the_module = self.GetModuleByName(self.modelname.get())
            # Delta between the scale value (degrees) and the cached angle (rad).
            diff = self.valueSetting.get()/180.0*PI - \
                the_module.JointAngle[self.jointSelection.get()]
            the_module.JointAngle[self.jointSelection.get()] = \
                self.valueSetting.get()/180.0*PI
            for i in xrange(4):
                newmessage.JointAngles.append(the_module.JointAngle[i])
            message_queue.append(newmessage)
            # Propagate the delta to every associated joint.
            for each_associates in self.currentAssociates:
                if not self.CheckTheExistingCommand(each_associates.ModuleName, message_queue):
                    newmessage = GaitRecMessage()
                    newmessage.ModelName = each_associates.ModuleName
                    newmessage.NewFrame = False
                    newmessage.PlayStatus = False
                    the_module = self.GetModuleByName(each_associates.ModuleName)
                    the_module.JointAngle[each_associates.Node] = \
                        the_module.JointAngle[each_associates.Node] + \
                        diff*each_associates.Ratio* \
                        self.InterpretCorrelation(each_associates.Correlation)
                    for i in xrange(4):
                        newmessage.JointAngles.append(the_module.JointAngle[i])
                    message_queue.append(newmessage)
                else:
                    # NOTE(review): this branch updates the module state but
                    # then appends four MORE angles to `newmessage` (the message
                    # from a previous iteration) instead of refreshing
                    # `the_messgae` (sic) already in the queue.  Looks like a
                    # latent bug -- confirm intended behaviour before changing.
                    the_messgae = self.CheckTheExistingCommand( \
                        each_associates.ModuleName, message_queue)
                    the_module = self.GetModuleByName(the_messgae.ModelName)
                    the_module.JointAngle[each_associates.Node] = \
                        the_module.JointAngle[each_associates.Node] + \
                        diff*each_associates.Ratio* \
                        self.InterpretCorrelation(each_associates.Correlation)
                    for i in xrange(4):
                        newmessage.JointAngles.append(the_module.JointAngle[i])
            if self.initflag==0 or self.initflag==2:
                for each_message in message_queue:
                    self.gaitPub.Publish(each_message)
            print "Angle Updating"
            self.Addcommand["state"] = NORMAL
            # Lock the model-name box once the pose has drifted measurably.
            self.jointAngleDifferenceTracking[self.jointSelection.get()] += diff
            if sum([abs(x) for x in self.jointAngleDifferenceTracking]) > 1.0/180.0*PI:
                print "Angle difference is ", diff
                self.name["state"] = DISABLED
            else:
                self.name["state"] = NORMAL
            # Wheel joints in angle mode: slide the 360-degree window by 90
            # whenever the scale hits either end, so rotation is continuous.
            if self.jointSelection.get() != 3 and self.typeSelection.get() == 0:
                if self.valueSetting.get() == self.valueSetting["to"]:
                    self.valueSetting["to"] = self.valueSetting["to"] + 90
                    self.valueSetting["from"] = self.valueSetting["from"] + 90;
                    self.valueSetting.set(self.valueSetting["to"] - 180)
                    time.sleep(0.3)
                if self.valueSetting.get() == self.valueSetting["from"]:
                    self.valueSetting["to"] = self.valueSetting["to"] - 90
                    self.valueSetting["from"] = self.valueSetting["from"] - 90
                    self.valueSetting.set(self.valueSetting["from"] + 180)
                    time.sleep(0.3)
                self.valueInBox.set(self.valueSetting.get())
            if self.jointSelection.get() == 3 and self.typeSelection.get() == 0:
                self.valueInBox.set(self.valueSetting.get())
    ## Callback bound (via Enter key) to the entry box next to the scale.
    # Same propagation logic as DynamicUpdate, but driven by the typed value;
    # afterwards the scale range is re-centred (wheels) or the input clamped
    # to [-90, 90] (central bend).
    # NOTE(review): near-duplicate of DynamicUpdate -- candidates for a shared
    # helper, including the same suspicious else-branch (see note there).
    # @param self Object pointer
    # @param args Extra Tkinter event arguments (unused)
    def UpdateFromValueBox(self, *args):
        if len(self.modelname.get()) > 0 :
            message_queue = []
            newmessage = GaitRecMessage()
            newmessage.ModelName = self.modelname.get()
            newmessage.NewFrame = False
            newmessage.PlayStatus = False
            the_module = self.GetModuleByName(self.modelname.get())
            # Delta between the typed value (degrees) and the cached angle (rad).
            diff = self.valueInBox.get()/180.0*PI - \
                the_module.JointAngle[self.jointSelection.get()]
            the_module.JointAngle[self.jointSelection.get()] = \
                self.valueInBox.get()/180.0*PI
            for i in xrange(4):
                newmessage.JointAngles.append(the_module.JointAngle[i])
            message_queue.append(newmessage)
            # Propagate the delta to every associated joint.
            for each_associates in self.currentAssociates:
                if not self.CheckTheExistingCommand(each_associates.ModuleName, message_queue):
                    newmessage = GaitRecMessage()
                    newmessage.ModelName = each_associates.ModuleName
                    newmessage.NewFrame = False
                    newmessage.PlayStatus = False
                    the_module = self.GetModuleByName(each_associates.ModuleName)
                    the_module.JointAngle[each_associates.Node] = \
                        the_module.JointAngle[each_associates.Node] + \
                        diff*each_associates.Ratio* \
                        self.InterpretCorrelation(each_associates.Correlation)
                    for i in xrange(4):
                        newmessage.JointAngles.append(the_module.JointAngle[i])
                    message_queue.append(newmessage)
                else:
                    # NOTE(review): appends to the previous `newmessage` instead
                    # of updating `the_messgae` -- see DynamicUpdate.
                    the_messgae = self.CheckTheExistingCommand( \
                        each_associates.ModuleName, message_queue)
                    the_module = self.GetModuleByName(the_messgae.ModelName)
                    the_module.JointAngle[each_associates.Node] = \
                        the_module.JointAngle[each_associates.Node] + \
                        diff*each_associates.Ratio* \
                        self.InterpretCorrelation(each_associates.Correlation)
                    for i in xrange(4):
                        newmessage.JointAngles.append(the_module.JointAngle[i])
            if self.initflag==0 or self.initflag==2:
                for each_message in message_queue:
                    self.gaitPub.Publish(each_message)
            print "Angle Updating"
            self.Addcommand["state"] = NORMAL
            # Lock the model-name box once the pose has drifted measurably.
            self.jointAngleDifferenceTracking[self.jointSelection.get()] += diff
            if sum([abs(x) for x in self.jointAngleDifferenceTracking]) > 1.0/180.0*PI:
                print "Angle difference is ", diff
                self.name["state"] = DISABLED
            else:
                self.name["state"] = NORMAL
            if self.typeSelection.get() == 0:
                if self.jointSelection.get() != 3:
                    # Wheel joint: re-centre the 360-degree scale window on the
                    # typed value if it fell outside the current range.
                    if (self.valueInBox.get() >= self.valueSetting["to"]) or \
                        (self.valueInBox.get() <= self.valueSetting["from"]):
                        self.valueSetting["to"] = self.valueInBox.get() + 180
                        self.valueSetting["from"] = self.valueInBox.get() - 180
                    self.valueSetting.set(self.valueInBox.get())
                else:
                    # Central bend: clamp to its physical range.
                    if self.valueInBox.get() > 90:
                        self.valueInBox.set(90)
                    if self.valueInBox.get() < -90:
                        self.valueInBox.set(-90)
                    self.valueSetting.set(self.valueInBox.get())
## Check whether there is a command of the same model in a command list
# @param self Object pointer
# @param model_name Name string of the model
# @param command_queue A command list
# @return If found a command, then return the messga object; otherwise return False
def CheckTheExistingCommand(self,model_name,command_queue):
for each_command in command_queue:
if each_command.ModelName == model_name:
return each_command
return False
## Convert a correlation boolean value to number that can be used in calculation
# @param self Object pointer
# @param corr Bool value stored in AssociateJ?oint object
# @return Position correlation: 1.0 ; negtive correlation: -1.0
def InterpretCorrelation(self, corr):
if corr:
return 1.0
else:
return -1.0
#---------------- Associates Related -----------------------
## Clear the association window
# @param self Object pointer
def ResetAssociateWindow(self):
self.frontWheelAssociates = []
self.lftWheelAssociates = []
self.rgtWheelAssociates = []
self.centralBendAssociates = []
self.currentAssociates = []
self.associatedJointsList.set(())
## Refresh association window
# @param self Object pointer
def RefreshAssociates(self):
associate_list = []
if self.jointSelection.get() == 0:
self.currentAssociates = self.frontWheelAssociates
if self.jointSelection.get() == 1:
self.currentAssociates = self.lftWheelAssociates
if self.jointSelection.get() == 2:
self.currentAssociates = self.rgtWheelAssociates
if self.jointSelection.get() == 3:
self.currentAssociates = self.centralBendAssociates
for each_associate in self.currentAssociates:
associate_list.append(each_associate.ToString())
self.associatedJointsList.set(tuple(associate_list))
## Delete an association
# @param self Object pointer
def DeleteAnAssociate(self):
if self.associatedJoints.curselection():
self.associateIdx = int(self.associatedJoints.curselection()[0])
if self.jointSelection.get() == 0:
del self.frontWheelAssociates[self.associateIdx]
self.currentAssociates = self.frontWheelAssociates
if self.jointSelection.get() == 1:
del self.lftWheelAssociates[self.associateIdx]
self.currentAssociates = self.lftWheelAssociates
if self.jointSelection.get() == 2:
del self.rgtWheelAssociates[self.associateIdx]
self.currentAssociates = self.rgtWheelAssociates
if self.jointSelection.get() == 3:
del self.centralBendAssociates[self.associateIdx]
self.currentAssociates = self.centralBendAssociates
self.RefreshAssociates()
#---------------- Update Frames ----------------------------
# def UpdateFrame(self):
#---------------- Select File Save Path --------------------
## Select gaitfile save path
# @param self Object pointer
def SelectSavePath(self):
select_dir_options = {}
select_dir_options['initialdir'] = '~/'
select_dir_options['parent'] = self.parent
select_dir_options['title'] = 'Select Save Path'
self.savePath = tkFileDialog.askdirectory(**select_dir_options)
if self.savePath == "":
return
self.savepath.set(self.savePath+"/")
#---------------- Add Common Command -----------------------
## Open an add association window
# @param self Object pointer
def AddAssociates(self):
asc = AddAssociate(self)
self.wait_window(asc)
    ## Push the selected module's cached joint angle into the scale and entry.
    # Re-centres the scale range around the current angle for wheel joints and
    # fixes the bend joint's range to [-90, 90].
    # @param self Object pointer
    def UpdateJointValue(self):
        node_idx = self.jointSelection.get()
        value_type = self.typeSelection.get()
        a_module = self.GetModuleByName(self.modelname.get())
        # NOTE(review): "from_" works as a subscript key only because Tkinter
        # strips trailing underscores in configure(); DynamicUpdate uses
        # "from" for the same option -- confirm and unify.
        if node_idx == 3 and value_type == 0:
            self.valueSetting["from_"] = -90
            self.valueSetting["to"] = 90
        # NOTE(review): a_module is dereferenced here before the truthiness
        # guard below -- an unknown model name would raise. Confirm
        # GetModuleByName cannot return None on this path.
        if node_idx != 3 and value_type == 0:
            if a_module.JointAngle[node_idx]/PI*180 > -180 and \
                a_module.JointAngle[node_idx]/PI*180 < 180:
                self.valueSetting["from_"] = -180
                self.valueSetting["to"] = 180
            else:
                self.valueSetting["from_"] = a_module.JointAngle[node_idx]/PI*180-180
                self.valueSetting["to"] = a_module.JointAngle[node_idx]/PI*180+180
        if a_module:
            self.valueSetting.set(a_module.JointAngle[node_idx]/PI*180)
            self.valueInBox.set(a_module.JointAngle[node_idx]/PI*180)
        self.RefreshAssociates()
    ## Callback of the "common command" radiobutton.
    # Switches the panel from special-command mode to common-command mode and
    # clears any half-entered special command.
    # @param self Object pointer
    def SelectCommonCommand(self):
        self.DisableSpecialCommand()
        self.EnableCommonCommand()
        self.ResetSpecialComand()
## Disable common command
# @param self Object pointer
def DisableCommonCommand(self):
self.bend_joint["state"] = DISABLED
self.left_joint["state"] = DISABLED
self.right_joint["state"] = DISABLED
self.front_joint["state"] = DISABLED
self.modeAngle["state"] = DISABLED
# self.modeSpeed["state"] = DISABLED
# self.modeTorque["state"] = DISABLED
self.associatedJoints["state"] = DISABLED
self.ascModify["state"] = DISABLED
self.ascDelete["state"] = DISABLED
self.valueSetting["state"] = DISABLED
## Enable common command
# @param self Object pointer
def EnableCommonCommand(self):
self.bend_joint["state"] = NORMAL
self.left_joint["state"] = NORMAL
self.right_joint["state"] = NORMAL
self.front_joint["state"] = NORMAL
self.modeAngle["state"] = NORMAL
# self.modeSpeed["state"] = NORMAL
# self.modeTorque["state"] = NORMAL
self.associatedJoints["state"] = NORMAL
self.ascModify["state"] = NORMAL
self.ascDelete["state"] = NORMAL
self.valueSetting["state"] = NORMAL
#---------------- Add Special Command ----------------------
## Callback of select connection radiobutton
# @param self Object pointer
def SelectConnection(self):
self.ResetSpecialComand()
self.EnableSpecialCommand()
self.DisableCommonCommand()
self.ConnectableModules = []
for eachmodule in self.ModuleList:
has_unoccupied_node = False
for x in xrange(4):
if len(eachmodule.nodes[x]) == 0:
has_unoccupied_node = True
break
if has_unoccupied_node:
self.ConnectableModules.append(eachmodule.ModelName)
self.module2Selection['values'] = tuple(self.ConnectableModules)
module1_name = self.modelname.get()
node1_list = self.UpdateConnectableNodes(module1_name)
self.node1Selection['values'] = tuple(node1_list)
self.node2Selection["values"] = ()
    ## Select the peer module to connect to or disconnect from.
    # commandType 1 (presumably "connect" -- confirm against the radiobutton
    # values): offer the peer's free nodes.  commandType 2 ("disconnect"):
    # find the existing connection to the peer and auto-fill both node
    # selectors from it.
    # @param self Object pointer
    # @param args Extra Tkinter callback arguments (unused)
    def SelectSecondModule(self,*args):
        if self.commandType.get() == 1:
            module2_name = self.module2Select.get()
            node2_list = self.UpdateConnectableNodes(module2_name)
            self.node2Selection['values'] = tuple(node2_list)
        elif self.commandType.get() == 2:
            module1_name = self.modelname.get()
            module2_name = self.module2Select.get()
            module_1 = self.GetModuleByName(module1_name)
            for x in xrange(4):
                if len(module_1.nodes[x]) > 0:
                    # Each occupied node stores a connection object naming both
                    # endpoints; pick whichever side matches the peer module.
                    if module_1.nodes[x].Module1.ModelName == module2_name:
                        self.node2Selection['values'] = (module_1.nodes[x].Node1)
                        self.nodeSelect1.set(module_1.nodes[x].Node2)
                        self.nodeSelect2.set(module_1.nodes[x].Node1)
                        break
                    if module_1.nodes[x].Module2.ModelName == module2_name:
                        self.node2Selection['values'] = (module_1.nodes[x].Node2)
                        self.nodeSelect1.set(module_1.nodes[x].Node1)
                        self.nodeSelect2.set(module_1.nodes[x].Node2)
                        break
## Select the node of the first module
# @param self Object pointer
# @param args Other arguments
def SelectNode1(self,*args):
if self.commandType.get() == 2:
module1_name = self.modelname.get()
node_1 = self.nodeSelect1.get()
module_1 = self.GetModuleByName(module1_name)
module_1_name = module_1.nodes[node_1].Module1.ModelName
module_2_name = module_1.nodes[node_1].Module2.ModelName
if module_1_name != module1_name:
self.module2Select.set(module_1_name)
self.nodeSelect2.set(module_1.nodes[node_1].Node1)
self.node2Selection['values'] = (module_1.nodes[node_1].Node1)
if module_2_name != module1_name:
self.module2Select.set(module_2_name)
self.nodeSelect2.set(module_1.nodes[node_1].Node2)
self.node2Selection['values'] = (module_1.nodes[node_1].Node2)
## Update the available nodes of the other module the current module connect to
# @param self Object pointer
# @param module_name Name string of the other module
# @return A list of interger, which are the id of the available nodes
def UpdateConnectableNodes(self, module_name):
a_module = self.GetModuleByName(module_name)
node_available = []
for x in xrange(4):
if len(a_module.nodes[x]) == 0:
node_available.append(str(x))
return node_available
## Reset the module and nodes selection in the special command section
# @param self Object pointer
def ResetSpecialComand(self):
self.module2Select.set("")
self.nodeSelect1.set("")
self.nodeSelect2.set("")
    ## Callback of the "disconnect" radiobutton.
    # Lists the peer modules currently connected to the selected module and
    # the local nodes those connections occupy.
    # @param self Object pointer
    def SelectDisconnection(self):
        self.ResetSpecialComand()
        self.EnableSpecialCommand()
        self.DisableCommonCommand()
        module_1 = self.GetModuleByName(self.modelname.get())
        self.DisconnectableModules = []
        self.DisconnectableNodes = []
        if module_1:
            for x in xrange(4):
                if len(module_1.nodes[x]) > 0:
                    # Each occupied node stores a connection naming both ends;
                    # the end that is not this module is the disconnect target.
                    module1_name = module_1.nodes[x].Module1.ModelName
                    module2_name = module_1.nodes[x].Module2.ModelName
                    if module1_name != self.modelname.get():
                        self.DisconnectableModules.append(module1_name)
                    if module2_name != self.modelname.get():
                        self.DisconnectableModules.append(module2_name)
                    self.DisconnectableNodes.append(x)
        self.module2Selection['values'] = tuple(self.DisconnectableModules)
        self.node1Selection["values"] = tuple(self.DisconnectableNodes)
        self.node2Selection["values"] = ()
## Disable special command section
# @param self Object pointer
def DisableSpecialCommand(self):
self.node1Selection["state"] = DISABLED
self.module2Selection["state"] = DISABLED
self.node2Selection["state"] = DISABLED
## Enable special command section
# @param self Object pointer
def EnableSpecialCommand(self):
self.node1Selection["state"] = NORMAL
self.module2Selection["state"] = NORMAL
self.node2Selection["state"] = NORMAL
## A modal Toplevel dialog for creating one joint association.
class AddAssociate(Toplevel):
    ## Constructor
    # @param self Object pointer
    # @param parent Parent widget (the GaitRecorder main window)
    def __init__(self, parent):
        Toplevel.__init__(self,parent)
        # Keep the dialog on top of, and grouped with, its parent window.
        self.transient(parent)
        self.title("Add association")
        ## Parent, which is the GaitRecorder instance
        self.parent = parent
        ## Variable holding the selected module name
        self.moduleList = StringVar()
        ## Integer, joint identity (0 front, 1 left, 2 right, 3 central bend)
        self.jointSelection = IntVar()
        ## Integer, correlation type: 0 positive, 1 negative
        self.corelation = IntVar()
        ## Correlation ratio
        self.ratio = DoubleVar()
        body = Frame(self, width = DIALOG_WIDTH, height = DIALOG_HEIGHT)
        ## Widget that should receive focus when the dialog opens
        self.initial_focus = self.body(body)
        body.pack(padx=5, pady=5)
        if not self.initial_focus:
            self.initial_focus = self
        # Route the window-manager close button through cancel().
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.geometry("%dx%d" % (DIALOG_WIDTH,
            DIALOG_HEIGHT))
        self.initial_focus.focus_set()
    ## Build the dialog body.
    # @param self Object pointer
    # @param master Frame hosting the body widgets
    # @return The module-name Combobox (gets initial focus)
    def body(self, master):
        cancel = Button(master, text="Cancel", command = self.cancel, width = 6)
        cancel.place(x = DIALOG_WIDTH-20, y = DIALOG_HEIGHT-20, anchor = SE)
        add = Button(master, text="Add", width = 6, command = self.Add)
        add.place(x = 10, y = DIALOG_HEIGHT-20, anchor = SW)
        label1 = Label(master, text = "1. Select module:")
        label1.place(x = 10, y = 10)
        moduleName = ttk.Combobox(master,width = 10, textvariable = self.moduleList)
        moduleName["values"] = tuple(self.parent.modulenames)
        moduleName.place(x = 120, y = 10)
        label3 = Label(master, text = "2. Select joint:")
        label3.place(x = 10, y = 40)
        # Illustration of a module; the path is relative to the process CWD.
        bard = Image.open("SmallSmores.png")
        bardejov = ImageTk.PhotoImage(bard)
        label2 = Label(master, image = bardejov)
        # Keep a reference so the PhotoImage is not garbage-collected.
        label2.image = bardejov
        label2.place(x=90, y=90)
        ## Radio button for the central bending joint
        self.bend_joint = Radiobutton(master, text='Central Bending', variable=self.jointSelection, value=3)
        ## Radio button for the left wheel joint
        self.left_joint = Radiobutton(master, text='Lft Wheel', variable=self.jointSelection, value=1)
        ## Radio button for the right wheel joint
        self.right_joint = Radiobutton(master, text='Rgt Wheel', variable=self.jointSelection, value=2)
        ## Radio button for the front wheel joint
        self.front_joint = Radiobutton(master, text='Front Wheel', variable=self.jointSelection, value=0)
        self.front_joint.select()
        self.bend_joint.place(x= 85, y = 65)
        self.front_joint.place(x= 85, y = 180)
        self.right_joint.place(x= 45, y = 140,anchor = CENTER)
        self.left_joint.place(x= 215, y = 140,anchor = CENTER)
        label4 = Label(master, text = "3. Select correlation")
        label4.place(x = 260, y = 40)
        ## Radio button for positive correlation
        self.positiveCor = Radiobutton(master, text = "Positive", variable = self.corelation, value = 0)
        self.positiveCor.place(x = 270, y = 70)
        ## Radio button for negative correlation
        self.negativeCor = Radiobutton(master, text = "Negative", variable = self.corelation, value = 1)
        self.negativeCor.place(x = 270, y = 100)
        label5 = Label(master, text = "4. Select ratio")
        label5.place(x = 260, y = 150)
        ## Entry box for the correlation ratio
        self.correlationRatio = Entry(master, textvariable = self.ratio, width = 10)
        self.ratio.set(1)
        self.correlationRatio.place(x = 270, y = 180)
        return moduleName
    ## "Add" button callback: create an AssociateJoint in the parent.
    # Appends the new association to the list matching the parent's currently
    # selected joint, then closes the dialog.
    # @param self Object pointer
    def Add(self):
        if len(self.moduleList.get()) > 0 and len(self.parent.modelname.get()) > 0:
            # corelation value 0 means positive, 1 means negative.
            corr = True
            if self.corelation.get() == 0:
                corr = True
            if self.corelation.get() == 1:
                corr = False
            newAssociate = AssociateJoint(self.moduleList.get(),self.jointSelection.get(), \
                corr, self.ratio.get())
            if self.parent.jointSelection.get() == 0:
                self.parent.frontWheelAssociates.append(newAssociate)
                self.parent.currentAssociates = self.parent.frontWheelAssociates
            if self.parent.jointSelection.get() == 1:
                self.parent.lftWheelAssociates.append(newAssociate)
                self.parent.currentAssociates = self.parent.lftWheelAssociates
            if self.parent.jointSelection.get() == 2:
                self.parent.rgtWheelAssociates.append(newAssociate)
                self.parent.currentAssociates = self.parent.rgtWheelAssociates
            if self.parent.jointSelection.get() == 3:
                self.parent.centralBendAssociates.append(newAssociate)
                self.parent.currentAssociates = self.parent.centralBendAssociates
            self.parent.RefreshAssociates()
        self.parent.focus_set()
        self.destroy()
    ## Cancel the dialog without adding an association.
    # @param self Object pointer
    # @param event Optional Tkinter event (key bindings pass one)
    def cancel(self, event=None):
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()
## Main function for this module, which will start the python gui
# @param falg Integer, 0 for normal mode, 1 for gui debug mode, 2 for python only mode
def main(flag):
    """Start the GaitRecorder GUI main loop.

    flag: 0 for normal mode, 1 for gui debug mode, 2 for python-only mode.
    """
    window = Tk()
    window.geometry("{0}x{1}".format(window_width, window_height))
    # Keep the recorder window above other windows.
    window.wm_attributes("-topmost", 1)
    app = GaitRecorder(window, flag)
    window.mainloop()
if __name__ == '__main__':
    # Default to normal mode. Previously `flag` was assigned only for the
    # recognised arguments '-gd' and '-o', so any other argument left it
    # undefined and main(flag) raised a NameError.
    flag = 0
    if len(sys.argv) >= 2:
        if sys.argv[1] == '-gd':  # gui debug mode, open gui only
            flag = 1
        elif sys.argv[1] == '-o':  # do not invoke simulation
            flag = 2
main(flag) | {
"content_hash": "36a12abde1aba30d6f1e82c1810fd516",
"timestamp": "",
"source": "github",
"line_count": 1676,
"max_line_length": 179,
"avg_line_length": 45.58174224343676,
"alnum_prop": 0.6646900975194712,
"repo_name": "princeedward/SimulationPlugins",
"id": "d809c4a8da87588e181455b646ab38e9119cfa87",
"size": "76513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GaitRecorder/pythonGUI/GaitRecorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "304878"
},
{
"name": "CMake",
"bytes": "11271"
},
{
"name": "Protocol Buffer",
"bytes": "3296"
},
{
"name": "Python",
"bytes": "149269"
},
{
"name": "Shell",
"bytes": "8520"
}
],
"symlink_target": ""
} |
from app import app, flatpages, LESSON_DIR, DAY_DIR, pygments_style_defs, LINKS_ACTIVE, freezer
from flask import render_template, make_response
from itertools import groupby
@app.route('/')
def index():
    """Render the homepage: lessons grouped by day plus per-day metadata."""
    lessons = sorted(
        (p for p in flatpages if LESSON_DIR in p.path),
        key=lambda page: page.meta['num'],
    )
    # groupby only groups adjacent items, so the 'num' sort above must put
    # each day's pages next to each other:
    # [(0, [Page1, Page2]), (1, [Page3, Page4]), ...]
    days = groupby(lessons, key=lambda page: page.meta['day'])
    day_pages = sorted(
        (p for p in flatpages if DAY_DIR in p.path),
        key=lambda page: page.meta['num'],
    )
    return render_template('landing.html', days=days, daydata=day_pages, links_active=LINKS_ACTIVE)
@app.route('/pygments.css')
def pygments_css():
    """Serve the Pygments stylesheet that colours python code blocks in markdown text."""
    headers = {'Content-Type': 'text/css'}
    return pygments_style_defs('tango'), 200, headers
@app.route('/file/<filename>')
def get_file(filename):
    """Send an exercise file from content/exercises/ as a download attachment."""
    # NOTE(review): filename comes straight from the URL; Flask's default
    # <filename> converter rejects '/', but confirm no further sanitisation
    # is needed before interpolating it into the path and header.
    resource_path = 'content/exercises/{0}'.format(filename)
    with app.open_resource(resource_path) as handle:
        payload = handle.read()
    response = make_response(payload)
    disposition = "attachment; filename={0}".format(filename)
    response.headers["Content-Disposition"] = disposition
    return response
if __name__ == '__main__':
    # Development entry point; debug=True enables Flask's reloader/debugger.
    app.run(debug=True)
| {
"content_hash": "b299cc150fa646fc0448aff6eff81268",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 115,
"avg_line_length": 31.047619047619047,
"alnum_prop": 0.6602760736196319,
"repo_name": "neuroneuro15/SciCourseSite2",
"id": "f0c68c22ed64a2ef9e662f5a9c9a1de519ea8aa1",
"size": "1304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132491"
},
{
"name": "HTML",
"bytes": "10609"
},
{
"name": "Python",
"bytes": "2261"
}
],
"symlink_target": ""
} |
import time
from tempest_lib.common.utils import misc
from tempest_lib import exceptions as lib_exc
from tempest import exceptions
from tempest.services.network.json import base
class NetworkClient(base.BaseNetworkClient):
    """
    Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
    V1 API has been removed from the code base.
    Implements create, delete, update, list and show for the basic Neutron
    abstractions (networks, sub-networks, routers, ports and floating IP):
    Implements add/remove interface to router using subnet ID / port ID
    It also implements list, show, update and reset for OpenStack Networking
    quotas
    """
    # ---- Ports ----------------------------------------------------------
    def create_port(self, **kwargs):
        uri = '/ports'
        post_data = {'port': kwargs}
        return self.create_resource(uri, post_data)
    def update_port(self, port_id, **kwargs):
        uri = '/ports/%s' % port_id
        post_data = {'port': kwargs}
        return self.update_resource(uri, post_data)
    def show_port(self, port_id, **fields):
        uri = '/ports/%s' % port_id
        return self.show_resource(uri, **fields)
    def delete_port(self, port_id):
        uri = '/ports/%s' % port_id
        return self.delete_resource(uri)
    def list_ports(self, **filters):
        uri = '/ports'
        return self.list_resources(uri, **filters)
    # ---- Floating IPs ---------------------------------------------------
    def create_floatingip(self, **kwargs):
        uri = '/floatingips'
        post_data = {'floatingip': kwargs}
        return self.create_resource(uri, post_data)
    def update_floatingip(self, floatingip_id, **kwargs):
        uri = '/floatingips/%s' % floatingip_id
        post_data = {'floatingip': kwargs}
        return self.update_resource(uri, post_data)
    def show_floatingip(self, floatingip_id, **fields):
        uri = '/floatingips/%s' % floatingip_id
        return self.show_resource(uri, **fields)
    def delete_floatingip(self, floatingip_id):
        uri = '/floatingips/%s' % floatingip_id
        return self.delete_resource(uri)
    def list_floatingips(self, **filters):
        uri = '/floatingips'
        return self.list_resources(uri, **filters)
    # ---- Metering labels and label rules --------------------------------
    def create_metering_label(self, **kwargs):
        uri = '/metering/metering-labels'
        post_data = {'metering_label': kwargs}
        return self.create_resource(uri, post_data)
    def show_metering_label(self, metering_label_id, **fields):
        uri = '/metering/metering-labels/%s' % metering_label_id
        return self.show_resource(uri, **fields)
    def delete_metering_label(self, metering_label_id):
        uri = '/metering/metering-labels/%s' % metering_label_id
        return self.delete_resource(uri)
    def list_metering_labels(self, **filters):
        uri = '/metering/metering-labels'
        return self.list_resources(uri, **filters)
    def create_metering_label_rule(self, **kwargs):
        uri = '/metering/metering-label-rules'
        post_data = {'metering_label_rule': kwargs}
        return self.create_resource(uri, post_data)
    def show_metering_label_rule(self, metering_label_rule_id, **fields):
        uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
        return self.show_resource(uri, **fields)
    def delete_metering_label_rule(self, metering_label_rule_id):
        uri = '/metering/metering-label-rules/%s' % metering_label_rule_id
        return self.delete_resource(uri)
    def list_metering_label_rules(self, **filters):
        uri = '/metering/metering-label-rules'
        return self.list_resources(uri, **filters)
    # ---- Security groups and rules --------------------------------------
    def create_security_group(self, **kwargs):
        uri = '/security-groups'
        post_data = {'security_group': kwargs}
        return self.create_resource(uri, post_data)
    def update_security_group(self, security_group_id, **kwargs):
        uri = '/security-groups/%s' % security_group_id
        post_data = {'security_group': kwargs}
        return self.update_resource(uri, post_data)
    def show_security_group(self, security_group_id, **fields):
        uri = '/security-groups/%s' % security_group_id
        return self.show_resource(uri, **fields)
    def delete_security_group(self, security_group_id):
        uri = '/security-groups/%s' % security_group_id
        return self.delete_resource(uri)
    def list_security_groups(self, **filters):
        uri = '/security-groups'
        return self.list_resources(uri, **filters)
    def create_security_group_rule(self, **kwargs):
        uri = '/security-group-rules'
        post_data = {'security_group_rule': kwargs}
        return self.create_resource(uri, post_data)
    def show_security_group_rule(self, security_group_rule_id, **fields):
        uri = '/security-group-rules/%s' % security_group_rule_id
        return self.show_resource(uri, **fields)
    def delete_security_group_rule(self, security_group_rule_id):
        uri = '/security-group-rules/%s' % security_group_rule_id
        return self.delete_resource(uri)
    def list_security_group_rules(self, **filters):
        uri = '/security-group-rules'
        return self.list_resources(uri, **filters)
    # ---- API extensions -------------------------------------------------
    def show_extension(self, ext_alias, **fields):
        uri = '/extensions/%s' % ext_alias
        return self.show_resource(uri, **fields)
    def list_extensions(self, **filters):
        uri = '/extensions'
        return self.list_resources(uri, **filters)
    # ---- Bulk creation helpers (one request, many resources) ------------
    def create_bulk_network(self, names):
        network_list = [{'name': name} for name in names]
        post_data = {'networks': network_list}
        uri = '/networks'
        return self.create_resource(uri, post_data)
    def create_bulk_subnet(self, subnet_list):
        post_data = {'subnets': subnet_list}
        uri = '/subnets'
        return self.create_resource(uri, post_data)
    def create_bulk_port(self, port_list):
        post_data = {'ports': port_list}
        uri = '/ports'
        return self.create_resource(uri, post_data)
    # ---- Wait / status helpers ------------------------------------------
    def wait_for_resource_deletion(self, resource_type, id):
        """Waits for a resource to be deleted."""
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(resource_type, id):
                return
            # Give up once build_timeout seconds have elapsed.
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)
    def is_resource_deleted(self, resource_type, id):
        # Resolve the matching show_<resource_type> accessor; a NotFound
        # from the API means the resource is gone.
        method = 'show_' + resource_type
        try:
            getattr(self, method)(id)
        except AttributeError:
            raise Exception("Unknown resource type %s " % resource_type)
        except lib_exc.NotFound:
            return True
        return False
    def wait_for_resource_status(self, fetch, status, interval=None,
                                 timeout=None):
        """
        @summary: Waits for a network resource to reach a status
        @param fetch: the callable to be used to query the resource status
        @type fetch: callable that takes no parameters and returns the resource
        @param status: the status that the resource has to reach
        @type status: String
        @param interval: the number of seconds to wait between each status
        query
        @type interval: Integer
        @param timeout: the maximum number of seconds to wait for the resource
        to reach the desired status
        @type timeout: Integer
        """
        if not interval:
            interval = self.build_interval
        if not timeout:
            timeout = self.build_timeout
        start_time = time.time()
        while time.time() - start_time <= timeout:
            resource = fetch()
            if resource['status'] == status:
                return
            time.sleep(interval)
        # At this point, the wait has timed out
        message = 'Resource %s' % (str(resource))
        message += ' failed to reach status %s' % status
        message += ' (current: %s)' % resource['status']
        message += ' within the required time %s' % timeout
        caller = misc.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        raise exceptions.TimeoutException(message)
    # ---- Quotas ---------------------------------------------------------
    def update_quotas(self, tenant_id, **kwargs):
        put_body = {'quota': kwargs}
        uri = '/quotas/%s' % tenant_id
        return self.update_resource(uri, put_body)
    def reset_quotas(self, tenant_id):
        # Deleting a tenant's quota resets it to the defaults.
        uri = '/quotas/%s' % tenant_id
        return self.delete_resource(uri)
    def show_quotas(self, tenant_id, **fields):
        uri = '/quotas/%s' % tenant_id
        return self.show_resource(uri, **fields)
    def list_quotas(self, **filters):
        uri = '/quotas'
        return self.list_resources(uri, **filters)
    # ---- Routers --------------------------------------------------------
    def create_router(self, name, admin_state_up=True, **kwargs):
        post_body = {'router': kwargs}
        post_body['router']['name'] = name
        post_body['router']['admin_state_up'] = admin_state_up
        uri = '/routers'
        return self.create_resource(uri, post_body)
    def _update_router(self, router_id, set_enable_snat, **kwargs):
        # Read-modify-write: fetch the current router so attributes not
        # present in kwargs keep their existing values.
        uri = '/routers/%s' % router_id
        body = self.show_resource(uri)
        update_body = {}
        update_body['name'] = kwargs.get('name', body['router']['name'])
        update_body['admin_state_up'] = kwargs.get(
            'admin_state_up', body['router']['admin_state_up'])
        cur_gw_info = body['router']['external_gateway_info']
        if cur_gw_info:
            # TODO(kevinbenton): setting the external gateway info is not
            # allowed for a regular tenant. If the ability to update is also
            # merged, a test case for this will need to be added similar to
            # the SNAT case.
            cur_gw_info.pop('external_fixed_ips', None)
            if not set_enable_snat:
                cur_gw_info.pop('enable_snat', None)
        update_body['external_gateway_info'] = kwargs.get(
            'external_gateway_info', body['router']['external_gateway_info'])
        if 'distributed' in kwargs:
            update_body['distributed'] = kwargs['distributed']
        update_body = dict(router=update_body)
        return self.update_resource(uri, update_body)
    def update_router(self, router_id, **kwargs):
        """Update a router leaving enable_snat to its default value."""
        # If external_gateway_info contains enable_snat the request will fail
        # with 404 unless executed with admin client, and therefore we instruct
        # _update_router to not set this attribute
        # NOTE(salv-orlando): The above applies as long as Neutron's default
        # policy is to restrict enable_snat usage to admins only.
        return self._update_router(router_id, set_enable_snat=False, **kwargs)
    def show_router(self, router_id, **fields):
        uri = '/routers/%s' % router_id
        return self.show_resource(uri, **fields)
    def delete_router(self, router_id):
        uri = '/routers/%s' % router_id
        return self.delete_resource(uri)
    def list_routers(self, **filters):
        uri = '/routers'
        return self.list_resources(uri, **filters)
    def update_router_with_snat_gw_info(self, router_id, **kwargs):
        """Update a router passing also the enable_snat attribute.
        This method must be executed with admin credentials, otherwise the API
        call will return a 404 error.
        """
        return self._update_router(router_id, set_enable_snat=True, **kwargs)
    # ---- Router interfaces ----------------------------------------------
    def add_router_interface_with_subnet_id(self, router_id, subnet_id):
        uri = '/routers/%s/add_router_interface' % router_id
        update_body = {"subnet_id": subnet_id}
        return self.update_resource(uri, update_body)
    def add_router_interface_with_port_id(self, router_id, port_id):
        uri = '/routers/%s/add_router_interface' % router_id
        update_body = {"port_id": port_id}
        return self.update_resource(uri, update_body)
    def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
        uri = '/routers/%s/remove_router_interface' % router_id
        update_body = {"subnet_id": subnet_id}
        return self.update_resource(uri, update_body)
    def remove_router_interface_with_port_id(self, router_id, port_id):
        uri = '/routers/%s/remove_router_interface' % router_id
        update_body = {"port_id": port_id}
        return self.update_resource(uri, update_body)
    def list_router_interfaces(self, uuid):
        # Router interfaces are the ports whose device_id is the router.
        uri = '/ports?device_id=%s' % uuid
        return self.list_resources(uri)
    # ---- Agents (generic, L3 and DHCP scheduling) ------------------------
    def update_agent(self, agent_id, agent_info):
        """
        :param agent_info: Agent update information.
        E.g {"admin_state_up": True}
        """
        uri = '/agents/%s' % agent_id
        agent = {"agent": agent_info}
        return self.update_resource(uri, agent)
    def show_agent(self, agent_id, **fields):
        uri = '/agents/%s' % agent_id
        return self.show_resource(uri, **fields)
    def list_agents(self, **filters):
        uri = '/agents'
        return self.list_resources(uri, **filters)
    def list_routers_on_l3_agent(self, agent_id):
        uri = '/agents/%s/l3-routers' % agent_id
        return self.list_resources(uri)
    def list_l3_agents_hosting_router(self, router_id):
        uri = '/routers/%s/l3-agents' % router_id
        return self.list_resources(uri)
    def add_router_to_l3_agent(self, agent_id, router_id):
        uri = '/agents/%s/l3-routers' % agent_id
        post_body = {"router_id": router_id}
        return self.create_resource(uri, post_body)
    def remove_router_from_l3_agent(self, agent_id, router_id):
        uri = '/agents/%s/l3-routers/%s' % (agent_id, router_id)
        return self.delete_resource(uri)
    def list_dhcp_agent_hosting_network(self, network_id):
        uri = '/networks/%s/dhcp-agents' % network_id
        return self.list_resources(uri)
    def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
        uri = '/agents/%s/dhcp-networks' % agent_id
        return self.list_resources(uri)
    def remove_network_from_dhcp_agent(self, agent_id, network_id):
        uri = '/agents/%s/dhcp-networks/%s' % (agent_id,
                                               network_id)
        return self.delete_resource(uri)
    # ---- Extra routes ---------------------------------------------------
    def update_extra_routes(self, router_id, routes):
        uri = '/routers/%s' % router_id
        put_body = {
            'router': {
                'routes': routes
            }
        }
        return self.update_resource(uri, put_body)
    def delete_extra_routes(self, router_id):
        # Passing routes=None clears the router's extra routes.
        uri = '/routers/%s' % router_id
        put_body = {
            'router': {
                'routes': None
            }
        }
        return self.update_resource(uri, put_body)
    def add_dhcp_agent_to_network(self, agent_id, network_id):
        post_body = {'network_id': network_id}
        uri = '/agents/%s/dhcp-networks' % agent_id
        return self.create_resource(uri, post_body)
    # ---- Subnet pools ---------------------------------------------------
    def list_subnetpools(self, **filters):
        uri = '/subnetpools'
        return self.list_resources(uri, **filters)
    def create_subnetpools(self, **kwargs):
        uri = '/subnetpools'
        post_data = {'subnetpool': kwargs}
        return self.create_resource(uri, post_data)
    def show_subnetpools(self, subnetpool_id, **fields):
        uri = '/subnetpools/%s' % subnetpool_id
        return self.show_resource(uri, **fields)
    def update_subnetpools(self, subnetpool_id, **kwargs):
        uri = '/subnetpools/%s' % subnetpool_id
        post_data = {'subnetpool': kwargs}
        return self.update_resource(uri, post_data)
    def delete_subnetpools(self, subnetpool_id):
        uri = '/subnetpools/%s' % subnetpool_id
        return self.delete_resource(uri)
| {
"content_hash": "38677f1d3a44e3609cbc739c0ec36512",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 79,
"avg_line_length": 38.220338983050844,
"alnum_prop": 0.6139372822299651,
"repo_name": "izadorozhna/tempest",
"id": "7821f37bd5ce092c0d6fd8192e10d7090c6dc49d",
"size": "16358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/services/network/json/network_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2827292"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
import pygame
from graphics import *
from math import *
from tower import *
# This class controls the radius for a tower, which is the median between a Tower and an Enemy.
# The radius will detect an enemy, and use stats from the tower to send projectiles at the enemy.
class TowerRadius(pygame.sprite.Sprite):
    """Attack radius of a tower: detects enemies inside the circle so the
    tower can send projectiles at the current target."""
    def __init__(self, radius, x, y, type):
        """Create a radius of the given size centred on (x, y).

        `type` is the tower's colour/kind string (e.g. contains "red").
        The parameter name shadows the builtin but is kept for interface
        compatibility with existing callers.
        """
        pygame.sprite.Sprite.__init__(self)
        self.radius = radius
        self.type = type
        self.visible = True
        self.x = x
        self.y = y
        self.currTarget = ""  # stores the current enemy we're firing at; "" means none
        self.enemiesInRange = []
    def drawRadius(self, screen):
        """Draw the radius circle (or an invisible placeholder when hidden)
        and update self.rect to the circle's bounding box."""
        radcolor = (255, 255, 255)
        if "red" in self.type:
            radcolor = (255, 0, 0)
        if "blue" in self.type:
            radcolor = (0, 0, 255)
        if "green" in self.type:
            radcolor = (0, 255, 0)
        if self.visible:
            self.rect = pygame.draw.circle(screen, radcolor, (self.x, self.y), self.radius, 1)
            self.rect.x = self.x - self.radius
            self.rect.y = self.y - self.radius
        else:
            # Fully transparent surface keeps the rect in place without
            # rendering the circle outline.
            fadedRect = pygame.Surface((self.radius * 2, self.radius * 2))
            fadedRect.set_alpha(0)
            fadedRect.fill((255, 255, 255))
            screen.blit(fadedRect, (self.x - self.radius, self.y - self.radius))
            self.rect = fadedRect.get_rect()
            self.rect.x = self.x - self.radius
            self.rect.y = self.y - self.radius
    def intersects(self, pos):
        """Return True when the point `pos` (x, y) lies within the radius."""
        x = pos[0]
        y = pos[1]
        distance = sqrt(((x - self.x) ** 2) + ((y - self.y) ** 2))
        # we need to adjust the distance since its going off of midpoint,
        # not the edge of the enemy
        adjusted_distance = distance - 8
        return adjusted_distance <= self.radius
    def checkTarget(self):
        """Select the first enemy in range as the current target.

        Returns True when a target is available, False otherwise.
        """
        # Fix: the original used `self.currTarget is ""` (identity comparison
        # with a string literal, a CPython-interning implementation detail),
        # and both branches assigned the same value anyway — a single
        # assignment is behaviourally identical.
        hasTarget = False
        if len(self.enemiesInRange) > 0:
            self.currTarget = self.enemiesInRange[0]
            hasTarget = True
        return hasTarget
"content_hash": "662ea38cfcd0131fc17a452aca642b8c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 97,
"avg_line_length": 31.227272727272727,
"alnum_prop": 0.6720038816108685,
"repo_name": "taytam/crystaldefense",
"id": "f66375f6fd68acf3a95f52af545cb97e48735c8a",
"size": "2061",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "towerradius.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "151704"
}
],
"symlink_target": ""
} |
import datetime
import json
from unittest import mock
from django.db.models import Q
from django.test import TestCase
from elasticsearch.serializer import JSONSerializer
from wagtail.search.backends.elasticsearch6 import Elasticsearch6SearchBackend
from wagtail.search.query import MATCH_ALL
from wagtail.tests.search import models
from .elasticsearch_common_tests import ElasticsearchCommonSearchBackendTests
class TestElasticsearch6SearchBackend(ElasticsearchCommonSearchBackendTests, TestCase):
    """Run the shared Elasticsearch backend test suite against the ES6 backend."""
    # Dotted path the common tests use to load the backend under test.
    backend_path = 'wagtail.search.backends.elasticsearch6'
class TestElasticsearch6SearchQuery(TestCase):
    """Unit tests for the query DSL emitted by the Elasticsearch 6 query compiler.

    Each test builds a query compiler from a Django queryset plus a search
    expression and checks the exact ``bool`` query (or sort spec) produced.
    """
    def assertDictEqual(self, a, b):
        # Compare via canonical JSON so key ordering and non-JSON types
        # (serialised through the Elasticsearch JSONSerializer, e.g. dates)
        # don't cause false mismatches.
        default = JSONSerializer().default
        self.assertEqual(
            json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
        )
    # Compiler under test, taken from the ES6 backend.
    query_compiler_class = Elasticsearch6SearchBackend.query_compiler_class
    def test_simple(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.all(), "Hello")
        # Check it
        expected_result = {'bool': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}
        }}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_none_query_string(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.all(), MATCH_ALL)
        # Check it
        expected_result = {'bool': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'must': {'match_all': {}}
        }}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_and_operator(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.all(), "Hello", operator='and')
        # Check it
        expected_result = {'bool': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams'], 'operator': 'and'}}
        }}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_filter(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(title="Test"), "Hello")
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'term': {'title_filter': 'Test'}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_and_filter(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(title="Test", publication_date=datetime.date(2017, 10, 18)), "Hello")
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'bool': {'must': [{'term': {'publication_date_filter': '2017-10-18'}}, {'term': {'title_filter': 'Test'}}]}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        # Make sure field filters are sorted (as they can be in any order which may cause false positives)
        query = query.get_query()
        field_filters = query['bool']['filter'][1]['bool']['must']
        field_filters[:] = sorted(field_filters, key=lambda f: list(f['term'].keys())[0])
        self.assertDictEqual(query, expected_result)
    def test_or_filter(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(Q(title="Test") | Q(publication_date=datetime.date(2017, 10, 18))), "Hello")
        # Make sure field filters are sorted (as they can be in any order which may cause false positives)
        query = query.get_query()
        field_filters = query['bool']['filter'][1]['bool']['should']
        field_filters[:] = sorted(field_filters, key=lambda f: list(f['term'].keys())[0])
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'bool': {'should': [{'term': {'publication_date_filter': '2017-10-18'}}, {'term': {'title_filter': 'Test'}}]}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query, expected_result)
    def test_negated_filter(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.exclude(publication_date=datetime.date(2017, 10, 18)), "Hello")
        # Check it
        # NOTE(review): 'mustNot' mirrors what the compiler emits here;
        # the ES query DSL spells this clause 'must_not' — confirm intended.
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'bool': {'mustNot': {'term': {'publication_date_filter': '2017-10-18'}}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_fields(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title'])
        # Check it
        expected_result = {'bool': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'must': {'match': {'title': {'query': 'Hello'}}}
        }}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_fields_with_and_operator(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title'], operator='and')
        # Check it
        expected_result = {'bool': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'must': {'match': {'title': {'query': 'Hello', 'operator': 'and'}}}
        }}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_multiple_fields(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.all(), "Hello", fields=['title', 'content'])
        # Check it
        expected_result = {'bool': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'must': {'multi_match': {'fields': ['title', 'content'], 'query': 'Hello'}}
        }}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_multiple_fields_with_and_operator(self):
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.all(), "Hello", fields=['title', 'content'], operator='and'
        )
        # Check it
        expected_result = {'bool': {
            'filter': {'match': {'content_type': 'searchtests.Book'}},
            'must': {'multi_match': {'fields': ['title', 'content'], 'query': 'Hello', 'operator': 'and'}}
        }}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_exact_lookup(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(title__exact="Test"), "Hello")
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'term': {'title_filter': 'Test'}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_none_lookup(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(title=None), "Hello")
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'bool': {'mustNot': {'exists': {'field': 'title_filter'}}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_isnull_true_lookup(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(title__isnull=True), "Hello")
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'bool': {'mustNot': {'exists': {'field': 'title_filter'}}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_isnull_false_lookup(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(title__isnull=False), "Hello")
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'exists': {'field': 'title_filter'}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_startswith_lookup(self):
        # Create a query
        query = self.query_compiler_class(models.Book.objects.filter(title__startswith="Test"), "Hello")
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'prefix': {'title_filter': 'Test'}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_gt_lookup(self):
        # This also tests conversion of python dates to strings
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.filter(publication_date__gt=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'gt': '2014-04-29'}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_lt_lookup(self):
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.filter(publication_date__lt=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'lt': '2014-04-29'}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_gte_lookup(self):
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.filter(publication_date__gte=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'gte': '2014-04-29'}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_lte_lookup(self):
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.filter(publication_date__lte=datetime.datetime(2014, 4, 29)), "Hello"
        )
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'lte': '2014-04-29'}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_range_lookup(self):
        start_date = datetime.datetime(2014, 4, 29)
        end_date = datetime.datetime(2014, 8, 19)
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.filter(publication_date__range=(start_date, end_date)), "Hello"
        )
        # Check it
        expected_result = {'bool': {'filter': [
            {'match': {'content_type': 'searchtests.Book'}},
            {'range': {'publication_date_filter': {'gte': '2014-04-29', 'lte': '2014-08-19'}}}
        ], 'must': {'multi_match': {'query': 'Hello', 'fields': ['_all_text', '_edgengrams']}}}}
        self.assertDictEqual(query.get_query(), expected_result)
    def test_custom_ordering(self):
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.order_by('publication_date'), "Hello", order_by_relevance=False
        )
        # Check it
        expected_result = [{'publication_date_filter': 'asc'}]
        self.assertDictEqual(query.get_sort(), expected_result)
    def test_custom_ordering_reversed(self):
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.order_by('-publication_date'), "Hello", order_by_relevance=False
        )
        # Check it
        expected_result = [{'publication_date_filter': 'desc'}]
        self.assertDictEqual(query.get_sort(), expected_result)
    def test_custom_ordering_multiple(self):
        # Create a query
        query = self.query_compiler_class(
            models.Book.objects.order_by('publication_date', 'number_of_pages'), "Hello", order_by_relevance=False
        )
        # Check it
        expected_result = [{'publication_date_filter': 'asc'}, {'number_of_pages_filter': 'asc'}]
        self.assertDictEqual(query.get_sort(), expected_result)
class TestElasticsearch6SearchResults(TestCase):
fixtures = ['search']
def assertDictEqual(self, a, b):
default = JSONSerializer().default
self.assertEqual(
json.dumps(a, sort_keys=True, default=default), json.dumps
)
def get_results(self):
backend = Elasticsearch6SearchBackend({})
query = mock.MagicMock()
query.queryset = models.Book.objects.all()
query.get_query.return_value = 'QUERY'
query.get_sort.return_value = None
return backend.results_class(backend, query)
def construct_search_response(self, results):
return {
'_shards': {'failed': 0, 'successful': 5, 'total': 5},
'hits': {
'hits': [
{
'_id': 'searchtests_book:' + str(result),
'_index': 'wagtail',
'_score': 1,
'_type': 'searchtests_book',
'fields': {
'pk': [str(result)],
}
}
for result in results
],
'max_score': 1,
'total': len(results)
},
'timed_out': False,
'took': 2
}
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_basic_search(self, search):
        """Plain iteration issues one scroll search with the compiled query."""
        search.return_value = self.construct_search_response([])
        results = self.get_results()
        list(results)  # Performs search
        search.assert_any_call(
            body={'query': 'QUERY'},
            _source=False,
            stored_fields='pk',
            index='wagtail__searchtests_book',
            scroll='2m',
            size=100
        )
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_get_single_item(self, search):
        """Indexing the results issues a search with from_=index, size=1."""
        # Need to return something to prevent index error
        search.return_value = self.construct_search_response([1])
        results = self.get_results()
        results[10]  # Performs search
        search.assert_any_call(
            from_=10,
            body={'query': 'QUERY'},
            _source=False,
            stored_fields='pk',
            index='wagtail__searchtests_book',
            size=1
        )
@mock.patch('elasticsearch.Elasticsearch.search')
def test_slice_results(self, search):
search.return_value = self.construct_search_response([])
results = self.get_results()[1:4]
list(results) # Performs search
search.assert_any_call(
from_=1,
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
size=3
)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_slice_results_multiple_times(self, search):
search.return_value = self.construct_search_response([])
results = self.get_results()[10:][:10]
list(results) # Performs search
search.assert_any_call(
from_=10,
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
size=10
)
@mock.patch('elasticsearch.Elasticsearch.search')
def test_slice_results_and_get_item(self, search):
# Need to return something to prevent index error
search.return_value = self.construct_search_response([1])
results = self.get_results()[10:]
results[10] # Performs search
search.assert_any_call(
from_=20,
body={'query': 'QUERY'},
_source=False,
stored_fields='pk',
index='wagtail__searchtests_book',
size=1
)
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_result_returned(self, search):
        # Hits are resolved back to Django model instances by pk.
        search.return_value = self.construct_search_response([1])
        results = self.get_results()
        self.assertEqual(results[0], models.Book.objects.get(id=1))
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_len_1(self, search):
        search.return_value = self.construct_search_response([1])
        results = self.get_results()
        self.assertEqual(len(results), 1)
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_len_2(self, search):
        search.return_value = self.construct_search_response([1, 2])
        results = self.get_results()
        self.assertEqual(len(results), 2)
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_duplicate_results(self, search): # Duplicates will not be removed
        search.return_value = self.construct_search_response([1, 1])
        results = list(self.get_results()) # Must cast to list so we only create one query
        self.assertEqual(len(results), 2)
        self.assertEqual(results[0], models.Book.objects.get(id=1))
        self.assertEqual(results[1], models.Book.objects.get(id=1))
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_result_order(self, search):
        # Results keep the order Elasticsearch returned them in.
        search.return_value = self.construct_search_response(
            [1, 2, 3]
        )
        results = list(self.get_results()) # Must cast to list so we only create one query
        self.assertEqual(results[0], models.Book.objects.get(id=1))
        self.assertEqual(results[1], models.Book.objects.get(id=2))
        self.assertEqual(results[2], models.Book.objects.get(id=3))
    @mock.patch('elasticsearch.Elasticsearch.search')
    def test_result_order_2(self, search):
        search.return_value = self.construct_search_response(
            [3, 2, 1]
        )
        results = list(self.get_results()) # Must cast to list so we only create one query
        self.assertEqual(results[0], models.Book.objects.get(id=3))
        self.assertEqual(results[1], models.Book.objects.get(id=2))
        self.assertEqual(results[2], models.Book.objects.get(id=1))
class TestElasticsearch6Mapping(TestCase):
    """Checks the ES6 index mapping and indexed document built for Book."""
    fixtures = ['search']
    def assertDictEqual(self, a, b):
        # Compare via sorted-key JSON serialisation so values the
        # Elasticsearch JSONSerializer understands (e.g. dates) compare
        # by serialised value.
        default = JSONSerializer().default
        self.assertEqual(
            json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
        )
    def setUp(self):
        # Create ES mapping
        self.es_mapping = Elasticsearch6SearchBackend.mapping_class(models.Book)
        # Create ES document
        self.obj = models.Book.objects.get(id=4)
    def test_get_document_type(self):
        # ES6 allows only one mapping type per index; it is named 'doc'.
        self.assertEqual(self.es_mapping.get_document_type(), 'doc')
    def test_get_mapping(self):
        # Build mapping
        mapping = self.es_mapping.get_mapping()
        # Check the full expected field mapping, including internal fields
        # (pk, content_type, _all_text, _edgengrams) and per-field *_filter
        # companions.
        expected_result = {
            'doc': {
                'properties': {
                    'pk': {'type': 'keyword', 'store': True},
                    'content_type': {'type': 'keyword'},
                    '_all_text': {'type': 'text'},
                    '_edgengrams': {'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard', 'type': 'text'},
                    'title': {'type': 'text', 'boost': 2.0, 'copy_to': '_all_text', 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_edgengrams': {'type': 'text', 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_filter': {'type': 'keyword'},
                    'authors': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'text', 'copy_to': '_all_text'},
                            'date_of_birth_filter': {'type': 'date'},
                        },
                    },
                    'authors_filter': {'type': 'integer'},
                    'publication_date_filter': {'type': 'date'},
                    'number_of_pages_filter': {'type': 'integer'},
                    'tags': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'text', 'copy_to': '_all_text'},
                            'slug_filter': {'type': 'keyword'},
                        },
                    },
                    'tags_filter': {'type': 'integer'}
                }
            }
        }
        self.assertDictEqual(mapping, expected_result)
    def test_get_document_id(self):
        self.assertEqual(self.es_mapping.get_document_id(self.obj), str(self.obj.pk))
    def test_get_document(self):
        # Get document
        document = self.es_mapping.get_document(self.obj)
        # Sort edgengrams so the comparison is order-independent
        if '_edgengrams' in document:
            document['_edgengrams'].sort()
        # Check
        expected_result = {
            'pk': '4',
            'content_type': ["searchtests.Book"],
            '_edgengrams': ['The Fellowship of the Ring', 'The Fellowship of the Ring'],
            'title': 'The Fellowship of the Ring',
            'title_edgengrams': 'The Fellowship of the Ring',
            'title_filter': 'The Fellowship of the Ring',
            'authors': [
                {
                    'name': 'J. R. R. Tolkien',
                    'date_of_birth_filter': datetime.date(1892, 1, 3)
                }
            ],
            'authors_filter': [2],
            'publication_date_filter': datetime.date(1954, 7, 29),
            'number_of_pages_filter': 423,
            'tags': [],
            'tags_filter': []
        }
        self.assertDictEqual(document, expected_result)
class TestElasticsearch6MappingInheritance(TestCase):
    """Mapping/document tests for an inherited model (Novel extends Book):
    child-model fields are namespaced, parent fields are inherited as-is."""
    fixtures = ['search']
    def assertDictEqual(self, a, b):
        # Compare via sorted-key JSON serialisation so values the
        # Elasticsearch JSONSerializer understands (e.g. dates) compare
        # by serialised value.
        default = JSONSerializer().default
        self.assertEqual(
            json.dumps(a, sort_keys=True, default=default), json.dumps(b, sort_keys=True, default=default)
        )
    def setUp(self):
        # Create ES mapping
        self.es_mapping = Elasticsearch6SearchBackend.mapping_class(models.Novel)
        self.obj = models.Novel.objects.get(id=4)
    def test_get_document_type(self):
        self.assertEqual(self.es_mapping.get_document_type(), 'doc')
    def test_get_mapping(self):
        # Build mapping
        mapping = self.es_mapping.get_mapping()
        # Check: child-model fields are prefixed with 'searchtests_novel__',
        # inherited Book fields keep their plain names.
        expected_result = {
            'doc': {
                'properties': {
                    # New
                    'searchtests_novel__setting': {'type': 'text', 'copy_to': '_all_text', 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'searchtests_novel__protagonist': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'text', 'boost': 0.5, 'copy_to': '_all_text'},
                            'novel_id_filter': {'type': 'integer'}
                        }
                    },
                    'searchtests_novel__protagonist_id_filter': {'type': 'integer'},
                    'searchtests_novel__characters': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'text', 'boost': 0.25, 'copy_to': '_all_text'}
                        }
                    },
                    # Inherited
                    'pk': {'type': 'keyword', 'store': True},
                    'content_type': {'type': 'keyword'},
                    '_all_text': {'type': 'text'},
                    '_edgengrams': {'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard', 'type': 'text'},
                    'title': {'type': 'text', 'boost': 2.0, 'copy_to': '_all_text', 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_edgengrams': {'type': 'text', 'analyzer': 'edgengram_analyzer', 'search_analyzer': 'standard'},
                    'title_filter': {'type': 'keyword'},
                    'authors': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'text', 'copy_to': '_all_text'},
                            'date_of_birth_filter': {'type': 'date'},
                        },
                    },
                    'authors_filter': {'type': 'integer'},
                    'publication_date_filter': {'type': 'date'},
                    'number_of_pages_filter': {'type': 'integer'},
                    'tags': {
                        'type': 'nested',
                        'properties': {
                            'name': {'type': 'text', 'copy_to': '_all_text'},
                            'slug_filter': {'type': 'keyword'},
                        },
                    },
                    'tags_filter': {'type': 'integer'}
                }
            }
        }
        self.assertDictEqual(mapping, expected_result)
    def test_get_document_id(self):
        # This must be tests_searchtest instead of 'tests_searchtest_tests_searchtestchild'
        # as it uses the contents base content type name.
        # This prevents the same object being accidentally indexed twice.
        self.assertEqual(self.es_mapping.get_document_id(self.obj), str(self.obj.pk))
    def test_get_document(self):
        # Build document
        document = self.es_mapping.get_document(self.obj)
        # Sort edgengrams so the comparison is order-independent
        if '_edgengrams' in document:
            document['_edgengrams'].sort()
        # Sort characters for the same reason
        if 'searchtests_novel__characters' in document:
            document['searchtests_novel__characters'].sort(key=lambda c: c['name'])
        # Check
        expected_result = {
            # New
            'searchtests_novel__setting': "Middle Earth",
            'searchtests_novel__protagonist': {
                'name': "Frodo Baggins",
                'novel_id_filter': 4
            },
            'searchtests_novel__protagonist_id_filter': 8,
            'searchtests_novel__characters': [
                {
                    'name': "Bilbo Baggins"
                },
                {
                    'name': "Frodo Baggins"
                },
                {
                    'name': "Gandalf"
                }
            ],
            # Changed
            'content_type': ["searchtests.Novel", "searchtests.Book"],
            '_edgengrams': ['Middle Earth', 'The Fellowship of the Ring', 'The Fellowship of the Ring'],
            # Inherited
            'pk': '4',
            'title': 'The Fellowship of the Ring',
            'title_edgengrams': 'The Fellowship of the Ring',
            'title_filter': 'The Fellowship of the Ring',
            'authors': [
                {
                    'name': 'J. R. R. Tolkien',
                    'date_of_birth_filter': datetime.date(1892, 1, 3)
                }
            ],
            'authors_filter': [2],
            'publication_date_filter': datetime.date(1954, 7, 29),
            'number_of_pages_filter': 423,
            'tags': [],
            'tags_filter': []
        }
        self.assertDictEqual(document, expected_result)
@mock.patch('wagtail.search.backends.elasticsearch2.Elasticsearch')
# NOTE(review): the patch targets the elasticsearch2 module even though the
# ES6 backend is under test — presumably the inherited backend code resolves
# the Elasticsearch name there; confirm against the backend class hierarchy.
class TestBackendConfiguration(TestCase):
    """Checks how HOSTS/URLS params are translated into Elasticsearch
    client constructor arguments."""
    def test_default_settings(self, Elasticsearch):
        # With no params, a single localhost:9200 host with defaults is used.
        Elasticsearch6SearchBackend(params={})
        Elasticsearch.assert_called_with(
            hosts=[
                {
                    'host': 'localhost',
                    'port': 9200,
                    'url_prefix': '',
                    'use_ssl': False,
                    'verify_certs': False,
                    'http_auth': None
                }
            ],
            timeout=10
        )
    def test_hosts(self, Elasticsearch):
        # An explicit HOSTS list is passed through unchanged.
        Elasticsearch6SearchBackend(params={
            'HOSTS': [
                {
                    'host': '127.0.0.1',
                    'port': 9300,
                    'use_ssl': True,
                    'verify_certs': True,
                }
            ]
        })
        Elasticsearch.assert_called_with(
            hosts=[
                {
                    'host': '127.0.0.1',
                    'port': 9300,
                    'use_ssl': True,
                    'verify_certs': True,
                }
            ],
            timeout=10
        )
    def test_urls(self, Elasticsearch):
        # This test backwards compatibility with old URLS setting:
        # each URL is parsed into host/port/prefix/ssl/auth components.
        Elasticsearch6SearchBackend(params={
            'URLS': [
                'http://localhost:12345',
                'https://127.0.0.1:54321',
                'http://username:password@elasticsearch.mysite.com',
                'https://elasticsearch.mysite.com/hello',
            ],
        })
        Elasticsearch.assert_called_with(
            hosts=[
                {
                    'host': 'localhost',
                    'port': 12345,
                    'url_prefix': '',
                    'use_ssl': False,
                    'verify_certs': False,
                    'http_auth': None,
                },
                {
                    'host': '127.0.0.1',
                    'port': 54321,
                    'url_prefix': '',
                    'use_ssl': True,
                    'verify_certs': True,
                    'http_auth': None,
                },
                {
                    'host': 'elasticsearch.mysite.com',
                    'port': 80,
                    'url_prefix': '',
                    'use_ssl': False,
                    'verify_certs': False,
                    'http_auth': ('username', 'password')
                },
                {
                    'host': 'elasticsearch.mysite.com',
                    'port': 443,
                    'url_prefix': '/hello',
                    'use_ssl': True,
                    'verify_certs': True,
                    'http_auth': None,
                },
            ],
            timeout=10
        )
| {
"content_hash": "599cfe9f88d7716d2acbda99ac21f97c",
"timestamp": "",
"source": "github",
"line_count": 816,
"max_line_length": 156,
"avg_line_length": 38.856617647058826,
"alnum_prop": 0.525152174598669,
"repo_name": "mikedingjan/wagtail",
"id": "805601a94e29f91f6769ddc5aa71a72ebe0bd29a",
"size": "31731",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "wagtail/search/tests/test_elasticsearch6_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "183841"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "373400"
},
{
"name": "JavaScript",
"bytes": "266257"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3607707"
},
{
"name": "Shell",
"bytes": "8289"
}
],
"symlink_target": ""
} |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# Thin wrapper script: forward the incoming arguments to the
# 'grr_get_files' command and return its result unchanged.
demisto.results(demisto.executeCommand("grr_get_files", demisto.args()))
| {
"content_hash": "155200342ab85ef4fbaf0f04990c1597",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 41.25,
"alnum_prop": 0.7696969696969697,
"repo_name": "demisto/content",
"id": "dd0f0afc9ef27c7559a1a94db8ef793390233502",
"size": "165",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/GRR/Scripts/GrrGetFiles/GrrGetFiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
VERSION='2.0.1'  # Package version string.
# Python 2 implicit relative import: re-export the client API at package
# level so callers can do ``from spotipy import Spotify``.
from client import Spotify, SpotifyException
| {
"content_hash": "1b3b597f50999bb63f845fd2d5f86b33",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 44,
"avg_line_length": 30.5,
"alnum_prop": 0.8032786885245902,
"repo_name": "issyrichards/spartify2",
"id": "9339d2a6b6a8a57bce39a4f81127fd68e2a0e6c3",
"size": "61",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spotipy/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "5397"
},
{
"name": "HTML",
"bytes": "10870"
},
{
"name": "JavaScript",
"bytes": "84593"
},
{
"name": "Makefile",
"bytes": "6653"
},
{
"name": "Python",
"bytes": "894913"
}
],
"symlink_target": ""
} |
import re
from BeautifulSoup import BeautifulSoup
from collections import defaultdict
# Module name -> list of entries.  Duplicate names append extra entries here,
# but only the first entry per name is written to the output file below.
modules = defaultdict(list)
def normalize(string):
    '''Strip decorations from a module name.

    Removes a trailing parenthesised qualifier (e.g. ``"Widget (beta)"`` ->
    ``"Widget"``) and any leading ``YUI``/``YUI 2``/``YUI 3`` prefix.
    '''
    # Fix: the pattern is now a raw string; the previous plain string relied
    # on the invalid escape sequences '\(' and '\ ' surviving unchanged
    # (a DeprecationWarning on Python 3.6+).  '\ ?' is equivalent to ' ?'.
    return re.sub(r'( ?\(.*\)$|YUI ?[2-3]?[ -]?)', '', string)
def get_descr_string(type, descr):
    '''Format a type label and a description as the abstract HTML snippet.'''
    # Newlines cannot appear in the tab-separated output line, so fold them
    # (CR or LF) into explicit <br /> tags first.
    html_descr = re.sub('(\n|\r)', '<br />', descr)
    return '<i>Type</i>: %s<br /><i>Description</i>: %s' % (type, html_descr)
# Parse the official modules: each component <li> links to the module page
# and carries its description in a data-tooltip attribute.
official_soup = BeautifulSoup(open('data/official.html'))
for module in official_soup.findAll('li', {'class': 'component'}):
    mah = module.a['href']
    descr = get_descr_string('Official', module.a['data-tooltip'])
    modules[module.a.text].append({'link': 'http://yuilibrary.com%s' % mah,
                                   'name': module.a.text,
                                   'descr': descr
                                   })
# Parse the community supported gallery modules
gallery_soup = BeautifulSoup(open('data/gallery.html'))
for module in gallery_soup.findAll('a', href=re.compile('/gallery/show/.+')):
    # Skip gallery entries that are patches rather than real modules.
    if 'patch' in module.text.lower():
        continue
    h4 = module.findNext('h4')
    if h4.span:
        # A span inside the <h4> marks modules available on the CDN; the
        # description text follows the span node.
        hsnn = h4.span.next.next
        descr = get_descr_string('Gallery, available on CDN', hsnn)
    else:
        descr = get_descr_string('Gallery', h4.next.next)
    mh = module['href']
    mt = normalize(module.text)
    modules[mt].append({'link': 'http://yuilibrary.com%s' % mh,
                        'descr': descr,
                        'name': module.text})
# Emit one tab-separated line per module name; only the first collected
# entry (value[0]) for a duplicated name is written out.
with open('output.txt', 'w') as f:
    for name, value in modules.items():
        f.write('\t'.join(
            [
                name, # title
                'A', # type
                '', # redirect
                '', # otheruses
                '', # categories
                '', # references
                '', # see_also
                '', # further_reading
                '', # external_links
                '', # disambiguation
                '', # images
                value[0]['descr'], # abstract
                value[0]['link'] # source_url
            ]
        ) + "\n")
| {
"content_hash": "879e01e298bbee85e3dd5334c6edede0",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 77,
"avg_line_length": 36.58461538461538,
"alnum_prop": 0.4953742640874685,
"repo_name": "rasikapohankar/zeroclickinfo-fathead",
"id": "83e549e9998a7a859134b9ef20e31394bba1fcbc",
"size": "2425",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "lib/fathead/yui3/parse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "648"
},
{
"name": "Go",
"bytes": "11301"
},
{
"name": "JavaScript",
"bytes": "104980"
},
{
"name": "Perl",
"bytes": "148281"
},
{
"name": "Python",
"bytes": "762746"
},
{
"name": "Ruby",
"bytes": "12324"
},
{
"name": "Shell",
"bytes": "40674"
},
{
"name": "Tcl",
"bytes": "2746"
},
{
"name": "XSLT",
"bytes": "10904"
}
],
"symlink_target": ""
} |
from random import choice
from string import letters
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from .models import Invitation
_ = lambda x: x
class EmailAuthenticationForm(AuthenticationForm):
    """Login form whose 'username' field accepts an e-mail address."""
    username = forms.EmailField(label=_("E-mail"), max_length=75)
class EmailUserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given e-mail and
    password.

    It's a copy-paste from django.contrib.auth.models.UserCreationForm,
    adapted to use the e-mail address instead of the username.
    """
    error_messages = {
        'duplicate_email': _("A user with that E-mail already exists."),
        'password_mismatch': _("The two password fields didn't match."),
    }
    email = forms.RegexField(label=_("E-mail"), max_length=75,
        regex=r'^[\w.@+-]+$',
        error_messages = {
            'invalid': _("This has to be a valid E-mail.")})
    password1 = forms.CharField(label=_("Password"),
        widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
        widget=forms.PasswordInput,
        help_text = _("Enter the same password as above, for verification."))
    class Meta:
        model = User
        fields = ("email",)
    def clean_email(self):
        """Reject e-mails that already belong to an existing user."""
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        email = self.cleaned_data["email"]
        try:
            User.objects.get(email=email)
        except User.DoesNotExist:
            return email
        raise forms.ValidationError(self.error_messages['duplicate_email'])
    def clean_password2(self):
        """Ensure both password fields contain the same value."""
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data["password2"]
        if password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return password2
    def save(self, commit=True):
        """Create the user with a hashed password and a random username."""
        user = super(EmailUserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        # username is required so we put some random data into it
        # (random.choice is not cryptographically strong, but the value is
        # only a placeholder, never shown or used for auth).
        user.username = ''.join([choice(letters) for i in xrange(30)])
        if commit:
            user.save()
        return user
class InvitationForm(forms.ModelForm):
    """Form for creating an Invitation from just an e-mail address."""
    class Meta:
        model = Invitation
fields = ['email'] | {
"content_hash": "c9d22196a8ee7213a3b07e45f7e5d747",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 79,
"avg_line_length": 33.75675675675676,
"alnum_prop": 0.6469175340272217,
"repo_name": "jacobjbollinger/sorbet",
"id": "4faf977c6239bd3281803c0fe83a432b8f5594f4",
"size": "2498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sorbet/core/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
} |
from sahara.i18n import _
from sahara.plugins.cdh.client import services
from sahara.plugins.cdh.client import types
from sahara.plugins.cdh import exceptions as ex
CLUSTERS_PATH = "/clusters"
def create_cluster(resource_root, name, version=None, fullVersion=None):
    """Create a new cluster.

    :param resource_root: The root Resource object.
    :param name: Cluster name.
    :param version: Cluster CDH major version (e.g. "CDH4"); the latest
        released minor version is assumed.  Ignored when fullVersion is set.
    :param fullVersion: Full CDH version (e.g. "5.1.1"); requires API v6.
    :return: An ApiCluster object.
    """
    if version is None and fullVersion is None:
        raise ex.CMApiVersionError(
            _("Either 'version' or 'fullVersion' must be specified"))
    # A full version pins the exact release and is only understood by API
    # v6+; it takes precedence over the major version.
    api_version = 6 if fullVersion is not None else 1
    if fullVersion is not None:
        version = None
    cluster = ApiCluster(resource_root, name, version, fullVersion)
    return types.call(resource_root.post, CLUSTERS_PATH, ApiCluster, True,
                      data=[cluster], api_version=api_version)[0]
def get_cluster(resource_root, name):
    """Look up a single cluster by name.

    :param resource_root: The root Resource object.
    :param name: Cluster name.
    :return: An ApiCluster object.
    """
    cluster_path = "%s/%s" % (CLUSTERS_PATH, name)
    return types.call(resource_root.get, cluster_path, ApiCluster)
def get_all_clusters(resource_root, view=None):
    """Fetch every cluster known to the API.

    :param resource_root: The root Resource object.
    :param view: Optional view to materialize ('full' or 'summary').
    :return: A list of ApiCluster objects.
    """
    params = {'view': view} if view else None
    return types.call(resource_root.get, CLUSTERS_PATH, ApiCluster, True,
                      params=params)
class ApiCluster(types.BaseApiResource):
    """Client-side representation of a Cloudera Manager cluster."""
    # Attribute map consumed by the base API object machinery; ROAttr marks
    # read-only attributes.
    _ATTRIBUTES = {
        'name': None,
        'displayName': None,
        'version': None,
        'fullVersion': None,
        'maintenanceMode': types.ROAttr(),
        'maintenanceOwners': types.ROAttr(),
    }
    def __init__(self, resource_root, name=None, version=None,
                 fullVersion=None):
        # Delegates to BaseApiObject.init with locals() so each constructor
        # argument is installed as an attribute on the instance.
        types.BaseApiObject.init(self, resource_root, locals())
    def _path(self):
        # Resource path for this specific cluster.
        return "%s/%s" % (CLUSTERS_PATH, self.name)
    def get_service_types(self):
        """Get all service types supported by this cluster
        :return: A list of service types (strings)
        """
        resp = self._get_resource_root().get(self._path() + '/serviceTypes')
        return resp[types.ApiList.LIST_KEY]
    def get_commands(self, view=None):
        """Retrieve a list of running commands for this cluster
        :param view: View to materialize ('full' or 'summary')
        :return: A list of running commands.
        """
        return self._get("commands", types.ApiCommand, True,
                         params=(dict(view=view) if view else None))
    def create_service(self, name, service_type):
        """Create a service
        :param name: Service name
        :param service_type: Service type
        :return: An ApiService object
        """
        return services.create_service(self._get_resource_root(), name,
                                       service_type, self.name)
    def get_service(self, name):
        """Lookup a service by name
        :param name: Service name
        :return: An ApiService object
        """
        return services.get_service(self._get_resource_root(),
                                    name, self.name)
    def start(self):
        """Start all services in a cluster, respecting dependencies
        :return: Reference to the submitted command.
        """
        return self._cmd('start')
    def deploy_client_config(self):
        """Deploys Service client configuration to the hosts on the cluster
        :return: Reference to the submitted command.
        :since: API v2
        """
        return self._cmd('deployClientConfig')
    def first_run(self):
        """Prepare and start services in a cluster
        Perform all the steps needed to prepare each service in a
        cluster and start the services in order.
        :return: Reference to the submitted command.
        :since: API v7
        """
        return self._cmd('firstRun', None, api_version=7)
    def remove_host(self, hostId):
        """Removes the association of the host with the cluster
        :return: A ApiHostRef of the host that was removed.
        :since: API v3
        """
        return self._delete("hosts/" + hostId, types.ApiHostRef, api_version=3)
| {
"content_hash": "d1b6e5bd801cb2d18d661a789e25c3ce",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 79,
"avg_line_length": 33.070422535211264,
"alnum_prop": 0.6081771720613288,
"repo_name": "matips/iosr-2015",
"id": "38f35fdffd6f9ea601c53eb3b410b87ade387408",
"size": "5679",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sahara/plugins/cdh/client/clusters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "1528"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "2854653"
},
{
"name": "Shell",
"bytes": "45698"
}
],
"symlink_target": ""
} |
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid_mailer.message import Message
from kotti.message import get_mailer
from kotti import get_settings
def mail_admin(msg):
    """
    Send the given message body to the configured admin address.
    """
    recipient = get_settings().get('kotti_dkbase.admin_email')
    message = Message(
        subject=u'Server-Error',
        recipients=[recipient],
        body=msg,
    )
    get_mailer().send_immediately(message)
def exception_decorator(view):
    """
    Wrap *view* so that arbitrary exceptions are presented to it as
    HTTP 500 (Internal Server Error) responses.
    """
    def wrapped(exception, request):
        return view(HTTPInternalServerError(str(exception)), request)
    return wrapped
def error_view(exception, request):
    """
    Render an error response; optionally notify the admins by mail.
    """
    request.response.status_int = exception.code
    settings = get_settings()
    # 404s are routine and never mailed; other errors are mailed only when
    # the 'send_error_mails' setting is the string "True".
    if exception.code != 404 and settings.get('kotti_dkbase.send_error_mails') == "True":
        mail_admin(str(exception) + str(request))
    return {}
| {
"content_hash": "afa1cbb7340c7764b3697ecf8fd80906",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 27.514285714285716,
"alnum_prop": 0.6791277258566978,
"repo_name": "chrneumann/kotti_dkbase",
"id": "0bbc362da5aeefa4cb6209c9b33f7068b305e69a",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kotti_dkbase/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "192116"
}
],
"symlink_target": ""
} |
import sys
import mock
from oslo.config import cfg
import testtools
import webtest
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.common import config
from neutron.common import exceptions
from neutron import context
from neutron.db import api as db
from neutron.db import quota_db
from neutron import quota
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_api
# Core plugin loaded by these tests; it is patched with a MagicMock that
# advertises the 'quotas' extension.
TARGET_PLUGIN = ('neutron.plugins.linuxbridge.lb_neutron_plugin'
                 '.LinuxBridgePluginV2')
# Shorthand for building versioned API request paths.
_get_path = test_api_v2._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
    """Base fixture: wires up the quota extension over a mocked core plugin
    and exposes it through a WebTest application as ``self.api``."""
    def setUp(self):
        super(QuotaExtensionTestCase, self).setUp()
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        # Save the global RESOURCE_ATTRIBUTE_MAP so tearDown can restore it
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        # Create the default configurations
        args = ['--config-file', test_extensions.etcdir('neutron.conf.test')]
        config.parse(args=args)
        # Update the plugin and extensions path
        self.setup_coreplugin(TARGET_PLUGIN)
        cfg.CONF.set_override(
            'quota_items',
            ['network', 'subnet', 'port', 'extra1'],
            group='QUOTAS')
        # Fresh engine so state does not leak between test cases.
        quota.QUOTAS = quota.QuotaEngine()
        quota.register_resources_from_config()
        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
        self.plugin = self._plugin_patcher.start()
        self.plugin.return_value.supported_extension_aliases = ['quotas']
        # QUOTAS will register the items in conf when starting
        # extra1 here is added later, so have to do it manually
        quota.QUOTAS.register_resource_by_name('extra1')
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        db.configure_db()
        app = config.load_paste_app('extensions_test_app')
        ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.api = webtest.TestApp(ext_middleware)
    def tearDown(self):
        self._plugin_patcher.stop()
        self.api = None
        self.plugin = None
        db.clear_db()
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
        super(QuotaExtensionTestCase, self).tearDown()
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
    """Quota extension tests run against the DB-backed quota driver."""
    fmt = 'json'
    def setUp(self):
        # Force the DbQuotaDriver so per-tenant quota rows are persisted.
        cfg.CONF.set_override(
            'quota_driver',
            'neutron.db.quota_db.DbQuotaDriver',
            group='QUOTAS')
        super(QuotaExtensionDbTestCase, self).setUp()
    def test_quotas_loaded_right(self):
        # With no per-tenant overrides stored yet, listing returns [].
        res = self.api.get(_get_path('quotas', fmt=self.fmt))
        quota = self.deserialize(res)  # NOTE: shadows the 'quota' module locally
        self.assertEqual([], quota['quotas'])
        self.assertEqual(200, res.status_int)
    def test_quotas_default_values(self):
        # Defaults apply when a tenant has no stored overrides; 'extra1' has
        # no configured default and reports unlimited (-1).
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
        self.assertEqual(-1, quota['quota']['extra1'])
    def test_show_quotas_with_admin(self):
        # An admin may read another tenant's quotas.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
    def test_show_quotas_without_admin_forbidden_returns_403(self):
        # A non-admin may not read a different tenant's quotas.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_show_quotas_with_owner_tenant(self):
        # A tenant may always read its own quotas.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
    def test_list_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual([], quota['quotas'])
    def test_list_quotas_without_admin_forbidden_returns_403(self):
        # Listing all tenants' quotas is an admin-only operation.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_update_quotas_without_admin_forbidden_returns_403(self):
        # Only admins may change quotas.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        quotas = {'quota': {'network': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_update_quotas_with_non_integer_returns_400(self):
        # Quota values must be integers.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 'abc'}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)
    def test_update_quotas_with_negative_integer_returns_400(self):
        # Negative values other than the unlimited sentinel (-1) are invalid.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': -2}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)
    def test_update_quotas_to_unlimited(self):
        # -1 means unlimited and is accepted.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': -1}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=False)
        self.assertEqual(200, res.status_int)
    def test_update_quotas_exceeding_current_limit(self):
        # Raising a quota above its current limit is allowed.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 120}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=False)
        self.assertEqual(200, res.status_int)
    def test_update_quotas_with_non_support_resource_returns_400(self):
        # Unknown resource names are rejected.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'abc': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)
def test_update_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'network': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['network'])
self.assertEqual(10, quota['quota']['subnet'])
self.assertEqual(50, quota['quota']['port'])
def test_update_attributes(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
quotas = {'quota': {'extra1': 100}}
res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
env2 = {'neutron.context': context.Context('', tenant_id)}
res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env2)
quota = self.deserialize(res)
self.assertEqual(100, quota['quota']['extra1'])
def test_delete_quotas_with_admin(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id + '2',
is_admin=True)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env)
self.assertEqual(204, res.status_int)
def test_delete_quotas_without_admin_forbidden_returns_403(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=False)}
res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(403, res.status_int)
    def test_quotas_loaded_bad_returns_404(self):
        # GET on the quotas collection (no tenant id in the path) should
        # produce a 404 from the router.
        try:
            res = self.api.get(_get_path('quotas'), expect_errors=True)
            self.assertEqual(404, res.status_int)
        except Exception:
            # NOTE(review): swallowing every exception makes this test
            # vacuous whenever the GET raises instead of returning 404.
            # Presumably deliberate best-effort across webob versions --
            # confirm before tightening.
            pass
def test_quotas_limit_check(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
quotas = {'quota': {'network': 5}}
res = self.api.put(_get_path('quotas', id=tenant_id,
fmt=self.fmt),
self.serialize(quotas), extra_environ=env)
self.assertEqual(200, res.status_int)
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=4)
def test_quotas_limit_check_with_invalid_quota_value(self):
tenant_id = 'tenant_id1'
with testtools.ExpectedException(exceptions.InvalidQuotaValue):
quota.QUOTAS.limit_check(context.Context('', tenant_id),
tenant_id,
network=-2)
def test_quotas_get_tenant_from_request_context(self):
tenant_id = 'tenant_id1'
env = {'neutron.context': context.Context('', tenant_id,
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env)
self.assertEqual(200, res.status_int)
quota = self.deserialize(res)
self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
env = {'neutron.context': context.Context('', '',
is_admin=True)}
res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
extra_environ=env, expect_errors=True)
self.assertEqual(400, res.status_int)
class QuotaExtensionDbTestCaseXML(QuotaExtensionDbTestCase):
    """Re-run the DB-driver quota tests using XML serialization."""
    fmt = 'xml'
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
    """Quota extension tests run against the config-based quota driver."""
    fmt = 'json'

    def setUp(self):
        # Select the non-DB driver before the base class wires up the API.
        cfg.CONF.set_override('quota_driver',
                              'neutron.quota.ConfDriver',
                              group='QUOTAS')
        super(QuotaExtensionCfgTestCase, self).setUp()

    def test_quotas_default_values(self):
        """A tenant sees the configured default quota values."""
        tenant = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant)}
        response = self.api.get(_get_path('quotas', id=tenant, fmt=self.fmt),
                                extra_environ=env)
        body = self.deserialize(response)
        self.assertEqual(10, body['quota']['network'])
        self.assertEqual(10, body['quota']['subnet'])
        self.assertEqual(50, body['quota']['port'])
        self.assertEqual(-1, body['quota']['extra1'])

    def test_show_quotas_with_admin(self):
        """An admin from another tenant may view this tenant's quotas."""
        tenant = 'tenant_id1'
        ctx = context.Context('', tenant + '2', is_admin=True)
        response = self.api.get(_get_path('quotas', id=tenant, fmt=self.fmt),
                                extra_environ={'neutron.context': ctx})
        self.assertEqual(200, response.status_int)

    def test_show_quotas_without_admin_forbidden(self):
        """A non-admin may not view another tenant's quotas."""
        tenant = 'tenant_id1'
        ctx = context.Context('', tenant + '2', is_admin=False)
        response = self.api.get(_get_path('quotas', id=tenant, fmt=self.fmt),
                                extra_environ={'neutron.context': ctx},
                                expect_errors=True)
        self.assertEqual(403, response.status_int)

    def test_update_quotas_forbidden(self):
        """Updating quotas is forbidden (403) with the config driver."""
        tenant = 'tenant_id1'
        body = self.serialize({'quota': {'network': 100}})
        response = self.api.put(_get_path('quotas', id=tenant, fmt=self.fmt),
                                body,
                                expect_errors=True)
        self.assertEqual(403, response.status_int)

    def test_delete_quotas_forbidden(self):
        """Deleting quotas is forbidden (403) for a non-admin caller."""
        tenant = 'tenant_id1'
        ctx = context.Context('', tenant, is_admin=False)
        response = self.api.delete(
            _get_path('quotas', id=tenant, fmt=self.fmt),
            extra_environ={'neutron.context': ctx},
            expect_errors=True)
        self.assertEqual(403, response.status_int)
class QuotaExtensionCfgTestCaseXML(QuotaExtensionCfgTestCase):
    """Re-run the config-driver quota tests using XML serialization."""
    fmt = 'xml'
class TestDbQuotaDriver(base.BaseTestCase):
    """Test for neutron.db.quota_db.DbQuotaDriver."""

    def test_get_tenant_quotas_arg(self):
        """_get_quotas delegates to get_tenant_quotas with reordered args."""
        driver = quota_db.DbQuotaDriver()
        ctx = context.Context('', 'bar')
        tenant_quotas = {'network': 5}
        defaults = {'network': 10}
        target = 'foo'
        patcher = mock.patch.object(quota_db.DbQuotaDriver,
                                    'get_tenant_quotas',
                                    return_value=tenant_quotas)
        with patcher as mocked_get:
            result = driver._get_quotas(ctx, target, defaults, ['network'])
            self.assertEqual(result, tenant_quotas)
            # The driver passes (ctx, defaults, target) -- note the order.
            mocked_get.assert_called_once_with(ctx, defaults, target)
class TestQuotaDriverLoad(base.BaseTestCase):
    """Verify which driver class QuotaEngine.get_driver() loads."""
    def setUp(self):
        super(TestQuotaDriverLoad, self).setUp()
        # Make sure QuotaEngine is reinitialized in each test.
        quota.QUOTAS._driver = None
    def _test_quota_driver(self, cfg_driver, loaded_driver,
                           with_quota_db_module=True):
        # Configure *cfg_driver* and assert get_driver() actually loads a
        # *loaded_driver* instance.
        cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
        with mock.patch.dict(sys.modules, {}):
            # Optionally hide the quota_db module to simulate an install
            # without it; patch.dict restores sys.modules on exit.
            if (not with_quota_db_module and
                'neutron.db.quota_db' in sys.modules):
                del sys.modules['neutron.db.quota_db']
            driver = quota.QUOTAS.get_driver()
            self.assertEqual(loaded_driver, driver.__class__.__name__)
    def test_quota_db_driver_with_quotas_table(self):
        self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
                                'DbQuotaDriver', True)
    def test_quota_db_driver_fallback_conf_driver(self):
        # Without the quota_db module the engine falls back to ConfDriver.
        self._test_quota_driver('neutron.db.quota_db.DbQuotaDriver',
                                'ConfDriver', False)
    def test_quota_conf_driver(self):
        self._test_quota_driver('neutron.quota.ConfDriver',
                                'ConfDriver', True)
| {
"content_hash": "458df1bb833ebbcae19969ae43c69470",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 78,
"avg_line_length": 43.99760765550239,
"alnum_prop": 0.554238486216084,
"repo_name": "sajuptpm/neutron-ipam",
"id": "0d88d937177619db244ec222592a91d5aa3bb949",
"size": "19073",
"binary": false,
"copies": "3",
"ref": "refs/heads/stable/icehouse",
"path": "neutron/tests/unit/test_quota_ext.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "9102565"
},
{
"name": "Shell",
"bytes": "9603"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import unittest
from libcloudcore.importer import Importer
class TestImporter(unittest.TestCase):
    """Smoke tests for libcloudcore's Importer driver/client loading."""

    def setUp(self):
        self.importer = Importer(__name__)

    def test_get_driver(self):
        driver = self.importer.get_driver("bigv")
        # assertEqual (not assertTrue on ==) so a failure reports both values.
        self.assertEqual(driver.name, "bigv")

    def test_get_client(self):
        client = self.importer.get_client("bigv")
        self.assertEqual(client.name, "bigv")

    def test_client_has_operations(self):
        # get_client returns a client object, so name it accordingly.
        client = self.importer.get_client("bigv")
        self.assertTrue(hasattr(client, "list_virtual_machines"))
| {
"content_hash": "3dda00f42b3af4f40e58b713c3542f63",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 65,
"avg_line_length": 27.761904761904763,
"alnum_prop": 0.6620926243567753,
"repo_name": "Jc2k/libcloudcore",
"id": "12caafb6b891938e9ad09d392dc6291ad8a0ad10",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libcloudcore/tests/test_importer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "130567"
}
],
"symlink_target": ""
} |
"""psCharStrings.py -- module implementing various kinds of CharStrings:
CFF dictionary data and Type1/Type2 CharStrings.
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import struct
DEBUG = 0
# Dispatch tables mapping a charstring's introducing byte b0 to the name of
# the ByteCodeBase method that decodes the operand (or operator) it starts.
t1OperandEncoding = [None] * 256
t1OperandEncoding[0:32] = (32) * ["do_operator"]
t1OperandEncoding[32:247] = (247 - 32) * ["read_byte"]
t1OperandEncoding[247:251] = (251 - 247) * ["read_smallInt1"]
t1OperandEncoding[251:255] = (255 - 251) * ["read_smallInt2"]
t1OperandEncoding[255] = "read_longInt"
assert len(t1OperandEncoding) == 256
# Type 2 reuses the Type 1 table but adds 16-bit ints and 16.16 fixed.
t2OperandEncoding = t1OperandEncoding[:]
t2OperandEncoding[28] = "read_shortInt"
t2OperandEncoding[255] = "read_fixed1616"
# CFF top/private dicts: 29 is a long int, 30 a packed real number.
cffDictOperandEncoding = t2OperandEncoding[:]
cffDictOperandEncoding[29] = "read_longInt"
cffDictOperandEncoding[30] = "read_realNumber"
cffDictOperandEncoding[255] = "reserved"
# Nibble values used by the CFF real-number encoding (0xf terminates).
realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
		'.', 'E', 'E-', None, '-']
realNibblesDict = {}
for _i in range(len(realNibbles)):
	realNibblesDict[realNibbles[_i]] = _i
class ByteCodeBase(object):
	"""Mixin providing the operand-decoding primitives shared by the
	Type1/Type2 charstring and CFF-dict interpreters.

	Each read_* method receives the introducing byte b0, the raw data,
	and the index of the byte following b0; it returns (value, newIndex).
	"""

	def read_byte(self, b0, data, index):
		# Single-byte integer: b0 encodes b0 - 139.
		return b0 - 139, index

	def read_smallInt1(self, b0, data, index):
		# Two-byte positive integer in the range 108..1131.
		nextByte = byteord(data[index])
		return (b0 - 247) * 256 + nextByte + 108, index + 1

	def read_smallInt2(self, b0, data, index):
		# Two-byte negative integer in the range -1131..-108.
		nextByte = byteord(data[index])
		return -(b0 - 251) * 256 - nextByte - 108, index + 1

	def read_shortInt(self, b0, data, index):
		# 16-bit big-endian signed integer.
		(value,) = struct.unpack(">h", data[index:index + 2])
		return value, index + 2

	def read_longInt(self, b0, data, index):
		# 32-bit big-endian signed integer.
		(value,) = struct.unpack(">l", data[index:index + 4])
		return value, index + 4

	def read_fixed1616(self, b0, data, index):
		# 16.16 fixed-point number, returned as a float.
		(rawValue,) = struct.unpack(">l", data[index:index + 4])
		return rawValue / 65536, index + 4

	def read_realNumber(self, b0, data, index):
		# CFF real number: a stream of 4-bit nibbles, terminated by 0xf.
		digits = ''
		while True:
			packed = byteord(data[index])
			index = index + 1
			hiNibble = (packed & 0xf0) >> 4
			loNibble = packed & 0x0f
			if hiNibble == 0xf:
				break
			digits = digits + realNibbles[hiNibble]
			if loNibble == 0xf:
				break
			digits = digits + realNibbles[loNibble]
		return float(digits), index
def buildOperatorDict(operatorList):
	"""Build the two lookup tables used for charstring (de)compilation.

	Returns (operators, opcodes): *operators* maps opcode -> name (or a
	tuple of the trailing fields when an entry has more than two), and
	*opcodes* maps name -> opcode tuple, used when compiling.
	"""
	operators = {}
	opcodes = {}
	for entry in operatorList:
		opcode = entry[0]
		rest = entry[1:]
		operators[opcode] = rest[0] if len(rest) == 1 else rest
		# Escaped operators are already (12, x) tuples; plain ones are
		# wrapped in a 1-tuple so compilation can iterate uniformly.
		opcodes[entry[1]] = opcode if isinstance(opcode, tuple) else (opcode,)
	return operators, opcodes
# Type 2 charstring operators; escaped operators are (12, x) tuples.
# Consumed by buildOperatorDict() for T2CharString.
t2Operators = [
	# opcode name
	(1, 'hstem'),
	(3, 'vstem'),
	(4, 'vmoveto'),
	(5, 'rlineto'),
	(6, 'hlineto'),
	(7, 'vlineto'),
	(8, 'rrcurveto'),
	(10, 'callsubr'),
	(11, 'return'),
	(14, 'endchar'),
	(16, 'blend'),
	(18, 'hstemhm'),
	(19, 'hintmask'),
	(20, 'cntrmask'),
	(21, 'rmoveto'),
	(22, 'hmoveto'),
	(23, 'vstemhm'),
	(24, 'rcurveline'),
	(25, 'rlinecurve'),
	(26, 'vvcurveto'),
	(27, 'hhcurveto'),
	# (28, 'shortint'), # not really an operator
	(29, 'callgsubr'),
	(30, 'vhcurveto'),
	(31, 'hvcurveto'),
	((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF
	# fonts with this deprecated operator. Just ignore it.
	((12, 3), 'and'),
	((12, 4), 'or'),
	((12, 5), 'not'),
	((12, 8), 'store'),
	((12, 9), 'abs'),
	((12, 10), 'add'),
	((12, 11), 'sub'),
	((12, 12), 'div'),
	((12, 13), 'load'),
	((12, 14), 'neg'),
	((12, 15), 'eq'),
	((12, 18), 'drop'),
	((12, 20), 'put'),
	((12, 21), 'get'),
	((12, 22), 'ifelse'),
	((12, 23), 'random'),
	((12, 24), 'mul'),
	((12, 26), 'sqrt'),
	((12, 27), 'dup'),
	((12, 28), 'exch'),
	((12, 29), 'index'),
	((12, 30), 'roll'),
	((12, 34), 'hflex'),
	((12, 35), 'flex'),
	((12, 36), 'hflex1'),
	((12, 37), 'flex1'),
	]
def getIntEncoder(format):
	"""Return an integer-encoding function for the given flavour:
	"cff" (dict data), "t1" or "t2" charstrings."""
	if format == "cff":
		fourByteOp = bytechr(29)
	elif format == "t1":
		fourByteOp = bytechr(255)
	else:
		assert format == "t2"
		fourByteOp = None  # T2 has no 4-byte int operator

	def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr,
			pack=struct.pack, unpack=struct.unpack):
		# One-byte form.
		if -107 <= value <= 107:
			return bytechr(value + 139)
		# Two-byte positive form.
		if 108 <= value <= 1131:
			offset = value - 108
			return bytechr((offset >> 8) + 247) + bytechr(offset & 0xFF)
		# Two-byte negative form.
		if -1131 <= value <= -108:
			offset = -value - 108
			return bytechr((offset >> 8) + 251) + bytechr(offset & 0xFF)
		if fourByteOp is None:
			# T2 only supports 2 byte ints
			if -32768 <= value <= 32767:
				return bytechr(28) + pack(">h", value)
			# Backwards compatible hack: due to a previous bug in FontTools,
			# 16.16 fixed numbers were written out as 4-byte ints. When
			# these numbers were small, they were wrongly written back as
			# small ints instead of 4-byte ints, breaking round-tripping.
			# This here workaround doesn't do it any better, since we can't
			# distinguish anymore between small ints that were supposed to
			# be small fixed numbers and small ints that were just small
			# ints. Hence the warning.
			import sys
			sys.stderr.write("Warning: 4-byte T2 number got passed to the "
				"IntType handler. This should happen only when reading in "
				"old XML files.\n")
			return bytechr(255) + pack(">l", value)
		return fourByteOp + pack(">l", value)

	return encodeInt
# Pre-built integer encoders for CFF dicts, T1 and T2 charstrings.
encodeIntCFF = getIntEncoder("cff")
encodeIntT1 = getIntEncoder("t1")
encodeIntT2 = getIntEncoder("t2")
def encodeFixed(f, pack=struct.pack):
	"""Encode the float *f* as a Type 2 16.16 fixed-point operand
	(0xff introducer followed by the value scaled by 2**16)."""
	scaled = int(round(f * 65536))
	return b"\xff" + pack(">l", scaled)
def encodeFloat(f):
	"""Encode the float *f* as a CFF real number (used in cffLib):
	byte 30 followed by packed 4-bit nibbles, terminated by 0xf."""
	text = str(f).upper()
	# Drop the leading zero of "0.xxx" / "-0.xxx" representations.
	if text[:2] == "0.":
		text = text[1:]
	elif text[:3] == "-0.":
		text = "-" + text[2:]
	nibbles = []
	while text:
		char = text[0]
		text = text[1:]
		# "E-" is a single nibble in the CFF encoding.
		if char == "E" and text[:1] == "-":
			text = text[1:]
			char = "E-"
		nibbles.append(realNibblesDict[char])
	nibbles.append(0xf)
	# Pad to an even nibble count with a second terminator.
	if len(nibbles) % 2:
		nibbles.append(0xf)
	result = bytechr(30)
	for i in range(0, len(nibbles), 2):
		result = result + bytechr(nibbles[i] << 4 | nibbles[i + 1])
	return result
class CharStringCompileError(Exception): pass
class T2CharString(ByteCodeBase):
	"""A Type 2 charstring, held either as raw bytecode or as a
	decompiled token program (a list of numbers and operator names).
	Exactly one of self.bytecode / self.program is non-None at a time.
	"""
	operandEncoding = t2OperandEncoding
	operators, opcodes = buildOperatorDict(t2Operators)
	def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None):
		if program is None:
			program = []
		self.bytecode = bytecode
		self.program = program
		self.private = private
		self.globalSubrs = globalSubrs if globalSubrs is not None else []
	def __repr__(self):
		if self.bytecode is None:
			return "<%s (source) at %x>" % (self.__class__.__name__, id(self))
		else:
			return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self))
	def getIntEncoder(self):
		return encodeIntT2
	def getFixedEncoder(self):
		return encodeFixed
	def decompile(self):
		"""Turn self.bytecode into a token program (no-op if already done)."""
		if not self.needsDecompilation():
			return
		subrs = getattr(self.private, "Subrs", [])
		decompiler = SimpleT2Decompiler(subrs, self.globalSubrs)
		decompiler.execute(self)
	def draw(self, pen):
		"""Execute the charstring, drawing its outline with *pen*."""
		subrs = getattr(self.private, "Subrs", [])
		extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs,
				self.private.nominalWidthX, self.private.defaultWidthX)
		extractor.execute(self)
		self.width = extractor.width
	def compile(self):
		"""Compile the token program back into bytecode (no-op if compiled)."""
		if self.bytecode is not None:
			return
		assert self.program, "illegal CharString: decompiled to empty program"
		assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr",
				"seac"), "illegal CharString"
		bytecode = []
		opcodes = self.opcodes
		program = self.program
		encodeInt = self.getIntEncoder()
		encodeFixed = self.getFixedEncoder()
		i = 0
		end = len(program)
		while i < end:
			token = program[i]
			i = i + 1
			tp = type(token)
			if issubclass(tp, basestring):
				try:
					bytecode.extend(bytechr(b) for b in opcodes[token])
				except KeyError:
					raise CharStringCompileError("illegal operator: %s" % token)
				if token in ('hintmask', 'cntrmask'):
					# The mask operator is followed by its raw mask bytes.
					bytecode.append(program[i]) # hint mask
					i = i + 1
			elif tp == int:
				bytecode.append(encodeInt(token))
			elif tp == float:
				bytecode.append(encodeFixed(token))
			else:
				assert 0, "unsupported type: %s" % tp
		try:
			bytecode = bytesjoin(bytecode)
		except TypeError:
			print(bytecode)
			raise
		self.setBytecode(bytecode)
	def needsDecompilation(self):
		return self.bytecode is not None
	def setProgram(self, program):
		self.program = program
		self.bytecode = None
	def setBytecode(self, bytecode):
		self.bytecode = bytecode
		self.program = None
	def getToken(self, index,
			len=len, byteord=byteord, getattr=getattr, type=type, StringType=str):
		"""Return (token, isOperator, newIndex) for the token at *index*,
		reading from bytecode or the decompiled program as appropriate.
		Returns (None, 0, 0) at the end."""
		if self.bytecode is not None:
			if index >= len(self.bytecode):
				return None, 0, 0
			b0 = byteord(self.bytecode[index])
			index = index + 1
			# Dispatch on b0 via the operand-encoding table.
			code = self.operandEncoding[b0]
			handler = getattr(self, code)
			token, index = handler(b0, self.bytecode, index)
		else:
			if index >= len(self.program):
				return None, 0, 0
			token = self.program[index]
			index = index + 1
		isOperator = isinstance(token, StringType)
		return token, isOperator, index
	def getBytes(self, index, nBytes):
		# Used to fetch raw hintmask bytes following hintmask/cntrmask.
		if self.bytecode is not None:
			newIndex = index + nBytes
			bytes = self.bytecode[index:newIndex]
			index = newIndex
		else:
			bytes = self.program[index]
			index = index + 1
		assert len(bytes) == nBytes
		return bytes, index
	def do_operator(self, b0, data, index):
		# Escape byte 12 introduces a two-byte operator.
		if b0 == 12:
			op = (b0, byteord(data[index]))
			index = index+1
		else:
			op = b0
		operator = self.operators[op]
		return operator, index
	def toXML(self, xmlWriter):
		"""Write the charstring as XML: hex dump if still bytecode,
		otherwise one line per operator with its arguments."""
		from fontTools.misc.textTools import num2binary
		if self.bytecode is not None:
			xmlWriter.dumphex(self.bytecode)
		else:
			index = 0
			args = []
			while True:
				token, isOperator, index = self.getToken(index)
				if token is None:
					break
				if isOperator:
					args = [str(arg) for arg in args]
					if token in ('hintmask', 'cntrmask'):
						# Render the mask bytes as a binary-digit string.
						hintMask, isOperator, index = self.getToken(index)
						bits = []
						for byte in hintMask:
							bits.append(num2binary(byteord(byte), 8))
						hintMask = strjoin(bits)
						line = ' '.join(args + [token, hintMask])
					else:
						line = ' '.join(args + [token])
					xmlWriter.write(line)
					xmlWriter.newline()
					args = []
				else:
					args.append(token)
	def fromXML(self, name, attrs, content):
		"""Parse the XML representation produced by toXML back into a
		token program (or raw bytecode when attrs has "raw")."""
		from fontTools.misc.textTools import binary2num, readHex
		if attrs.get("raw"):
			self.setBytecode(readHex(content))
			return
		content = strjoin(content)
		content = content.split()
		program = []
		end = len(content)
		i = 0
		while i < end:
			token = content[i]
			i = i + 1
			# Tokens are ints, floats, or operator names, in that order
			# of preference.
			try:
				token = int(token)
			except ValueError:
				try:
					token = float(token)
				except ValueError:
					program.append(token)
					if token in ('hintmask', 'cntrmask'):
						# Convert the binary-digit string back to bytes.
						mask = content[i]
						maskBytes = b""
						for j in range(0, len(mask), 8):
							maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8]))
						program.append(maskBytes)
						i = i + 1
				else:
					program.append(token)
			else:
				program.append(token)
		self.setProgram(program)
# Type 1 charstring operators; escaped operators are (12, x) tuples.
# Consumed by buildOperatorDict() for T1CharString.
t1Operators = [
	# opcode name
	(1, 'hstem'),
	(3, 'vstem'),
	(4, 'vmoveto'),
	(5, 'rlineto'),
	(6, 'hlineto'),
	(7, 'vlineto'),
	(8, 'rrcurveto'),
	(9, 'closepath'),
	(10, 'callsubr'),
	(11, 'return'),
	(13, 'hsbw'),
	(14, 'endchar'),
	(21, 'rmoveto'),
	(22, 'hmoveto'),
	(30, 'vhcurveto'),
	(31, 'hvcurveto'),
	((12, 0), 'dotsection'),
	((12, 1), 'vstem3'),
	((12, 2), 'hstem3'),
	((12, 6), 'seac'),
	((12, 7), 'sbw'),
	((12, 12), 'div'),
	((12, 16), 'callothersubr'),
	((12, 17), 'pop'),
	((12, 33), 'setcurrentpoint'),
	]
class T1CharString(T2CharString):
	"""A Type 1 charstring; reuses the T2 tokenizer machinery but with
	the Type 1 operand encoding and operator set."""
	operandEncoding = t1OperandEncoding
	operators, opcodes = buildOperatorDict(t1Operators)
	def __init__(self, bytecode=None, program=None, subrs=None):
		if program is None:
			program = []
		self.bytecode = bytecode
		self.program = program
		self.subrs = subrs
	def getIntEncoder(self):
		return encodeIntT1
	def getFixedEncoder(self):
		def encodeFixed(value):
			raise TypeError("Type 1 charstrings don't support floating point operands")
		# BUGFIX: the encoder was defined but never returned, so compile()
		# received None and a float operand died with an unrelated
		# "'NoneType' object is not callable" instead of this message.
		return encodeFixed
	def decompile(self):
		"""Tokenize the bytecode into a program (no subroutine flattening)."""
		if self.bytecode is None:
			return
		program = []
		index = 0
		while True:
			token, isOperator, index = self.getToken(index)
			if token is None:
				break
			program.append(token)
		self.setProgram(program)
	def draw(self, pen):
		"""Execute the charstring, drawing its outline with *pen*."""
		extractor = T1OutlineExtractor(pen, self.subrs)
		extractor.execute(self)
		self.width = extractor.width
class SimpleT2Decompiler(object):
	"""Minimal T2 interpreter: walks a charstring (following local and
	global subroutine calls) just far enough to turn bytecode into a
	token program, tracking hint counts so hintmask bytes can be sized.
	Subclasses override op_* handlers to do real work."""
	def __init__(self, localSubrs, globalSubrs):
		self.localSubrs = localSubrs
		self.localBias = calcSubrBias(localSubrs)
		self.globalSubrs = globalSubrs
		self.globalBias = calcSubrBias(globalSubrs)
		self.reset()
	def reset(self):
		self.callingStack = []
		self.operandStack = []
		self.hintCount = 0
		self.hintMaskBytes = 0
	def execute(self, charString):
		"""Interpret *charString* token by token, dispatching operators
		to op_* methods; decompiles the charstring as a side effect if
		it is still in bytecode form."""
		self.callingStack.append(charString)
		needsDecompilation = charString.needsDecompilation()
		if needsDecompilation:
			program = []
			pushToProgram = program.append
		else:
			pushToProgram = lambda x: None
		pushToStack = self.operandStack.append
		index = 0
		while True:
			token, isOperator, index = charString.getToken(index)
			if token is None:
				break # we're done!
			pushToProgram(token)
			if isOperator:
				handlerName = "op_" + token
				if hasattr(self, handlerName):
					handler = getattr(self, handlerName)
					rv = handler(index)
					if rv:
						# hintmask/cntrmask return the raw mask bytes
						# plus the index past them.
						hintMaskBytes, index = rv
						pushToProgram(hintMaskBytes)
				else:
					# Unknown/unhandled operator: just clear the stack.
					self.popall()
			else:
				pushToStack(token)
		if needsDecompilation:
			assert program, "illegal CharString: decompiled to empty program"
			assert program[-1] in ("endchar", "return", "callsubr", "callgsubr",
					"seac"), "illegal CharString"
			charString.setProgram(program)
		del self.callingStack[-1]
	def pop(self):
		value = self.operandStack[-1]
		del self.operandStack[-1]
		return value
	def popall(self):
		stack = self.operandStack[:]
		self.operandStack[:] = []
		return stack
	def push(self, value):
		self.operandStack.append(value)
	def op_return(self, index):
		# NOTE(review): this is a no-op -- the conditional body is 'pass'.
		if self.operandStack:
			pass
	def op_endchar(self, index):
		pass
	def op_ignore(self, index):
		pass
	def op_callsubr(self, index):
		# Subroutine numbers are biased per the T2 spec.
		subrIndex = self.pop()
		subr = self.localSubrs[subrIndex+self.localBias]
		self.execute(subr)
	def op_callgsubr(self, index):
		subrIndex = self.pop()
		subr = self.globalSubrs[subrIndex+self.globalBias]
		self.execute(subr)
	def op_hstem(self, index):
		self.countHints()
	def op_vstem(self, index):
		self.countHints()
	def op_hstemhm(self, index):
		self.countHints()
	def op_vstemhm(self, index):
		self.countHints()
	def op_hintmask(self, index):
		# The first hintmask also counts any implicit vstem hints, then
		# fixes the mask width for the rest of the charstring.
		if not self.hintMaskBytes:
			self.countHints()
			self.hintMaskBytes = (self.hintCount + 7) // 8
		hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)
		return hintMaskBytes, index
	op_cntrmask = op_hintmask
	def countHints(self):
		# Each hint is a (position, width) pair -> two operands per hint.
		args = self.popall()
		self.hintCount = self.hintCount + len(args) // 2
	# misc
	def op_and(self, index):
		raise NotImplementedError
	def op_or(self, index):
		raise NotImplementedError
	def op_not(self, index):
		raise NotImplementedError
	def op_store(self, index):
		raise NotImplementedError
	def op_abs(self, index):
		raise NotImplementedError
	def op_add(self, index):
		raise NotImplementedError
	def op_sub(self, index):
		raise NotImplementedError
	def op_div(self, index):
		raise NotImplementedError
	def op_load(self, index):
		raise NotImplementedError
	def op_neg(self, index):
		raise NotImplementedError
	def op_eq(self, index):
		raise NotImplementedError
	def op_drop(self, index):
		raise NotImplementedError
	def op_put(self, index):
		raise NotImplementedError
	def op_get(self, index):
		raise NotImplementedError
	def op_ifelse(self, index):
		raise NotImplementedError
	def op_random(self, index):
		raise NotImplementedError
	def op_mul(self, index):
		raise NotImplementedError
	def op_sqrt(self, index):
		raise NotImplementedError
	def op_dup(self, index):
		raise NotImplementedError
	def op_exch(self, index):
		raise NotImplementedError
	def op_index(self, index):
		raise NotImplementedError
	def op_roll(self, index):
		raise NotImplementedError
class T2OutlineExtractor(SimpleT2Decompiler):
	"""T2 interpreter that executes path operators against a pen,
	tracking the current point (T2 operands are all deltas) and the
	optional leading advance-width operand."""
	def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX):
		SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
		self.pen = pen
		self.nominalWidthX = nominalWidthX
		self.defaultWidthX = defaultWidthX
	def reset(self):
		SimpleT2Decompiler.reset(self)
		self.hints = []
		self.gotWidth = 0
		self.width = 0
		self.currentPoint = (0, 0)
		self.sawMoveTo = 0
	def _nextPoint(self, point):
		# Translate a relative delta to absolute coordinates.
		x, y = self.currentPoint
		point = x + point[0], y + point[1]
		self.currentPoint = point
		return point
	def rMoveTo(self, point):
		self.pen.moveTo(self._nextPoint(point))
		self.sawMoveTo = 1
	def rLineTo(self, point):
		if not self.sawMoveTo:
			self.rMoveTo((0, 0))
		self.pen.lineTo(self._nextPoint(point))
	def rCurveTo(self, pt1, pt2, pt3):
		if not self.sawMoveTo:
			self.rMoveTo((0, 0))
		nextPoint = self._nextPoint
		self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3))
	def closePath(self):
		if self.sawMoveTo:
			self.pen.closePath()
		self.sawMoveTo = 0
	def endPath(self):
		# In T2 there are no open paths, so always do a closePath when
		# finishing a sub path.
		self.closePath()
	def popallWidth(self, evenOdd=0):
		# The very first stack-clearing operator may carry one extra
		# leading operand: the advance width delta. Its presence is
		# detected by the parity of the operand count.
		args = self.popall()
		if not self.gotWidth:
			if evenOdd ^ (len(args) % 2):
				self.width = self.nominalWidthX + args[0]
				args = args[1:]
			else:
				self.width = self.defaultWidthX
			self.gotWidth = 1
		return args
	def countHints(self):
		args = self.popallWidth()
		self.hintCount = self.hintCount + len(args) // 2
	#
	# hint operators
	#
	#def op_hstem(self, index):
	#	self.countHints()
	#def op_vstem(self, index):
	#	self.countHints()
	#def op_hstemhm(self, index):
	#	self.countHints()
	#def op_vstemhm(self, index):
	#	self.countHints()
	#def op_hintmask(self, index):
	#	self.countHints()
	#def op_cntrmask(self, index):
	#	self.countHints()
	#
	# path constructors, moveto
	#
	def op_rmoveto(self, index):
		self.endPath()
		self.rMoveTo(self.popallWidth())
	def op_hmoveto(self, index):
		self.endPath()
		self.rMoveTo((self.popallWidth(1)[0], 0))
	def op_vmoveto(self, index):
		self.endPath()
		self.rMoveTo((0, self.popallWidth(1)[0]))
	def op_endchar(self, index):
		self.endPath()
		args = self.popallWidth()
		if args:
			from fontTools.encodings.StandardEncoding import StandardEncoding
			# endchar can do seac accent bulding; The T2 spec says it's deprecated,
			# but recent software that shall remain nameless does output it.
			adx, ady, bchar, achar = args
			baseGlyph = StandardEncoding[bchar]
			self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
			accentGlyph = StandardEncoding[achar]
			self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
	#
	# path constructors, lines
	#
	def op_rlineto(self, index):
		args = self.popall()
		for i in range(0, len(args), 2):
			point = args[i:i+2]
			self.rLineTo(point)
	def op_hlineto(self, index):
		self.alternatingLineto(1)
	def op_vlineto(self, index):
		self.alternatingLineto(0)
	#
	# path constructors, curves
	#
	def op_rrcurveto(self, index):
		"""{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
		args = self.popall()
		for i in range(0, len(args), 6):
			dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6]
			self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))
	def op_rcurveline(self, index):
		"""{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline"""
		args = self.popall()
		# All but the last two operands are curves; the tail is one line.
		for i in range(0, len(args)-2, 6):
			dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6]
			self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
		self.rLineTo(args[-2:])
	def op_rlinecurve(self, index):
		"""{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve"""
		args = self.popall()
		# All but the last six operands are lines; the tail is one curve.
		lineArgs = args[:-6]
		for i in range(0, len(lineArgs), 2):
			self.rLineTo(lineArgs[i:i+2])
		dxb, dyb, dxc, dyc, dxd, dyd = args[-6:]
		self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
	def op_vvcurveto(self, index):
		"dx1? {dya dxb dyb dyc}+ vvcurveto"
		args = self.popall()
		# An odd operand count means the first curve has a dx1 delta.
		if len(args) % 2:
			dx1 = args[0]
			args = args[1:]
		else:
			dx1 = 0
		for i in range(0, len(args), 4):
			dya, dxb, dyb, dyc = args[i:i+4]
			self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc))
			dx1 = 0
	def op_hhcurveto(self, index):
		"""dy1? {dxa dxb dyb dxc}+ hhcurveto"""
		args = self.popall()
		# An odd operand count means the first curve has a dy1 delta.
		if len(args) % 2:
			dy1 = args[0]
			args = args[1:]
		else:
			dy1 = 0
		for i in range(0, len(args), 4):
			dxa, dxb, dyb, dxc = args[i:i+4]
			self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0))
			dy1 = 0
	def op_vhcurveto(self, index):
		"""dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30)
		{dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto
		"""
		args = self.popall()
		# Alternate vertical-start and horizontal-start curve segments.
		while args:
			args = self.vcurveto(args)
			if args:
				args = self.hcurveto(args)
	def op_hvcurveto(self, index):
		"""dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf?
		{dxa dxb dyb dyc dyd dxe dye dxf}+ dyf?
		"""
		args = self.popall()
		# Alternate horizontal-start and vertical-start curve segments.
		while args:
			args = self.hcurveto(args)
			if args:
				args = self.vcurveto(args)
	#
	# path constructors, flex
	#
	def op_hflex(self, index):
		dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall()
		dy1 = dy3 = dy4 = dy6 = 0
		dy5 = -dy2
		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	def op_flex(self, index):
		# The final 'fd' (flex depth) operand is ignored when drawing.
		dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()
		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	def op_hflex1(self, index):
		dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()
		dy3 = dy4 = 0
		# The last dy brings the curve back to the starting y.
		dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5)
		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	def op_flex1(self, index):
		dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()
		dx = dx1 + dx2 + dx3 + dx4 + dx5
		dy = dy1 + dy2 + dy3 + dy4 + dy5
		# d6 applies to the dominant axis; the other axis closes back.
		if abs(dx) > abs(dy):
			dx6 = d6
			dy6 = -dy
		else:
			dx6 = -dx
			dy6 = d6
		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	#
	# MultipleMaster. Well...
	#
	def op_blend(self, index):
		self.popall()
	# misc
	def op_and(self, index):
		raise NotImplementedError
	def op_or(self, index):
		raise NotImplementedError
	def op_not(self, index):
		raise NotImplementedError
	def op_store(self, index):
		raise NotImplementedError
	def op_abs(self, index):
		raise NotImplementedError
	def op_add(self, index):
		raise NotImplementedError
	def op_sub(self, index):
		raise NotImplementedError
	def op_div(self, index):
		# Push an int result when the division is exact, a float otherwise.
		num2 = self.pop()
		num1 = self.pop()
		d1 = num1//num2
		d2 = num1/num2
		if d1 == d2:
			self.push(d1)
		else:
			self.push(d2)
	def op_load(self, index):
		raise NotImplementedError
	def op_neg(self, index):
		raise NotImplementedError
	def op_eq(self, index):
		raise NotImplementedError
	def op_drop(self, index):
		raise NotImplementedError
	def op_put(self, index):
		raise NotImplementedError
	def op_get(self, index):
		raise NotImplementedError
	def op_ifelse(self, index):
		raise NotImplementedError
	def op_random(self, index):
		raise NotImplementedError
	def op_mul(self, index):
		raise NotImplementedError
	def op_sqrt(self, index):
		raise NotImplementedError
	def op_dup(self, index):
		raise NotImplementedError
	def op_exch(self, index):
		raise NotImplementedError
	def op_index(self, index):
		raise NotImplementedError
	def op_roll(self, index):
		raise NotImplementedError
	#
	# miscellaneous helpers
	#
	def alternatingLineto(self, isHorizontal):
		# hlineto/vlineto: one operand per line, axes alternating.
		args = self.popall()
		for arg in args:
			if isHorizontal:
				point = (arg, 0)
			else:
				point = (0, arg)
			self.rLineTo(point)
			isHorizontal = not isHorizontal
	def vcurveto(self, args):
		# One vertical-start curve segment; a trailing odd operand is
		# the final dyc of the last segment.
		dya, dxb, dyb, dxc = args[:4]
		args = args[4:]
		if len(args) == 1:
			dyc = args[0]
			args = []
		else:
			dyc = 0
		self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc))
		return args
	def hcurveto(self, args):
		# One horizontal-start curve segment; a trailing odd operand is
		# the final dxc of the last segment.
		dxa, dxb, dyb, dyc = args[:4]
		args = args[4:]
		if len(args) == 1:
			dxc = args[0]
			args = []
		else:
			dxc = 0
		self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))
		return args
class T1OutlineExtractor(T2OutlineExtractor):
	"""Type 1 variant of the outline extractor: no width parsing on the
	operand stack (hsbw/sbw carry it), open paths are allowed, and flex
	is implemented via the callothersubr protocol."""
	def __init__(self, pen, subrs):
		self.pen = pen
		self.subrs = subrs
		self.reset()
	def reset(self):
		self.flexing = 0
		self.width = 0
		self.sbx = 0
		T2OutlineExtractor.reset(self)
	def endPath(self):
		# Unlike T2, Type 1 paths may be left open.
		if self.sawMoveTo:
			self.pen.endPath()
		self.sawMoveTo = 0
	def popallWidth(self, evenOdd=0):
		# Type 1 never embeds the width in the operand stream.
		return self.popall()
	def exch(self):
		stack = self.operandStack
		stack[-1], stack[-2] = stack[-2], stack[-1]
	#
	# path constructors
	#
	def op_rmoveto(self, index):
		if self.flexing:
			return
		self.endPath()
		self.rMoveTo(self.popall())
	def op_hmoveto(self, index):
		if self.flexing:
			# We must add a parameter to the stack if we are flexing
			self.push(0)
			return
		self.endPath()
		self.rMoveTo((self.popall()[0], 0))
	def op_vmoveto(self, index):
		if self.flexing:
			# We must add a parameter to the stack if we are flexing
			self.push(0)
			self.exch()
			return
		self.endPath()
		self.rMoveTo((0, self.popall()[0]))
	def op_closepath(self, index):
		self.closePath()
	def op_setcurrentpoint(self, index):
		args = self.popall()
		x, y = args
		self.currentPoint = x, y
	def op_endchar(self, index):
		self.endPath()
	def op_hsbw(self, index):
		# Left sidebearing and advance width; sbx offsets the start point.
		sbx, wx = self.popall()
		self.width = wx
		self.sbx = sbx
		self.currentPoint = sbx, self.currentPoint[1]
	def op_sbw(self, index):
		self.popall() # XXX
	#
	def op_callsubr(self, index):
		# Type 1 subroutine numbers are not biased.
		subrIndex = self.pop()
		subr = self.subrs[subrIndex]
		self.execute(subr)
	def op_callothersubr(self, index):
		subrIndex = self.pop()
		nArgs = self.pop()
		#print nArgs, subrIndex, "callothersubr"
		# Othersubr 0 ends a flex section, othersubr 1 starts one;
		# everything else is ignored.
		if subrIndex == 0 and nArgs == 3:
			self.doFlex()
			self.flexing = 0
		elif subrIndex == 1 and nArgs == 0:
			self.flexing = 1
		# ignore...
	def op_pop(self, index):
		pass # ignore...
	def doFlex(self):
		# Pop the 17 accumulated flex values (in reverse push order) and
		# replay them as two rrcurveto segments.
		finaly = self.pop()
		finalx = self.pop()
		self.pop()	# flex height is unused
		p3y = self.pop()
		p3x = self.pop()
		bcp4y = self.pop()
		bcp4x = self.pop()
		bcp3y = self.pop()
		bcp3x = self.pop()
		p2y = self.pop()
		p2x = self.pop()
		bcp2y = self.pop()
		bcp2x = self.pop()
		bcp1y = self.pop()
		bcp1x = self.pop()
		rpy = self.pop()
		rpx = self.pop()
		# call rrcurveto
		self.push(bcp1x+rpx)
		self.push(bcp1y+rpy)
		self.push(bcp2x)
		self.push(bcp2y)
		self.push(p2x)
		self.push(p2y)
		self.op_rrcurveto(None)
		# call rrcurveto
		self.push(bcp3x)
		self.push(bcp3y)
		self.push(bcp4x)
		self.push(bcp4y)
		self.push(p3x)
		self.push(p3y)
		self.op_rrcurveto(None)
		# Push back final coords so subr 0 can find them
		self.push(finalx)
		self.push(finaly)
	def op_dotsection(self, index):
		self.popall() # XXX
	def op_hstem3(self, index):
		self.popall() # XXX
	def op_seac(self, index):
		"asb adx ady bchar achar seac"
		from fontTools.encodings.StandardEncoding import StandardEncoding
		asb, adx, ady, bchar, achar = self.popall()
		baseGlyph = StandardEncoding[bchar]
		self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
		accentGlyph = StandardEncoding[achar]
		adx = adx + self.sbx - asb # seac weirdness
		self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
	def op_vstem3(self, index):
		self.popall() # XXX
class DictDecompiler(ByteCodeBase):
    """Decompile binary CFF DICT data into a Python dict keyed by operator name.

    Operand bytes are pushed on a stack; when an operator byte is reached,
    the accumulated operands are converted according to the operator's
    declared argument type and stored in self.dict.
    """
    operandEncoding = cffDictOperandEncoding

    def __init__(self, strings):
        self.stack = []
        # the CFF string INDEX, used to resolve SID operands
        self.strings = strings
        self.dict = {}

    def getDict(self):
        # All operands must have been consumed by operators.
        assert len(self.stack) == 0, "non-empty stack"
        return self.dict

    def decompile(self, data):
        """Walk *data* byte by byte, dispatching on the operand encoding table."""
        index = 0
        lenData = len(data)
        push = self.stack.append
        while index < lenData:
            b0 = byteord(data[index])
            index = index + 1
            code = self.operandEncoding[b0]
            handler = getattr(self, code)
            value, index = handler(b0, data, index)
            # operator handlers return None; operand handlers return a value
            if value is not None:
                push(value)

    def pop(self):
        value = self.stack[-1]
        del self.stack[-1]
        return value

    def popall(self):
        args = self.stack[:]
        del self.stack[:]
        return args

    def do_operator(self, b0, data, index):
        if b0 == 12:
            # escape byte: two-byte operator (12, next byte)
            op = (b0, byteord(data[index]))
            index = index+1
        else:
            op = b0
        operator, argType = self.operators[op]
        self.handle_operator(operator, argType)
        return None, index

    def handle_operator(self, operator, argType):
        # A tuple argType means several typed arguments, popped right-to-left.
        if isinstance(argType, type(())):
            value = ()
            for i in range(len(argType)-1, -1, -1):
                arg = argType[i]
                arghandler = getattr(self, "arg_" + arg)
                value = (arghandler(operator),) + value
        else:
            arghandler = getattr(self, "arg_" + argType)
            value = arghandler(operator)
        self.dict[operator] = value

    def arg_number(self, name):
        return self.pop()

    def arg_SID(self, name):
        # String ID: index into the strings table.
        return self.strings[self.pop()]

    def arg_array(self, name):
        return self.popall()

    def arg_delta(self, name):
        # Delta-encoded array: each stored value is relative to the previous.
        out = []
        current = 0
        for v in self.popall():
            current = current + v
            out.append(current)
        return out
def calcSubrBias(subrs):
    """Return the subroutine-number bias for a subr INDEX of this size.

    The thresholds (1240 / 33900) and bias values (107 / 1131 / 32768)
    follow the CFF charstring subroutine numbering rules.
    """
    count = len(subrs)
    if count < 1240:
        return 107
    if count < 33900:
        return 1131
    return 32768
| {
"content_hash": "543df5150e05dd9e6bdd0f3d33d08349",
"timestamp": "",
"source": "github",
"line_count": 1177,
"max_line_length": 82,
"avg_line_length": 25.074766355140188,
"alnum_prop": 0.6443262291193711,
"repo_name": "googlei18n/fontuley",
"id": "6ffdb994a5b5e6cc650550422e618581217c2115",
"size": "29513",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/third_party/fontTools/Lib/fontTools/misc/psCharStrings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "242"
},
{
"name": "JavaScript",
"bytes": "109257"
},
{
"name": "Python",
"bytes": "857727"
},
{
"name": "Shell",
"bytes": "564"
}
],
"symlink_target": ""
} |
import copy
try:
import json
except ImportError:
import simplejson as json
import geolocation
import geolocation.abstract as abstract
class CodecError(StandardError):
    """Base exception class for codec errors"""
    # NOTE(review): StandardError exists only on Python 2; this module is
    # Python-2-only as written.
    pass
class UnknownCodec(CodecError):
    """Failed to find any codec for the given format"""
    pass
class Codec(object):
    """Abstract base for codecs that encode/decode one of several formats."""

    formats = []  # list of format names this codec supports

    def __init__(self, variant):
        """Initialize the codec for one of its supported format variants."""
        if variant in self.formats:
            self.variant = variant
            return
        raise CodecError('%r not supported by %r (supported: %s)' %
                         (variant, self, ', '.join(self.formats)))

    def decode(self, data):
        """Decode data; concrete codecs must override."""
        raise NotImplementedError()

    def encode(self, data):
        """Encode data; concrete codecs must override."""
        raise NotImplementedError()
class CodecFactory(abstract.Factory):
    """Factory mapping format names to codec classes."""

    @classmethod
    def register(cls, codec, formats=None):
        """Register *codec* for the given formats (default: codec.formats).

        *formats* may be a single format name or an iterable of names.
        """
        if formats is None:
            formats = codec.formats
        elif not isinstance(formats, (list, tuple, set)):
            formats = [formats]
        for fmt in formats:
            super(CodecFactory, cls).register(fmt, codec)

    @classmethod
    def create(cls, variant, *args, **kwargs):
        """Instantiate the codec registered for *variant*.

        Raises UnknownCodec if no codec handles the format.
        """
        try:
            codec = super(CodecFactory, cls).create(variant, variant, *args, **kwargs)
        except abstract.UnknownVariant:
            # BUG FIX: the UnknownCodec exception was previously *returned*
            # instead of raised, handing callers an exception instance as if
            # it were a codec.
            raise UnknownCodec('Failed to find a codec to handle "%s" (available: %s)' % \
                (variant, ', '.join(cls.creatables())))
        return codec
class GeolocationDecodeError(CodecError, geolocation.GeolocationError):
    # Decoding failure belonging to both the codec and geolocation error
    # hierarchies, so callers may catch either base.
    pass
class GeolocationCodec(Codec):
    # Marker base class for codecs that handle geolocation payloads.
    pass
class GeolocationCodecs(CodecFactory):
    """Geolocation Codecs

    BUG FIX: the doctest previously called ``GeolocationCodec.create`` —
    ``create`` is defined on the factory (this class), not on the codec base.

    >>> json = '{"foo": ["bar", false, 1.3]}'
    >>> data = {u'foo': [u'bar', False, 1.3]}
    >>> codec = GeolocationCodecs.create('json')
    >>> codec.decode(json) == data
    True
    >>> codec.encode(data) == json
    True
    """
    pass
class GeolocationJsonCodec(GeolocationCodec):
    """Codec that (de)serializes geolocation payloads as JSON."""
    formats = ['json']

    def decode(self, data):
        # Parse a JSON string into Python objects.
        return json.loads(data)

    def encode(self, data):
        # Serialize Python objects to a JSON string.
        return json.dumps(data)
def setup(codecs, registry=None):
if not registry:
registry = GeolocationCodecs()
if isinstance(codecs, type(registry)):
return copy(codecs)
if isinstance(codecs, dict):
for variant, codec in codecs.iteritems():
registry.register(codec, [variant])
else:
for codec in codecs:
registry.register(codec)
return registry
# Register the built-in JSON codec on the shared GeolocationCodecs registry
# at import time.
setup([GeolocationJsonCodec], GeolocationCodecs)

if __name__ == '__main__':
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| {
"content_hash": "6890423eab9b1803029371c73ab8e8cd",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 91,
"avg_line_length": 23.75423728813559,
"alnum_prop": 0.6086336068498038,
"repo_name": "op/geolocation",
"id": "4729beaa53de3c4356de7b6b5a8e6511aa8a8bc6",
"size": "2864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geolocation/codecs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20407"
}
],
"symlink_target": ""
} |
from flask import request, render_template, redirect, url_for, jsonify
from werkzeug.utils import secure_filename
from app import multiauth, errHandler, models, mysqlhandler, aws_s3Handler
from . import firmware
from app import mysql
runMySQL = mysqlhandler.MySQLHandler(mysql)
@firmware.route('/firmware')
def index():
    """Render the firmware upload page."""
    return render_template('upload_firmware.html')
def allowed_file(filename):
    """Return True if *filename* has an allowed firmware extension (.zip/.gz).

    Generalized to be case-insensitive, so e.g. 'fw.ZIP' is accepted too.
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in {'zip', 'gz'}
@firmware.route('/upload_firmware', methods=['GET','POST'])
def upload_firmware():
    """Upload the first posted firmware archive to S3 and record its
    version number (taken from the filename stem) and URL in MySQL.

    Returns JSON: the saved URL on success, or an error description.
    """
    for f in request.files.getlist("file"):
        if not allowed_file(f.filename):
            return jsonify({"Failed": "file type does not be allowed to upload"})
        filename = secure_filename(f.filename)
        res = aws_s3Handler.upload_data('Firmware/' + filename, f)
        if not res:
            # BUG FIX: this branch previously reported a file-type error even
            # though it is the S3 upload itself that failed.
            return jsonify({"Failed": "failed to upload file to S3"})
        url = aws_s3Handler.get_url('Firmware/' + filename)
        # NOTE(review): values are interpolated into the SQL string; prefer
        # the driver's parameter binding if sqlquery supports it.
        runMySQL.sqlquery('''INSERT INTO firmware(ver, s3_path) VALUES(%s, "%s")''' %(int(filename.rsplit('.', 1)[0]), url.rsplit('?',1)[0] ), sqlcommit=1)
        return jsonify({"file saved": url})
    # BUG FIX: previously fell off the end (implicitly returning None) when
    # the request contained no file field.
    return jsonify({"Failed": "no file provided"})
@firmware.route('/api/get_firmware', methods=['POST'])
@multiauth.login_required
def get_firmware():
    """Return the latest firmware version and its S3 URL as JSON.

    Expects a JSON body with 'userid' and a bearer token in the
    Authorization header; the pair is validated before answering.
    """
    user_account = request.json.get('userid')
    # NOTE(review): raises AttributeError/IndexError if the Authorization
    # header is missing or not of the form "<scheme> <token>" — confirm the
    # login_required decorator guarantees its presence.
    header_token = request.headers.get('Authorization').split(' ')[1]
    status = models.check_userid_and_token(user_account, header_token)
    if status:
        # A truthy status is an error response from the auth check.
        return status
    else:
        # Highest version wins; rv is (ver, s3_path) or None when the table
        # is empty.
        rv = runMySQL.sqlquery('''SELECT ver, s3_path FROM firmware ORDER BY ver DESC limit 1''')
        if rv is None:
            return jsonify({"errcode": 400, "respmsg": errHandler.get_errmsg(400)})
        return jsonify({"errcode": 200, "respmsg": errHandler.get_errmsg(200), "version": rv[0], "url": rv[1]})
| {
"content_hash": "2e884d72ec97ed9c031dc476bc5f143d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 151,
"avg_line_length": 32.236363636363635,
"alnum_prop": 0.6959954878736605,
"repo_name": "flyfax/huawei_metis",
"id": "e8440c601bec22566ee7af73fb2d4eb9496027bf",
"size": "1797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tcm/firmware_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "440"
},
{
"name": "HTML",
"bytes": "2581"
},
{
"name": "PLpgSQL",
"bytes": "287103"
},
{
"name": "Python",
"bytes": "174845"
},
{
"name": "SQLPL",
"bytes": "13153"
}
],
"symlink_target": ""
} |
from pyrates.utility.grid_search import ClusterWorkerTemplate
import os
from pandas import DataFrame
from pyrates.utility import grid_search, welch
import numpy as np
from copy import deepcopy
class MinimalWorker(ClusterWorkerTemplate):
    """Worker that rescales grid-search results during post-processing."""

    def worker_postprocessing(self, **kwargs):
        """Copy self.results into processed_results, scaling the values by
        1e3 and the (time) index by 1e-3."""
        self.processed_results = DataFrame(data=None, columns=self.results.columns)
        # DataFrame.iteritems() was removed in pandas 2.0; .items() is the
        # long-standing, backward-compatible equivalent.
        for idx, data in self.results.items():
            self.processed_results.loc[:, idx] = data * 1e3
        self.processed_results.index = self.results.index * 1e-3
class ExtendedWorker(MinimalWorker):
    """Worker that runs a grid search and scores every parameter set."""

    def worker_gs(self, *args, **kwargs):
        """Run grid_search, keep results and result_map on self, and return
        the simulation time."""
        kwargs_tmp = deepcopy(kwargs)
        param_grid = kwargs_tmp.pop('param_grid')
        r, self.result_map, sim_time = grid_search(*args, param_grid=param_grid, **kwargs_tmp)
        # drop the third column level of the returned frame
        r = r.droplevel(2, axis=1)
        self.results = r
        return sim_time

    def worker_postprocessing(self, **kwargs):
        """Compute a fitness value plus mean r_e/r_i rates per grid point."""
        kwargs_tmp = kwargs.copy()
        param_grid = kwargs_tmp.pop('param_grid')
        targets = kwargs_tmp.pop('y')
        self.processed_results = DataFrame(data=None, columns=['fitness', 'r_e', 'r_i'])
        # calculate fitness
        for gene_id in param_grid.index:
            r = self.results
            # NOTE(review): `r * 1e3` copies the whole frame on every
            # iteration; the rescaling is loop-invariant and could be hoisted.
            r = r * 1e3
            r.index = r.index * 1e-3
            # discard the first half of the trace as transient
            cutoff = r.index[-1]*0.5
            mean_re = np.mean(r['r_e'][f'circuit_{gene_id}'].loc[cutoff:])
            mean_ri = np.mean(r['r_i'][f'circuit_{gene_id}'].loc[cutoff:])
            outputs = [mean_re, mean_ri]
            dist1 = fitness(outputs, targets)
            dist2 = np.var(r['r_i'][f'circuit_{gene_id}'].loc[cutoff:])
            # rebind r to the unscaled results for the summary columns below.
            # NOTE(review): `cutoff` was computed on the rescaled (1e-3)
            # index but is reused here against the raw index — confirm this
            # slicing is intended.
            r = self.results
            self.processed_results.loc[gene_id, 'fitness'] = dist1+dist2
            self.processed_results.loc[gene_id, 'r_e'] = np.mean(r['r_e'][f'circuit_{gene_id}'].loc[cutoff:])*1e3
            self.processed_results.loc[gene_id, 'r_i'] = np.mean(r['r_i'][f'circuit_{gene_id}'].loc[cutoff:])*1e3
def fitness(y, t):
    """Weighted root-mean-square distance between outputs *y* and targets *t*.

    Targets are weighted by their relative magnitude; NaN targets contribute
    zero to the difference vector.
    """
    t_arr = np.asarray(t)
    weights = t_arr / sum(t_arr)
    y_flat = np.asarray(y).flatten()
    t_flat = t_arr.flatten()
    diff = np.where(np.isnan(t_flat), 0.0, y_flat - t_flat)
    return np.sqrt(weights @ diff**2)
if __name__ == "__main__":
    # Stand-alone entry point: initialize the worker from the command-line
    # arguments supplied by the cluster grid-search framework.
    cgs_worker = ExtendedWorker()
    cgs_worker.worker_init()
    #cgs_worker.worker_init(
    #    config_file="/nobackup/spanien1/rgast/PycharmProjects/BrainNetworks/BasalGanglia/stn_gpe_simple_opt/Config/DefaultConfig_0.yaml",
    #    subgrid="/nobackup/spanien1/rgast/PycharmProjects/BrainNetworks/BasalGanglia/stn_gpe_simple_opt/Grids/Subgrids/DefaultGrid_43/spanien/spanien_Subgrid_0.h5",
    #    result_file="~/my_result.h5",
    #    build_dir=os.getcwd()
    #)
| {
"content_hash": "dff99698dc921a0a4b06bb315ea76e16",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 165,
"avg_line_length": 41.17910447761194,
"alnum_prop": 0.6219644798840159,
"repo_name": "Richert/BrainNetworks",
"id": "0846122cfd534c1bee41c641c90d7d2a974cea7a",
"size": "2778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BasalGanglia/stn_gpe_simple_cfit_worker.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Fortran",
"bytes": "9380"
},
{
"name": "Python",
"bytes": "397632"
}
],
"symlink_target": ""
} |
class LinkedListNode:
    """A single node of a singly linked list."""

    def __init__(self, value):
        # The node starts unlinked; callers wire up .next themselves.
        self.value = value
        self.next = None
# Build a five-node sample list:
# Angel Food -> Bundt -> Cheese -> Devil's Food -> Eccles
a = LinkedListNode("Angel Food")
b = LinkedListNode("Bundt")
c = LinkedListNode("Cheese")
d = LinkedListNode("Devil's Food")
e = LinkedListNode("Eccles")
a.next = b
b.next = c
c.next = d
d.next = e
def kth_to_last_node(k, head):
    """Return the kth-to-last node of the linked list starting at *head*.

    k is 1-based: k == 1 returns the last node. Raises ValueError if k < 1
    or if the list holds fewer than k nodes. O(n) time, O(1) space.
    """
    if k < 1:
        raise ValueError('Impossible to find less than first to last node: %s' % k)
    left_node = head
    right_node = head
    # move right_node to the kth node
    # (range instead of the Python-2-only xrange — works on both versions)
    for _ in range(k - 1):
        # but along the way, if a right_node doesn't have a next,
        # then k is greater than the length of the list and there
        # can't be a kth-to-last node! we'll raise an error
        if not right_node.next:
            raise ValueError('k is larger than the length of the linked list: %s' % k)
        right_node = right_node.next
    # starting with left_node on the head,
    # move left_node and right_node down the list,
    # maintaining a distance of k between them,
    # until right_node hits the end of the list
    while right_node.next:
        left_node = left_node.next
        right_node = right_node.next
    # since left_node is k nodes behind right_node,
    # left_node is now the kth to last node!
    return left_node
# Python 2 print statement: demo prints the last node's value ("Eccles").
print kth_to_last_node(1, a).value

# Alternative implementation kept for reference by the original author:
# def kth_to_last_node(k, head):
#     front = head
#     for i in range(k):
#         if front is None:
#             raise ValueError("k cannot be larger than length of linked list")
#         front = front.next
#
#     back = head
#     while front is not None:
#         back = back.next
#         front = front.next
#     return back
#
#
# print kth_to_last_node(3, a).value
| {
"content_hash": "dd36337d4f2a2e0cb1d6ceb8774d0ee1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 86,
"avg_line_length": 25.80597014925373,
"alnum_prop": 0.6124927703875073,
"repo_name": "howardwkim/ctci",
"id": "5bd294830f1e98bc1c37d79e4f47cbea00e48f4c",
"size": "1729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cake/p25_kth_last_node.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "42659"
}
],
"symlink_target": ""
} |
"""
WSGI config for FestPal_server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (if not already set) before building
# the WSGI application object that servers import.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FestPal_server.settings")
application = get_wsgi_application()
| {
"content_hash": "bf241fcb15c676a911259ae2e95b495b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.3125,
"alnum_prop": 0.7728395061728395,
"repo_name": "amentis/FestPal-Server",
"id": "95db95f63e4ab3c0bb279767ad8ed18825de9600",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FestPal_server/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "290"
},
{
"name": "Python",
"bytes": "99612"
}
],
"symlink_target": ""
} |
import pytest
from browserstacker import ScreenShotsAPI
from .constants import BROWSERSTACK_USER, BROWSERSTACK_KEY
@pytest.fixture()
def screenshoter(request):
    """
    Instance of ScreenShotsAPI, parametrized with desired capabilities.
    """
    # pytest.yield_fixture is deprecated (removed in pytest 6.2); the plain
    # fixture decorator has supported yield-style teardown since pytest 3.0.
    browser = getattr(request, 'param', None)
    api = ScreenShotsAPI(BROWSERSTACK_USER, BROWSERSTACK_KEY, default_browser=browser)
    yield api
    # release the underlying HTTP session after the test finishes
    api.session.close()
| {
"content_hash": "f0f7cbeb143c29e0958f24c390a232c6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 86,
"avg_line_length": 26.72222222222222,
"alnum_prop": 0.7193347193347194,
"repo_name": "Stranger6667/pytest-browserstack",
"id": "4129fdcc06d5239b67d23676a19d84ecd109b7f8",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytest_browserstack/screenshots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1207"
},
{
"name": "Python",
"bytes": "24355"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
    # NOTE: this class follows plotly's auto-generated graph-object layout;
    # structural changes belong in the code generator, not here.

    # class properties
    # --------------------
    _parent_path_str = "scatterpolar.marker.colorbar"
    _path_str = "scatterpolar.marker.colorbar.tickformatstop"
    _valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}

    # dtickrange
    # ----------
    @property
    def dtickrange(self):
        """
        range [*min*, *max*], where "min", "max" - dtick values which
        describe some zoom level, it is possible to omit "min" or "max"
        value by passing "null"

        The 'dtickrange' property is an info array that may be specified as:

        * a list or tuple of 2 elements where:
          (0) The 'dtickrange[0]' property accepts values of any type
          (1) The 'dtickrange[1]' property accepts values of any type

        Returns
        -------
        list
        """
        return self["dtickrange"]

    @dtickrange.setter
    def dtickrange(self, val):
        self["dtickrange"] = val

    # enabled
    # -------
    @property
    def enabled(self):
        """
        Determines whether or not this stop is used. If `false`, this
        stop is ignored even within its `dtickrange`.

        The 'enabled' property must be specified as a bool
        (either True, or False)

        Returns
        -------
        bool
        """
        return self["enabled"]

    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val

    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.

        The 'templateitemname' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val

    # value
    # -----
    @property
    def value(self):
        """
        string - dtickformat for described zoom level, the same as
        "tickformat"

        The 'value' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["value"]

    @value.setter
    def value(self, val):
        self["value"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"
        """

    def __init__(
        self,
        arg=None,
        dtickrange=None,
        enabled=None,
        name=None,
        templateitemname=None,
        value=None,
        **kwargs,
    ):
        """
        Construct a new Tickformatstop object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatterpolar.m
            arker.colorbar.Tickformatstop`
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"

        Returns
        -------
        Tickformatstop
        """
        super(Tickformatstop, self).__init__("tickformatstops")

        # internal construction fast-path used by the parent hierarchy
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterpolar.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.marker.colorbar.Tickformatstop`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # explicit keyword arguments take precedence over values in `arg`
        _v = arg.pop("dtickrange", None)
        _v = dtickrange if dtickrange is not None else _v
        if _v is not None:
            self["dtickrange"] = _v
        _v = arg.pop("enabled", None)
        _v = enabled if enabled is not None else _v
        if _v is not None:
            self["enabled"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("templateitemname", None)
        _v = templateitemname if templateitemname is not None else _v
        if _v is not None:
            self["templateitemname"] = _v
        _v = arg.pop("value", None)
        _v = value if value is not None else _v
        if _v is not None:
            self["value"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| {
"content_hash": "de4b39eba160c58a0e42745363485570",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 88,
"avg_line_length": 33.91166077738516,
"alnum_prop": 0.5645514223194749,
"repo_name": "plotly/plotly.py",
"id": "61eeba82553ab1f2d61924463d86311076ac073e",
"size": "9597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/scatterpolar/marker/colorbar/_tickformatstop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
class BuscacursoscrawlerPipeline(object):
    """Scrapy pipeline stub: passes every scraped item through unchanged."""

    def process_item(self, item, spider):
        """Return *item* untouched so later pipeline stages receive it."""
        return item
| {
"content_hash": "2051473749a96720991539103db24c7d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.7211538461538461,
"repo_name": "rpalmaotero/buscacursos-crawler",
"id": "091512babbedc958064a2d308632f53d9ed765c1",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buscacursoscrawler/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6831"
}
],
"symlink_target": ""
} |
"""POSIX specific tests. These are implicitly run by test_psutil.py."""
import unittest
import subprocess
import time
import sys
import os
import datetime
import psutil
from test_psutil import (get_test_subprocess, reap_children, PYTHON, LINUX, OSX,
ignore_access_denied, sh)
def ps(cmd):
    """Run a `ps` command with a -o argument and return the parsed value
    of interest (an int where possible, the raw string otherwise).
    """
    if not LINUX:
        # non-Linux ps has no --no-headers flag; strip it and skip the
        # header row from the output instead
        cmd = cmd.replace(" --no-headers ", " ")
    proc = subprocess.Popen(cmd, shell=1, stdout=subprocess.PIPE)
    raw = proc.communicate()[0].strip()
    if sys.version_info >= (3,):
        raw = str(raw, sys.stdout.encoding)
    if not LINUX:
        raw = raw.split('\n')[1]
    try:
        return int(raw)
    except ValueError:
        return raw
class PosixSpecificTestCase(unittest.TestCase):
    """Compare psutil results against 'ps' command line utility."""
    # for ps -o arguments see: http://unixhelp.ed.ac.uk/CGI/man-cgi?ps

    def setUp(self):
        # spawn a throwaway python subprocess to inspect in each test
        self.pid = get_test_subprocess([PYTHON, "-E", "-O"],
                                       stdin=subprocess.PIPE).pid

    def tearDown(self):
        reap_children()

    def test_process_parent_pid(self):
        ppid_ps = ps("ps --no-headers -o ppid -p %s" %self.pid)
        ppid_psutil = psutil.Process(self.pid).ppid
        self.assertEqual(ppid_ps, ppid_psutil)

    def test_process_uid(self):
        uid_ps = ps("ps --no-headers -o uid -p %s" %self.pid)
        uid_psutil = psutil.Process(self.pid).uids.real
        self.assertEqual(uid_ps, uid_psutil)

    def test_process_gid(self):
        gid_ps = ps("ps --no-headers -o rgid -p %s" %self.pid)
        gid_psutil = psutil.Process(self.pid).gids.real
        self.assertEqual(gid_ps, gid_psutil)

    def test_process_username(self):
        username_ps = ps("ps --no-headers -o user -p %s" %self.pid)
        username_psutil = psutil.Process(self.pid).username
        self.assertEqual(username_ps, username_psutil)

    @ignore_access_denied
    def test_process_rss_memory(self):
        # give python interpreter some time to properly initialize
        # so that the results are the same
        time.sleep(0.1)
        rss_ps = ps("ps --no-headers -o rss -p %s" %self.pid)
        # ps reports RSS in KiB; psutil reports bytes
        rss_psutil = psutil.Process(self.pid).get_memory_info()[0] / 1024
        self.assertEqual(rss_ps, rss_psutil)

    @ignore_access_denied
    def test_process_vsz_memory(self):
        # give python interpreter some time to properly initialize
        # so that the results are the same
        time.sleep(0.1)
        vsz_ps = ps("ps --no-headers -o vsz -p %s" %self.pid)
        # ps reports VSZ in KiB; psutil reports bytes
        vsz_psutil = psutil.Process(self.pid).get_memory_info()[1] / 1024
        self.assertEqual(vsz_ps, vsz_psutil)

    def test_process_name(self):
        # use command + arg since "comm" keyword not supported on all platforms
        name_ps = ps("ps --no-headers -o command -p %s" %self.pid).split(' ')[0]
        # remove path if there is any, from the command
        name_ps = os.path.basename(name_ps).lower()
        name_psutil = psutil.Process(self.pid).name.lower()
        self.assertEqual(name_ps, name_psutil)

    def test_process_create_time(self):
        time_ps = ps("ps --no-headers -o start -p %s" %self.pid).split(' ')[0]
        time_psutil = psutil.Process(self.pid).create_time
        time_psutil = datetime.datetime.fromtimestamp(
            time_psutil).strftime("%H:%M:%S")
        self.assertEqual(time_ps, time_psutil)

    def test_process_exe(self):
        ps_pathname = ps("ps --no-headers -o command -p %s" %self.pid).split(' ')[0]
        psutil_pathname = psutil.Process(self.pid).exe
        try:
            self.assertEqual(ps_pathname, psutil_pathname)
        except AssertionError:
            # certain platforms such as BSD are more accurate returning:
            # "/usr/local/bin/python2.7"
            # ...instead of:
            # "/usr/local/bin/python"
            # We do not want to consider this difference in accuracy
            # an error.
            adjusted_ps_pathname = ps_pathname[:len(ps_pathname)]
            self.assertEqual(ps_pathname, adjusted_ps_pathname)

    def test_process_cmdline(self):
        ps_cmdline = ps("ps --no-headers -o command -p %s" %self.pid)
        psutil_cmdline = " ".join(psutil.Process(self.pid).cmdline)
        self.assertEqual(ps_cmdline, psutil_cmdline)

    def test_get_pids(self):
        # Note: this test might fail if the OS is starting/killing
        # other processes in the meantime
        p = get_test_subprocess(["ps", "ax", "-o", "pid"], stdout=subprocess.PIPE)
        output = p.communicate()[0].strip()
        if sys.version_info >= (3,):
            output = str(output, sys.stdout.encoding)
        output = output.replace('PID', '')
        p.wait()
        pids_ps = []
        for pid in output.split('\n'):
            if pid:
                pids_ps.append(int(pid.strip()))
        # remove ps subprocess pid which is supposed to be dead in meantime
        pids_ps.remove(p.pid)
        pids_psutil = psutil.get_pid_list()
        pids_ps.sort()
        pids_psutil.sort()
        # on OSX ps doesn't show pid 0
        if OSX and 0 not in pids_ps:
            pids_ps.insert(0, 0)
        if pids_ps != pids_psutil:
            difference = [x for x in pids_psutil if x not in pids_ps] + \
                         [x for x in pids_ps if x not in pids_psutil]
            self.fail("difference: " + str(difference))

    def test_nic_names(self):
        # every NIC psutil reports should appear in `ifconfig -a` output
        p = subprocess.Popen("ifconfig -a", shell=1, stdout=subprocess.PIPE)
        output = p.communicate()[0].strip()
        if sys.version_info >= (3,):
            output = str(output, sys.stdout.encoding)
        for nic in psutil.network_io_counters(pernic=True).keys():
            for line in output.split():
                if line.startswith(nic):
                    break
            else:
                self.fail("couldn't find %s nic in 'ifconfig -a' output" % nic)

    def test_get_users(self):
        # cross-check psutil.get_users() against the `who` utility
        out = sh("who")
        lines = out.split('\n')
        users = [x.split()[0] for x in lines]
        self.assertEqual(len(users), len(psutil.get_users()))
        terminals = [os.path.join("/dev", x.split()[1]) for x in lines]
        for u in psutil.get_users():
            self.assertTrue(u.name in users, u.name)
            self.assertTrue(u.terminal in terminals, u.terminal)
if __name__ == '__main__':
    # run only the POSIX comparison suite, with verbose output
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(PosixSpecificTestCase))
    unittest.TextTestRunner(verbosity=2).run(test_suite)
| {
"content_hash": "a3ad40d93ce446f9b3d815e63fb394f4",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 84,
"avg_line_length": 38.566473988439306,
"alnum_prop": 0.595773381294964,
"repo_name": "tamentis/psutil",
"id": "4f6eae690cbf5c7a13c8925d10e3e83bd58c1cf2",
"size": "6881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/_posix.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "231150"
},
{
"name": "Python",
"bytes": "247951"
}
],
"symlink_target": ""
} |
"""
Testing the elmr.utils module
"""
##########################################################################
## Imports
##########################################################################
import unittest
from dateutil.tz import tzutc
from elmr.utils import classproperty
from datetime import datetime, timedelta
from elmr.utils import utcnow, months_since, months_between
##########################################################################
## Time Helper Tests
##########################################################################
class TimeHelperTests(unittest.TestCase):
    """Tests for the UTC and month-arithmetic helpers in elmr.utils."""

    def test_utcnow(self):
        """
        Assert that timezone aware datetime is returned from utcnow
        """
        dt = utcnow()
        self.assertIsNotNone(dt.tzinfo)
        self.assertEqual(dt.tzname(), 'UTC')

    def test_months_between(self):
        """
        Test the months between helper
        """
        dta = datetime(2015, 4, 15, 12, 0, 0)
        dtb = dta + timedelta(days=29)
        self.assertEqual(months_between(dta, dtb), 0)
        dtb = dta + timedelta(days=30)
        self.assertEqual(months_between(dta, dtb), 1)
        dta = datetime(2015, 5, 15, 12, 0, 0)
        dtb = dta + timedelta(days=30)
        self.assertEqual(months_between(dta, dtb), 0)
        dtb = dta + timedelta(days=31)
        self.assertEqual(months_between(dta, dtb), 1)
        dta = datetime(2015, 2, 14, 12, 0, 0)
        dtb = dta + timedelta(days=27)
        self.assertEqual(months_between(dta, dtb), 0)
        dtb = dta + timedelta(days=28)
        self.assertEqual(months_between(dta, dtb), 1)
        dta = datetime(2015, 3, 15, 12, 0, 0)
        dtb = dta + timedelta(days=400)
        self.assertEqual(months_between(dta, dtb), 13)
        dta = datetime(2015, 3, 15, 12, 0, 0)
        dtb = dta + timedelta(days=1000)
        self.assertEqual(months_between(dta, dtb), 32)

    def test_months_between_tz_aware(self):
        """
        Test the months between helper with timezones
        """
        dta = datetime(2015, 4, 15, 12, 0, 0)
        # BUG FIX: datetime.replace returns a new object; the result was
        # previously discarded, so this test silently ran on naive datetimes.
        dta = dta.replace(tzinfo=tzutc())
        dtb = dta + timedelta(days=29)
        self.assertEqual(months_between(dta, dtb), 0)
        dtb = dta + timedelta(days=30)
        self.assertEqual(months_between(dta, dtb), 1)
        dtb = dta + timedelta(days=91)
        self.assertEqual(months_between(dta, dtb), 3)

    def test_months_since(self):
        """
        Test timezone aware months since
        """
        dt = utcnow() - timedelta(days=235)
        self.assertEqual(months_since(dt), 7)
##########################################################################
## Descriptor and Decorator Tests
##########################################################################
class DecoratorTests(unittest.TestCase):
    """
    Tests for the descriptor and decorator helpers in elmr.utils.
    """

    class Bar(object):
        # Backing class attribute exposed through the property below.
        _bar = 42

        @classproperty
        def bar(cls):
            return cls._bar

    def test_get_classproperty(self):
        """
        Check that the class property works correctly
        """
        # Accessing the property on the *class* (no instance) must
        # yield the backing attribute's value.
        expected = self.Bar._bar
        self.assertEqual(self.Bar.bar, expected)
| {
"content_hash": "1b73ccec02b30b72505239f6affaf3f0",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 74,
"avg_line_length": 28.522935779816514,
"alnum_prop": 0.508201994210357,
"repo_name": "bbengfort/jobs-report",
"id": "1fef334207449111e32c8070bf64dd18f457874f",
"size": "3394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "82716"
},
{
"name": "HTML",
"bytes": "45222"
},
{
"name": "JavaScript",
"bytes": "32869"
},
{
"name": "Makefile",
"bytes": "818"
},
{
"name": "PLpgSQL",
"bytes": "324502"
},
{
"name": "Python",
"bytes": "121814"
}
],
"symlink_target": ""
} |
# Public data-loading helpers re-exported by this package.
__all__ = ["data_sunspot", "data_synthetic", "data_random"]
"content_hash": "9e79d150968cb0dcd933156775180573",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 59,
"avg_line_length": 59,
"alnum_prop": 0.6271186440677966,
"repo_name": "aaskov/nsp",
"id": "e0b2a3b597144b956472550ebc46f75506d6575b",
"size": "59",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39972"
}
],
"symlink_target": ""
} |
import contextlib
from neutron_lib import constants as n_consts
import webob.exc
from neutron.api import extensions
from neutron.common import config
from neutron import context
import neutron.extensions
from neutron.extensions import metering
from neutron.plugins.common import constants
from neutron.services.metering import metering_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2
# Fully qualified class path of the metering service plugin under test.
DB_METERING_PLUGIN_KLASS = (
    "neutron.services.metering."
    "metering_plugin.MeteringPlugin"
)
# Search path used to load the API extensions shipped with neutron.
extensions_path = ':'.join(neutron.extensions.__path__)
class MeteringPluginDbTestCaseMixin(object):
    """Helpers for creating metering labels and rules through the API.

    The ``_create_*`` methods issue raw requests and return the raw
    response; the ``_make_*`` methods deserialize successful responses
    and raise on HTTP errors; the context managers wrap creation for
    use in ``with`` blocks.
    """

    def _create_metering_label(self, fmt, name, description, **kwargs):
        # Assemble the request body; tenant defaults to 'test-tenant'.
        label = {
            'name': name,
            'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
            'shared': kwargs.get('shared', False),
            'description': description,
        }
        request = self.new_create_request('metering-labels',
                                          {'metering_label': label}, fmt)
        if kwargs.get('set_context') and 'tenant_id' in kwargs:
            # create a specific auth context for this request
            request.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'],
                is_admin=kwargs.get('is_admin', True))
        return request.get_response(self.ext_api)

    def _make_metering_label(self, fmt, name, description, **kwargs):
        response = self._create_metering_label(fmt, name, description,
                                               **kwargs)
        if response.status_int >= 400:
            raise webob.exc.HTTPClientError(code=response.status_int)
        return self.deserialize(fmt, response)

    def _create_metering_label_rule(self, fmt, metering_label_id, direction,
                                    remote_ip_prefix, excluded, **kwargs):
        rule = {
            'metering_label_id': metering_label_id,
            'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
            'direction': direction,
            'excluded': excluded,
            'remote_ip_prefix': remote_ip_prefix,
        }
        request = self.new_create_request('metering-label-rules',
                                          {'metering_label_rule': rule}, fmt)
        if kwargs.get('set_context') and 'tenant_id' in kwargs:
            # create a specific auth context for this request
            request.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        return request.get_response(self.ext_api)

    def _make_metering_label_rule(self, fmt, metering_label_id, direction,
                                  remote_ip_prefix, excluded, **kwargs):
        response = self._create_metering_label_rule(
            fmt, metering_label_id, direction, remote_ip_prefix, excluded,
            **kwargs)
        if response.status_int >= 400:
            raise webob.exc.HTTPClientError(code=response.status_int)
        return self.deserialize(fmt, response)

    @contextlib.contextmanager
    def metering_label(self, name='label', description='desc',
                       fmt=None, **kwargs):
        # Fall back to the test case's default serialization format.
        fmt = fmt or self.fmt
        yield self._make_metering_label(fmt, name, description, **kwargs)

    @contextlib.contextmanager
    def metering_label_rule(self, metering_label_id=None, direction='ingress',
                            remote_ip_prefix='10.0.0.0/24',
                            excluded='false', fmt=None):
        fmt = fmt or self.fmt
        yield self._make_metering_label_rule(fmt, metering_label_id,
                                             direction, remote_ip_prefix,
                                             excluded)
class MeteringPluginDbTestCase(
        test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
        MeteringPluginDbTestCaseMixin):
    """Base test case that wires up the metering service plugin and its
    API extension middleware."""

    fmt = 'json'

    # Map each metering resource name (dashed form) to its URI prefix.
    resource_prefix_map = {
        resource.replace('_', '-'): "/metering"
        for resource in metering.RESOURCE_ATTRIBUTE_MAP
    }

    def setUp(self, plugin=None):
        super(MeteringPluginDbTestCase, self).setUp(
            plugin=plugin,
            service_plugins={'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
        )
        # Instantiate the plugin and expose its extension through the
        # standard extension middleware so tests can hit the REST API.
        self.plugin = metering_plugin.MeteringPlugin()
        manager = extensions.PluginAwareExtensionManager(
            extensions_path,
            {constants.METERING: self.plugin}
        )
        app = config.load_paste_app('extensions_test_app')
        self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=manager)
class TestMetering(MeteringPluginDbTestCase):
    """CRUD and listing tests for metering labels and label rules."""
    # Labels: create (plain and shared), update (not implemented),
    # delete, and list.
    def test_create_metering_label(self):
        name = 'my label'
        description = 'my metering label'
        keys = [('name', name,), ('description', description)]
        with self.metering_label(name, description) as metering_label:
            for k, v, in keys:
                self.assertEqual(metering_label['metering_label'][k], v)
    def test_create_metering_label_shared(self):
        name = 'my label'
        description = 'my metering label'
        shared = True
        keys = [('name', name,), ('description', description),
                ('shared', shared)]
        with self.metering_label(name, description,
                                 shared=shared) as metering_label:
            for k, v, in keys:
                self.assertEqual(metering_label['metering_label'][k], v)
    def test_update_metering_label(self):
        name = 'my label'
        description = 'my metering label'
        data = {'metering_label': {}}
        # Label update is not supported: the API must answer 501.
        with self.metering_label(name, description) as metering_label:
            metering_label_id = metering_label['metering_label']['id']
            self._update('metering-labels', metering_label_id, data,
                         webob.exc.HTTPNotImplemented.code)
    def test_delete_metering_label(self):
        name = 'my label'
        description = 'my metering label'
        with self.metering_label(name, description) as metering_label:
            metering_label_id = metering_label['metering_label']['id']
            # 204 No Content on successful delete.
            self._delete('metering-labels', metering_label_id, 204)
    def test_list_metering_label(self):
        name = 'my label'
        description = 'my metering label'
        with self.metering_label(name, description) as v1,\
                self.metering_label(name, description) as v2:
            metering_label = (v1, v2)
            self._test_list_resources('metering-label', metering_label)
    # Rules: create, update (not implemented), delete, list, and
    # overlap/conflict handling.
    def test_create_metering_label_rule(self):
        name = 'my label'
        description = 'my metering label'
        with self.metering_label(name, description) as metering_label:
            metering_label_id = metering_label['metering_label']['id']
            direction = 'egress'
            remote_ip_prefix = '192.168.0.0/24'
            excluded = True
            keys = [('metering_label_id', metering_label_id),
                    ('direction', direction),
                    ('excluded', excluded),
                    ('remote_ip_prefix', remote_ip_prefix)]
            with self.metering_label_rule(metering_label_id,
                                          direction,
                                          remote_ip_prefix,
                                          excluded) as label_rule:
                for k, v, in keys:
                    self.assertEqual(label_rule['metering_label_rule'][k], v)
    def test_update_metering_label_rule(self):
        name = 'my label'
        description = 'my metering label'
        direction = 'egress'
        remote_ip_prefix = '192.168.0.0/24'
        data = {'metering_label_rule': {}}
        # Rule update is not supported: the API must answer 501.
        with self.metering_label(name, description) as metering_label, \
            self.metering_label_rule(
                metering_label['metering_label']['id'],
                direction, remote_ip_prefix) as label_rule:
            rule_id = label_rule['metering_label_rule']['id']
            self._update('metering-label-rules', rule_id, data,
                         webob.exc.HTTPNotImplemented.code)
    def test_delete_metering_label_rule(self):
        name = 'my label'
        description = 'my metering label'
        with self.metering_label(name, description) as metering_label:
            metering_label_id = metering_label['metering_label']['id']
            direction = 'egress'
            remote_ip_prefix = '192.168.0.0/24'
            excluded = True
            with self.metering_label_rule(metering_label_id,
                                          direction,
                                          remote_ip_prefix,
                                          excluded) as label_rule:
                rule_id = label_rule['metering_label_rule']['id']
                self._delete('metering-label-rules', rule_id, 204)
    def test_list_metering_label_rule(self):
        name = 'my label'
        description = 'my metering label'
        with self.metering_label(name, description) as metering_label:
            metering_label_id = metering_label['metering_label']['id']
            direction = 'egress'
            remote_ip_prefix = '192.168.0.0/24'
            excluded = True
            # Two rules on the same label, differing only in direction.
            with self.metering_label_rule(metering_label_id,
                                          direction,
                                          remote_ip_prefix,
                                          excluded) as v1,\
                    self.metering_label_rule(metering_label_id,
                                             'ingress',
                                             remote_ip_prefix,
                                             excluded) as v2:
                metering_label_rule = (v1, v2)
                self._test_list_resources('metering-label-rule',
                                          metering_label_rule)
    def test_create_metering_label_rules(self):
        name = 'my label'
        description = 'my metering label'
        with self.metering_label(name, description) as metering_label:
            metering_label_id = metering_label['metering_label']['id']
            direction = 'egress'
            remote_ip_prefix = '192.168.0.0/24'
            excluded = True
            # Second rule is a non-excluded catch-all (0.0.0.0/0).
            with self.metering_label_rule(metering_label_id,
                                          direction,
                                          remote_ip_prefix,
                                          excluded) as v1,\
                    self.metering_label_rule(metering_label_id,
                                             direction,
                                             n_consts.IPv4_ANY,
                                             False) as v2:
                metering_label_rule = (v1, v2)
                self._test_list_resources('metering-label-rule',
                                          metering_label_rule)
    def test_create_overlap_metering_label_rules(self):
        name = 'my label'
        description = 'my metering label'
        with self.metering_label(name, description) as metering_label:
            metering_label_id = metering_label['metering_label']['id']
            direction = 'egress'
            remote_ip_prefix1 = '192.168.0.0/24'
            remote_ip_prefix2 = '192.168.0.0/16'
            excluded = True
            # Overlapping CIDRs on the same label must be rejected (409).
            with self.metering_label_rule(metering_label_id,
                                          direction,
                                          remote_ip_prefix1,
                                          excluded):
                res = self._create_metering_label_rule(self.fmt,
                                                       metering_label_id,
                                                       direction,
                                                       remote_ip_prefix2,
                                                       excluded)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
    def test_create_metering_label_rule_two_labels(self):
        name1 = 'my label 1'
        name2 = 'my label 2'
        description = 'my metering label'
        # Identical rules are allowed when attached to different labels.
        with self.metering_label(name1, description) as metering_label1:
            metering_label_id1 = metering_label1['metering_label']['id']
            with self.metering_label(name2, description) as metering_label2:
                metering_label_id2 = metering_label2['metering_label']['id']
                direction = 'egress'
                remote_ip_prefix = '192.168.0.0/24'
                excluded = True
                with self.metering_label_rule(metering_label_id1,
                                              direction,
                                              remote_ip_prefix,
                                              excluded) as v1,\
                        self.metering_label_rule(metering_label_id2,
                                                 direction,
                                                 remote_ip_prefix,
                                                 excluded) as v2:
                    metering_label_rule = (v1, v2)
                    self._test_list_resources('metering-label-rule',
                                              metering_label_rule)
| {
"content_hash": "c776d175527b1eff694bb7a05da971d2",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 79,
"avg_line_length": 42.5527950310559,
"alnum_prop": 0.5091957378484893,
"repo_name": "igor-toga/local-snat",
"id": "594a3c4a1a0ca9efc718b22aa738e7e313cc6c38",
"size": "14309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/db/metering/test_metering_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9636936"
},
{
"name": "Shell",
"bytes": "14072"
}
],
"symlink_target": ""
} |
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-NetComputer',
'Author': ['@harmj0y'],
'Description': ('Queries the domain for current computer objects. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'Return computers with a specific name, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'SPN' : {
'Description' : 'Return computers with a specific service principal name, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'OperatingSystem' : {
'Description' : 'Return computers with a specific operating system, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'Filter' : {
'Description' : 'A customized ldap filter string to use, e.g. "(description=*admin*)"',
'Required' : False,
'Value' : ''
},
'Printers' : {
'Description' : 'Switch. Return only printers.',
'Required' : False,
'Value' : ''
},
'Ping' : {
'Description' : "Switch. Ping each host to ensure it's up before enumerating.",
'Required' : False,
'Value' : ''
},
'Unconstrained' : {
'Description' : "Switch. Return computer objects that have unconstrained delegation.",
'Required' : False,
'Value' : ''
},
'FullData' : {
'Description' : "Switch. Return full computer objects instead of just system names (the default).",
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'DomainController' : {
'Description' : 'Domain controller to reflect LDAP queries through.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
| {
"content_hash": "66444749b38749fd85e1480a0f15304d",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 121,
"avg_line_length": 37.588235294117645,
"alnum_prop": 0.4624413145539906,
"repo_name": "cobbr/ObfuscatedEmpire",
"id": "94d2fc1ac48dae3cb553cb17f7818838ad0c37df",
"size": "5112",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/situational_awareness/network/powerview/get_computer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "16082284"
},
{
"name": "Python",
"bytes": "2724059"
},
{
"name": "Shell",
"bytes": "8067"
}
],
"symlink_target": ""
} |
"""
==================================================
Test Images and Image Creation (:mod:`mango.data`)
==================================================
.. currentmodule:: mango.data
Functions for generating images with content.
Functions
=========
.. autosummary::
:toctree: generated/
fill_annular_circular_cylinder - Voxels which lie inside an annular circular cylinder are assigned a specified fill value.
fill_box - Voxels which lie inside a box are assigned a specified fill value.
fill_circular_cylinder - Voxels which lie inside a circular cylinder are assigned a specified fill value.
fill_ellipsoid - Voxels which lie inside an ellipsoid are assigned a specified fill value.
createCheckerDds - Creates a 3D checker-board :obj:`Dds` image.
chi_squared_noise - Creates a :obj:`mango.Dds` of Chi-Squared distributed noise.
gaussian_noise - Creates a :obj:`mango.Dds` of Gaussian (Normal) distributed noise.
gaussian_noise_like - Creates a :obj:`mango.Dds` of Gaussian (Normal) distributed noise.
"""
# Pull the public factory functions into the package namespace.
from ._factory import *
# Re-export every name that does not look private.
__all__ = [s for s in dir() if not s.startswith('_')]
| {
"content_hash": "dd52dc338bb5d7714ce949a9c0eead75",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 125,
"avg_line_length": 36.45161290322581,
"alnum_prop": 0.6619469026548672,
"repo_name": "pymango/pymango",
"id": "00d67cc3ded74c1f7cfc44f26d8f68bb9d92ba51",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/python/mango/data/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CMake",
"bytes": "1621"
},
{
"name": "Python",
"bytes": "652240"
}
],
"symlink_target": ""
} |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (DataConversionWarning,
check_X_y, NotFittedError)
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
    """Compute y * np.dot(X, w), splitting off an optional intercept.

    If ``w`` has one more entry than X has features, its last entry is
    treated as the intercept and stripped from the coefficient vector.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector, optionally with a trailing intercept.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : ndarray, shape (n_samples,)
        Array of labels.

    Returns
    -------
    w : ndarray
        Coefficients with the intercept (if any) removed.
    intercept : float
        The intercept term, 0. when none is being fit.
    yz : ndarray
        Element-wise product of y and the decision values.
    """
    intercept = 0.
    if w.size == X.shape[1] + 1:
        # Last coefficient is the intercept.
        w, intercept = w[:-1], w[-1]
    z = safe_sparse_dot(X, w) + intercept
    return w, intercept, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
    """Compute the L2-penalized logistic loss and its gradient.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector, optionally with a trailing intercept.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : ndarray, shape (n_samples,)
        Array of labels.

    alpha : float
        Regularization parameter. alpha is equal to 1 / C.

    sample_weight : array-like, shape (n_samples,) optional
        Per-sample weights; defaults to unit weights.

    Returns
    -------
    out : float
        Logistic loss.

    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient (intercept gradient in the last slot, if fit).
    """
    n_features = X.shape[1]
    grad = np.empty_like(w)
    w, c, yz = _intercept_dot(w, X, y)

    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])

    # Negative log-likelihood plus the L2 penalty on the coefficients.
    out = .5 * alpha * np.dot(w, w) - np.sum(sample_weight * log_logistic(yz))

    z0 = sample_weight * (expit(yz) - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w

    # Intercept gradient occupies the trailing slot when it is being fit.
    if grad.shape[0] > n_features:
        grad[-1] = z0.sum()
    return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
    """Compute the L2-penalized logistic loss.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector, optionally with a trailing intercept.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : ndarray, shape (n_samples,)
        Array of labels.

    alpha : float
        Regularization parameter. alpha is equal to 1 / C.

    sample_weight : array-like, shape (n_samples,) optional
        Per-sample weights; defaults to unit weights.

    Returns
    -------
    out : float
        Logistic loss.
    """
    w, c, yz = _intercept_dot(w, X, y)

    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])

    # Logistic loss is the negative of the log of the logistic function,
    # plus the L2 penalty (the intercept is not penalized).
    penalty = .5 * alpha * np.dot(w, w)
    return penalty - np.sum(sample_weight * log_logistic(yz))
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
    """Compute the gradient and a Hessian-vector-product callable for
    the logistic loss.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector, optionally with a trailing intercept.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : ndarray, shape (n_samples,)
        Array of labels.

    alpha : float
        Regularization parameter. alpha is equal to 1 / C.

    sample_weight : array-like, shape (n_samples,) optional
        Per-sample weights; defaults to unit weights.

    Returns
    -------
    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient.

    Hs : callable
        Maps a vector s to the product of the Hessian with s.
    """
    n_samples, n_features = X.shape
    grad = np.empty_like(w)
    fit_intercept = grad.shape[0] > n_features
    w, c, yz = _intercept_dot(w, X, y)

    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])

    z = expit(yz)
    z0 = sample_weight * (z - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
    if fit_intercept:
        grad[-1] = z0.sum()

    # Diagonal weights of the Hessian: sigma(z) * (1 - sigma(z)),
    # scaled by the sample weights.
    d = sample_weight * z * (1 - z)
    if sparse.issparse(X):
        dX = safe_sparse_dot(
            sparse.dia_matrix((d, 0), shape=(n_samples, n_samples)), X)
    else:
        # Precompute the weighted design matrix once.
        dX = d[:, np.newaxis] * X

    if fit_intercept:
        # Second derivative w.r.t. the intercept.  With sparse X this
        # sum yields a matrix object, hence the squeeze.
        dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))

    def Hs(s):
        ret = np.empty_like(s)
        ret[:n_features] = (X.T.dot(dX.dot(s[:n_features]))
                            + alpha * s[:n_features])
        if fit_intercept:
            # Cross terms and the pure intercept term.
            ret[:n_features] += s[-1] * dd_intercept
            ret[-1] = dd_intercept.dot(s[:n_features]) + d.sum() * s[-1]
        return ret

    return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
    """Compute the multinomial loss and class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector, optionally with per-class intercepts.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.

    alpha : float
        Regularization parameter. alpha is equal to 1 / C.

    sample_weight : array-like, shape (n_samples,) optional
        Per-sample weights.

    Returns
    -------
    loss : float
        Multinomial loss.

    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities.

    w : ndarray, shape (n_classes, n_features)
        Reshaped param vector excluding intercept terms.
    """
    n_classes = Y.shape[1]
    n_features = X.shape[1]
    fit_intercept = w.size == (n_classes * (n_features + 1))
    w = w.reshape(n_classes, -1)
    sample_weight = sample_weight[:, np.newaxis]
    if fit_intercept:
        # Last column of each class row holds that class's intercept.
        intercept = w[:, -1]
        w = w[:, :-1]
    else:
        intercept = 0

    # Log-probabilities via the log-sum-exp trick for numerical
    # stability.
    activation = safe_sparse_dot(X, w.T) + intercept
    log_prob = activation - logsumexp(activation, axis=1)[:, np.newaxis]

    loss = (0.5 * alpha * squared_norm(w)
            - (sample_weight * Y * log_prob).sum())
    # Exponentiate in place to turn log-probabilities into
    # probabilities.
    prob = np.exp(log_prob, log_prob)
    return loss, prob, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
    """Compute the multinomial loss, gradient and class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector, optionally with per-class intercepts.

    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.

    alpha : float
        Regularization parameter. alpha is equal to 1 / C.

    sample_weight : array-like, shape (n_samples,) optional
        Per-sample weights.

    Returns
    -------
    loss : float
        Multinomial loss.

    grad : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.

    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities
    """
    n_classes = Y.shape[1]
    n_features = X.shape[1]
    fit_intercept = (w.size == n_classes * (n_features + 1))
    # One extra column per class for the intercept gradient, if fit.
    grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
    loss, prob, w = _multinomial_loss(w, X, Y, alpha, sample_weight)

    sample_weight = sample_weight[:, np.newaxis]
    residual = sample_weight * (prob - Y)
    grad[:, :n_features] = safe_sparse_dot(residual.T, X) + alpha * w
    if fit_intercept:
        grad[:, -1] = residual.sum(axis=0)
    return loss, grad.ravel(), prob
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
    """
    Computes the gradient and the Hessian, in the case of a multinomial loss.
    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
    Returns
    -------
    grad : array, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.
    hessp : callable
        Function that takes in a vector input of shape (n_classes * n_features)
        or (n_classes * (n_features + 1)) and returns matrix-vector product
        with hessian.
    References
    ----------
    Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
    http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
    """
    n_features = X.shape[1]
    n_classes = Y.shape[1]
    # The intercept is fit iff w carries one extra entry per class.
    fit_intercept = w.size == (n_classes * (n_features + 1))
    # `loss` is unused. Refactoring to avoid computing it does not
    # significantly speed up the computation and decreases readability
    loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
    sample_weight = sample_weight[:, np.newaxis]
    # Hessian-vector product derived by applying the R-operator on the gradient
    # of the multinomial loss function.
    def hessp(v):
        # Reshape the flat input into one row per class, splitting off
        # per-class intercept components when they are being fit.
        v = v.reshape(n_classes, -1)
        if fit_intercept:
            inter_terms = v[:, -1]
            v = v[:, :-1]
        else:
            inter_terms = 0
        # r_yhat holds the result of applying the R-operator on the multinomial
        # estimator.
        r_yhat = safe_sparse_dot(X, v.T)
        r_yhat += inter_terms
        r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
        r_yhat *= p
        r_yhat *= sample_weight
        hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
        hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
        # L2 penalty contributes alpha * v to the coefficient part.
        hessProd[:, :n_features] += v * alpha
        if fit_intercept:
            hessProd[:, -1] = r_yhat.sum(axis=0)
        return hessProd.ravel()
    return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual, sample_weight):
    """Validate that the solver is compatible with the other options.

    Raises ValueError when the solver is unknown, when multi_class is
    neither 'multinomial' nor 'ovr', or when the chosen solver does not
    support the requested multi_class mode, penalty, dual formulation,
    or sample weights.
    """
    if solver not in ('liblinear', 'newton-cg', 'lbfgs', 'sag'):
        raise ValueError(
            "Logistic Regression supports only liblinear, newton-cg, "
            "lbfgs and sag solvers, got %s" % solver)

    if multi_class not in ('multinomial', 'ovr'):
        raise ValueError(
            "multi_class should be either multinomial or ovr, "
            "got %s" % multi_class)

    if multi_class == 'multinomial' and solver in ('liblinear', 'sag'):
        raise ValueError(
            "Solver %s does not support a multinomial backend." % solver)

    if solver != 'liblinear':
        # Only liblinear handles l1 penalties and the dual formulation.
        if penalty != 'l2':
            raise ValueError(
                "Solver %s supports only l2 penalties, "
                "got %s penalty." % (solver, penalty))
        if dual:
            raise ValueError(
                "Solver %s supports only "
                "dual=False, got dual=%s" % (solver, dual))

    if solver == 'liblinear' and sample_weight is not None:
        raise ValueError(
            "Solver %s does not support sample weights." % solver)
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                             max_iter=100, tol=1e-4, verbose=0,
                             solver='lbfgs', coef=None, copy=False,
                             class_weight=None, dual=False, penalty='l2',
                             intercept_scaling=1., multi_class='ovr',
                             random_state=None, check_input=True,
                             max_squared_sum=None, sample_weight=None):
    """Compute a Logistic Regression model for a list of regularization
    parameters.

    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.

    y : array-like, shape (n_samples,)
        Input data, target values.

    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
        Numerical solver to use.

    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.

    copy : bool, default False
        Whether or not to produce a copy of the data. A copy is not required
        anymore. This parameter is deprecated and will be removed in 0.19.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties.

    intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.

    Cs : ndarray
        Grid of Cs used for cross-validation.

    n_iter : array, shape (n_cs,)
        Actual number of iteration for each Cs.

    Notes
    -----
    You might get slighly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    """
    if copy:
        warnings.warn("A copy is not required anymore. The 'copy' parameter "
                      "is deprecated and will be removed in 0.19.",
                      DeprecationWarning)

    # An integer Cs means "this many values, log-spaced between 1e-4 and 1e4".
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, Cs)

    _check_solver_option(solver, multi_class, penalty, dual, sample_weight)

    # Preprocessing.
    if check_input or copy:
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape
    classes = np.unique(y)
    random_state = check_random_state(random_state)

    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]

    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    if sample_weight is not None:
        sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
        check_consistent_length(y, sample_weight)
    else:
        sample_weight = np.ones(X.shape[0])

    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()

    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        if solver == "liblinear":
            if classes.size == 2:
                # Reconstruct the weights with keys 1 and -1
                temp = {1: class_weight[pos_class],
                        -1: class_weight[classes[0]]}
                class_weight = temp.copy()
            else:
                raise ValueError("In LogisticRegressionCV the liblinear "
                                 "solver cannot handle multiclass with "
                                 "class_weight of type dict. Use the lbfgs, "
                                 "newton-cg or sag solvers or set "
                                 "class_weight='balanced'")
        else:
            # Fold the class weights into the per-sample weights.
            class_weight_ = compute_class_weight(class_weight, classes, y)
            sample_weight *= class_weight_[le.fit_transform(y)]

    # For doing a ovr, we need to mask the labels first. for the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept))
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=np.float64)
        y_bin[~mask] = -1.
        # for compute_class_weight
        # 'auto' is deprecated and will be removed in 0.19
        if class_weight in ("auto", "balanced"):
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]
    else:
        # Multinomial: one weight vector per class, binarized targets.
        lbin = LabelBinarizer()
        Y_binarized = lbin.fit_transform(y)
        if Y_binarized.shape[1] == 1:
            # Binary problem: LabelBinarizer yields a single column; expand
            # to two complementary columns.
            Y_binarized = np.hstack([1 - Y_binarized, Y_binarized])
        w0 = np.zeros((Y_binarized.shape[1], n_features + int(fit_intercept)),
                      order='F')

    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_vectors = classes.size
            if n_vectors == 2:
                n_vectors = 1

            if (coef.shape[0] != n_vectors or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))
            w0[:, :coef.shape[1]] = coef

    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        w0 = w0.ravel()
        target = Y_binarized
        if solver == 'lbfgs':
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_grad_hess
    else:
        target = y_bin
        if solver == 'lbfgs':
            func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            func = _logistic_loss
            grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
            hess = _logistic_grad_hess

    coefs = list()
    warm_start_sag = {'coef': w0}
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    # Warm-start along the regularization path: each C starts from the
    # previous solution w0.
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            try:
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
            except TypeError:
                # old scipy doesn't have maxiter
                w0, loss, info = optimize.fmin_l_bfgs_b(
                    func, w0, fprime=None,
                    args=(X, target, 1. / C, sample_weight),
                    iprint=(verbose > 0) - 1, pgtol=tol)
            if info["warnflag"] == 1 and verbose > 0:
                warnings.warn("lbfgs failed to converge. Increase the number "
                              "of iterations.")
            try:
                n_iter_i = info['nit'] - 1
            except KeyError:
                # Old scipy versions do not report 'nit' in the info dict;
                # fall back to the number of function evaluations. (Was a
                # bare `except:`, which would also have hidden real errors.)
                n_iter_i = info['funcalls'] - 1
        elif solver == 'newton-cg':
            args = (X, target, 1. / C, sample_weight)
            w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
                                     maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, class_weight,
                penalty, dual, verbose, max_iter, tol, random_state)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()
        elif solver == 'sag':
            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, 'log', 1. / C, max_iter, tol,
                verbose, random_state, False, max_squared_sum,
                warm_start_sag)
        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)

        if multi_class == 'multinomial':
            multi_w0 = np.reshape(w0, (classes.size, -1))
            if classes.size == 2:
                # Keep only the positive-class row for binary problems.
                multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0)
        else:
            coefs.append(w0.copy())

        n_iter[i] = n_iter_i
    return coefs, np.array(Cs), n_iter
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
                          scoring=None, fit_intercept=False,
                          max_iter=100, tol=1e-4, class_weight=None,
                          verbose=0, solver='lbfgs', penalty='l2',
                          dual=False, intercept_scaling=1.,
                          multi_class='ovr', random_state=None,
                          max_squared_sum=None, sample_weight=None):
    """Computes scores across logistic_regression_path

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.

    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target labels.

    train : list of indices
        The indices of the train set.

    test : list of indices
        The indices of the test set.

    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.

    Cs : list of floats | int
        Each of the values in Cs describes the inverse of
        regularization strength. If Cs is as an int, then a grid of Cs
        values are chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs are used.

    scoring : callable
        For a list of scoring functions that can be used, look at
        :mod:`sklearn.metrics`. The default scoring option used is
        accuracy_score.

    fit_intercept : bool
        If False, then the bias term is set to zero. Else the last
        term of each coef_ gives us the intercept.

    max_iter : int
        Maximum number of iterations for the solver.

    tol : float
        Tolerance for stopping criteria.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
        Decides which solver to use.

    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    intercept_scaling : float, default 1.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solver.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.

    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.

    Cs : ndarray
        Grid of Cs used for cross-validation.

    scores : ndarray, shape (n_cs,)
        Scores obtained for each Cs.

    n_iter : array, shape(n_cs,)
        Actual number of iteration for each Cs.
    """
    _check_solver_option(solver, multi_class, penalty, dual, sample_weight)

    X_train = X[train]
    X_test = X[test]
    y_train = y[train]
    y_test = y[test]

    # Restrict the sample weights to the training fold.
    if sample_weight is not None:
        sample_weight = sample_weight[train]

    coefs, Cs, n_iter = logistic_regression_path(
        X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
        solver=solver, max_iter=max_iter, class_weight=class_weight,
        pos_class=pos_class, multi_class=multi_class,
        tol=tol, verbose=verbose, dual=dual, penalty=penalty,
        intercept_scaling=intercept_scaling, random_state=random_state,
        check_input=False, max_squared_sum=max_squared_sum,
        sample_weight=sample_weight)

    # Shell estimator used only for scoring: coefficients are assigned
    # manually for each C below.
    log_reg = LogisticRegression(fit_intercept=fit_intercept)

    # The score method of Logistic Regression has a classes_ attribute.
    if multi_class == 'ovr':
        log_reg.classes_ = np.array([-1, 1])
    elif multi_class == 'multinomial':
        log_reg.classes_ = np.unique(y_train)
    else:
        # Fixed: was "%d", which raised TypeError on the str multi_class
        # instead of the intended ValueError.
        raise ValueError("multi_class should be either multinomial or ovr, "
                         "got %s" % multi_class)

    if pos_class is not None:
        # Mask the test labels into the +1/-1 OvR encoding.
        mask = (y_test == pos_class)
        y_test = np.ones(y_test.shape, dtype=np.float64)
        y_test[~mask] = -1.

    # To deal with object dtypes, we need to convert into an array of floats.
    y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)

    scores = list()

    if isinstance(scoring, six.string_types):
        scoring = SCORERS[scoring]
    for w in coefs:
        if multi_class == 'ovr':
            w = w[np.newaxis, :]
        if fit_intercept:
            # Last column of w holds the intercept.
            log_reg.coef_ = w[:, :-1]
            log_reg.intercept_ = w[:, -1]
        else:
            log_reg.coef_ = w
            log_reg.intercept_ = 0.

        if scoring is None:
            scores.append(log_reg.score(X_test, y_test))
        else:
            scores.append(scoring(log_reg, X_test, y_test))
    return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
                         _LearntSelectorMixin, SparseCoefMixin):
    """Logistic Regression (aka logit, MaxEnt) classifier.

    In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
    scheme if the 'multi_class' option is set to 'ovr' and uses the
    cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
    (Currently the 'multinomial' option is supported only by the 'lbfgs' and
    'newton-cg' solvers.)

    This class implements regularized logistic regression using the
    `liblinear` library, newton-cg and lbfgs solvers. It can handle both
    dense and sparse input. Use C-ordered arrays or CSR matrices containing
    64-bit floats for optimal performance; any other input format will be
    converted (and copied).

    The newton-cg and lbfgs solvers support only L2 regularization with primal
    formulation. The liblinear solver supports both L1 and L2 regularization,
    with a dual formulation only for the L2 penalty.

    Read more in the :ref:`User Guide <logistic_regression>`.

    Parameters
    ----------
    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.

    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.

    C : float, optional (default=1.0)
        Inverse of regularization strength; must be a positive float.
        Like in support vector machines, smaller values specify stronger
        regularization.

    fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.

    intercept_scaling : float, default: 1
        Useful only if solver is liblinear.
        when self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

        .. versionadded:: 0.17
           *class_weight='balanced'* instead of deprecated *class_weight='auto'*.

    max_iter : int
        Useful only for the newton-cg, sag and lbfgs solvers.
        Maximum number of iterations taken for the solvers to converge.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
        Algorithm to use in the optimization problem.

        - For small datasets, 'liblinear' is a good choice, whereas 'sag' is
          faster for large ones.
        - For multiclass problems, only 'newton-cg' and 'lbfgs' handle
          multinomial loss; 'sag' and 'liblinear' are limited to
          one-versus-rest schemes.
        - 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.

        Note that 'sag' fast convergence is only guaranteed on features with
        approximately the same scale. You can preprocess the data with a
        scaler from sklearn.preprocessing.

        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.

    tol : float, optional
        Tolerance for stopping criteria.

    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        solver.

    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        Useless for liblinear solver.

        .. versionadded:: 0.17
           *warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.

    n_jobs : int, optional
        Number of CPU cores used during the cross-validation loop. If given
        a value of -1, all cores are used.

    Attributes
    ----------
    coef_ : array, shape (n_classes, n_features)
        Coefficient of the features in the decision function.

    intercept_ : array, shape (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        If `fit_intercept` is set to False, the intercept is set to zero.

    n_iter_ : array, shape (n_classes,) or (1, )
        Actual number of iterations for all classes. If binary or multinomial,
        it returns only 1 element. For liblinear solver, only the maximum
        number of iteration across all classes is given.

    See also
    --------
    SGDClassifier : incrementally trained logistic regression (when given
        the parameter ``loss="log"``).
    sklearn.svm.LinearSVC : learns SVM models using the same algorithm.

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon,
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.

    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.

    References
    ----------

    LIBLINEAR -- A Library for Large Linear Classification
        http://www.csie.ntu.edu.tw/~cjlin/liblinear/

    Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
        methods for logistic regression and maximum entropy models.
        Machine Learning 85(1-2):41-75.
        http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
    """

    def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
                 fit_intercept=True, intercept_scaling=1, class_weight=None,
                 random_state=None, solver='liblinear', max_iter=100,
                 multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
        # Store hyper-parameters verbatim; validation happens in fit()
        # (scikit-learn convention so that get_params/set_params round-trip).
        self.penalty = penalty
        self.dual = dual
        self.tol = tol
        self.C = C
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.random_state = random_state
        self.solver = solver
        self.max_iter = max_iter
        self.multi_class = multi_class
        self.verbose = verbose
        self.warm_start = warm_start
        self.n_jobs = n_jobs

    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target vector relative to X.

        sample_weight : array-like, shape (n_samples,) optional
            Array of weights that are assigned to individual samples.
            If not provided, then each sample is given unit weight.

            .. versionadded:: 0.17
               *sample_weight* support to LogisticRegression.

        Returns
        -------
        self : object
            Returns self.
        """
        # Validate scalar hyper-parameters before touching the data.
        if not isinstance(self.C, numbers.Number) or self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError("Maximum number of iteration must be positive;"
                             " got (max_iter=%r)" % self.max_iter)
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError("Tolerance for stopping criteria must be "
                             "positive; got (tol=%r)" % self.tol)

        X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
                         order="C")
        check_classification_targets(y)
        self.classes_ = np.unique(y)
        n_samples, n_features = X.shape

        _check_solver_option(self.solver, self.multi_class, self.penalty,
                             self.dual, sample_weight)

        # liblinear fits all classes in one C call and returns directly;
        # the path-based machinery below is for the other solvers.
        if self.solver == 'liblinear':
            self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
                X, y, self.C, self.fit_intercept, self.intercept_scaling,
                self.class_weight, self.penalty, self.dual, self.verbose,
                self.max_iter, self.tol, self.random_state)
            self.n_iter_ = np.array([n_iter_])
            return self

        # SAG needs the max squared row norm for its step size.
        max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
            else None

        n_classes = len(self.classes_)
        classes_ = self.classes_
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % classes_[0])

        if len(self.classes_) == 2:
            # Binary case: a single weight vector for the positive class.
            n_classes = 1
            classes_ = classes_[1:]

        if self.warm_start:
            warm_start_coef = getattr(self, 'coef_', None)
        else:
            warm_start_coef = None
        if warm_start_coef is not None and self.fit_intercept:
            # Append the intercept as the last column of the warm-start coef.
            warm_start_coef = np.append(warm_start_coef,
                                        self.intercept_[:, np.newaxis],
                                        axis=1)

        self.coef_ = list()
        self.intercept_ = np.zeros(n_classes)

        # Hack so that we iterate only once for the multinomial case.
        if self.multi_class == 'multinomial':
            classes_ = [None]
            warm_start_coef = [warm_start_coef]
        if warm_start_coef is None:
            warm_start_coef = [None] * n_classes

        path_func = delayed(logistic_regression_path)

        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
        # One path_func call per class (or a single call for multinomial),
        # each fitting at the single value C.
        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                               backend=backend)(
            path_func(X, y, pos_class=class_, Cs=[self.C],
                      fit_intercept=self.fit_intercept, tol=self.tol,
                      verbose=self.verbose, solver=self.solver, copy=False,
                      multi_class=self.multi_class, max_iter=self.max_iter,
                      class_weight=self.class_weight, check_input=False,
                      random_state=self.random_state, coef=warm_start_coef_,
                      max_squared_sum=max_squared_sum,
                      sample_weight=sample_weight)
            for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))

        fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
        self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]

        if self.multi_class == 'multinomial':
            self.coef_ = fold_coefs_[0][0]
        else:
            self.coef_ = np.asarray(fold_coefs_)
            self.coef_ = self.coef_.reshape(n_classes, n_features +
                                            int(self.fit_intercept))

        if self.fit_intercept:
            # Split the fitted intercept off the last coefficient column.
            self.intercept_ = self.coef_[:, -1]
            self.coef_ = self.coef_[:, :-1]

        return self

    def predict_proba(self, X):
        """Probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        For a multi_class problem, if multi_class is set to be "multinomial"
        the softmax function is used to find the predicted probability of
        each class.
        Else use a one-vs-rest approach, i.e calculate the probability
        of each class assuming it to be positive using the logistic function.
        and normalize these values across all the classes.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        if not hasattr(self, "coef_"):
            raise NotFittedError("Call fit before prediction")
        # A single weight vector (binary) always uses the OvR/logistic route;
        # the multinomial softmax only applies to a true multiclass fit.
        calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
        if calculate_ovr:
            return super(LogisticRegression, self)._predict_proba_lr(X)
        else:
            return softmax(self.decision_function(X), copy=False)

    def predict_log_proba(self, X):
        """Log of probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in ``self.classes_``.
        """
        return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
                           LinearClassifierMixin, _LearntSelectorMixin):
    """Logistic Regression CV (aka logit, MaxEnt) classifier.
    This class implements logistic regression using liblinear, newton-cg, sag
    of lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
    regularization with primal formulation. The liblinear solver supports both
    L1 and L2 regularization, with a dual formulation only for the L2 penalty.
    For the grid of Cs values (that are set by default to be ten values in
    a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
    selected by the cross-validator StratifiedKFold, but it can be changed
    using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path i.e guess the initial coefficients of the
    present fit to be the coefficients got after convergence in the previous
    fit, so it is supposed to be faster for high-dimensional dense data.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores got by doing a one-vs-rest in parallel across all
    folds and classes. Hence this is not the true multinomial loss.
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    Cs : list of floats | int
        Each of the values in Cs describes the inverse of regularization
        strength. If Cs is as an int, then a grid of Cs values are chosen
        in a logarithmic scale between 1e-4 and 1e4.
        Like in support vector machines, smaller values specify stronger
        regularization.
    fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
        .. versionadded:: 0.17
           class_weight == 'balanced'
    cv : integer or cross-validation generator
        The default cross-validation generator used is Stratified K-Folds.
        If an integer is provided, then it is the number of folds used.
        See the module :mod:`sklearn.cross_validation` module for the
        list of possible cross-validation objects.
    penalty : str, 'l1' or 'l2'
        Used to specify the norm used in the penalization. The newton-cg and
        lbfgs solvers support only l2 penalties.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    scoring : callabale
        Scoring function to use as cross-validation criteria. For a list of
        scoring functions that can be used, look at :mod:`sklearn.metrics`.
        The default scoring option used is accuracy_score.
    solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
        Algorithm to use in the optimization problem.
        - For small datasets, 'liblinear' is a good choice, whereas 'sag' is
          faster for large ones.
        - For multiclass problems, only 'newton-cg' and 'lbfgs' handle
          multinomial loss; 'sag' and 'liblinear' are limited to
          one-versus-rest schemes.
        - 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
        - 'liblinear' might be slower in LogisticRegressionCV because it does
          not handle warm-starting.
    tol : float, optional
        Tolerance for stopping criteria.
    max_iter : int, optional
        Maximum number of iterations of the optimization algorithm.
    n_jobs : int, optional
        Number of CPU cores used during the cross-validation loop. If given
        a value of -1, all cores are used.
    verbose : int
        For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
        positive number for verbosity.
    refit : bool
        If set to True, the scores are averaged across all folds, and the
        coefs and the C that corresponds to the best score is taken, and a
        final refit is done using these parameters.
        Otherwise the coefs, intercepts and C that correspond to the
        best scores across folds are averaged.
    multi_class : str, {'ovr', 'multinomial'}
        Multiclass option can be either 'ovr' or 'multinomial'. If the option
        chosen is 'ovr', then a binary problem is fit for each label. Else
        the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for 'lbfgs' and
        'newton-cg' solvers.
    intercept_scaling : float, default 1.
        Useful only if solver is liblinear.
        This parameter is useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.
    Attributes
    ----------
    coef_ : array, shape (1, n_features) or (n_classes, n_features)
        Coefficient of the features in the decision function.
        `coef_` is of shape (1, n_features) when the given problem
        is binary.
        `coef_` is readonly property derived from `raw_coef_` that
        follows the internal memory layout of liblinear.
    intercept_ : array, shape (1,) or (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        It is available only when parameter intercept is set to True
        and is of shape(1,) when the problem is binary.
    Cs_ : array
        Array of C i.e. inverse of regularization parameter values used
        for cross-validation.
    coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
                   ``(n_folds, len(Cs_), n_features + 1)``
        dict with classes as the keys, and the path of coefficients obtained
        during cross-validating across each fold and then across each Cs
        after doing an OvR for the corresponding class as values.
        If the 'multi_class' option is set to 'multinomial', then
        the coefs_paths are the coefficients corresponding to each class.
        Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
        ``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
        intercept is fit or not.
    scores_ : dict
        dict with classes as the keys, and the values as the
        grid of scores obtained during cross-validating each fold, after doing
        an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial' then the same scores are repeated across
        all classes, since this is the multinomial class.
        Each dict value has shape (n_folds, len(Cs))
    C_ : array, shape (n_classes,) or (n_classes - 1,)
        Array of C that maps to the best scores across every class. If refit is
        set to False, then for each class, the best C is the average of the
        C's that correspond to the best scores for each fold.
    n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
        Actual number of iterations for all classes, folds and Cs.
        In the binary or multinomial cases, the first dimension is equal to 1.
    See also
    --------
    LogisticRegression
    """
    def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
                 penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
                 max_iter=100, class_weight=None, n_jobs=1, verbose=0,
                 refit=True, intercept_scaling=1., multi_class='ovr',
                 random_state=None):
        # Parameters are stored unmodified; validation (e.g. of max_iter
        # and tol) is deferred to fit().
        self.Cs = Cs
        self.fit_intercept = fit_intercept
        self.cv = cv
        self.dual = dual
        self.penalty = penalty
        self.scoring = scoring
        self.tol = tol
        self.max_iter = max_iter
        self.class_weight = class_weight
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.solver = solver
        self.refit = refit
        self.intercept_scaling = intercept_scaling
        self.multi_class = multi_class
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,) optional
            Array of weights that are assigned to individual samples.
            If not provided, then each sample is given unit weight.
        Returns
        -------
        self : object
            Returns self.
        """
        # Reject unsupported solver/penalty/multi_class combinations early.
        _check_solver_option(self.solver, self.multi_class, self.penalty,
                             self.dual, sample_weight)
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError("Maximum number of iteration must be positive;"
                             " got (max_iter=%r)" % self.max_iter)
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError("Tolerance for stopping criteria must be "
                             "positive; got (tol=%r)" % self.tol)
        X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
                         order="C")
        # max_squared_sum is only consumed when the solver is 'sag'.
        max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
            else None
        check_classification_targets(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warnings.warn(
                "A column-vector y was passed when a 1d array was"
                " expected. Please change the shape of y to "
                "(n_samples, ), for example using ravel().",
                DataConversionWarning)
            y = np.ravel(y)
        check_consistent_length(X, y)
        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=True)
        folds = list(cv)
        # NOTE(review): self._enc is fit but not read again in this method;
        # presumably consumed elsewhere in the class — verify before removal.
        self._enc = LabelEncoder()
        self._enc.fit(y)
        labels = self.classes_ = np.unique(y)
        n_classes = len(labels)
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % self.classes_[0])
        if n_classes == 2:
            # OvR in case of binary problems is as good as fitting
            # the higher label
            n_classes = 1
            labels = labels[1:]
        # We need this hack to iterate only once over labels, in the case of
        # multi_class = multinomial, without changing the value of the labels.
        iter_labels = labels
        if self.multi_class == 'multinomial':
            iter_labels = [None]
        if self.class_weight and not(isinstance(self.class_weight, dict) or
                                     self.class_weight in
                                     ['balanced', 'auto']):
            # 'auto' is deprecated and will be removed in 0.19
            raise ValueError("class_weight provided should be a "
                             "dict or 'balanced'")
        # compute the class weights for the entire dataset y
        if self.class_weight in ("auto", "balanced"):
            classes = np.unique(y)
            class_weight = compute_class_weight(self.class_weight, classes, y)
            class_weight = dict(zip(classes, class_weight))
        else:
            class_weight = self.class_weight
        path_func = delayed(_log_reg_scoring_path)
        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
        # One regularization path is fit per (label, fold) pair, in parallel.
        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                               backend=backend)(
            path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
                      fit_intercept=self.fit_intercept, penalty=self.penalty,
                      dual=self.dual, solver=self.solver, tol=self.tol,
                      max_iter=self.max_iter, verbose=self.verbose,
                      class_weight=class_weight, scoring=self.scoring,
                      multi_class=self.multi_class,
                      intercept_scaling=self.intercept_scaling,
                      random_state=self.random_state,
                      max_squared_sum=max_squared_sum,
                      sample_weight=sample_weight
                      )
            for label in iter_labels
            for train, test in folds)
        if self.multi_class == 'multinomial':
            multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
            multi_coefs_paths = np.asarray(multi_coefs_paths)
            multi_scores = np.asarray(multi_scores)
            # This is just to maintain API similarity between the ovr and
            # multinomial option.
            # Coefs_paths in now n_folds X len(Cs) X n_classes X n_features
            # we need it to be n_classes X len(Cs) X n_folds X n_features
            # to be similar to "ovr".
            coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
            # Multinomial has a true score across all labels. Hence the
            # shape is n_folds X len(Cs). We need to repeat this score
            # across all labels for API similarity.
            scores = np.tile(multi_scores, (n_classes, 1, 1))
            self.Cs_ = Cs[0]
            self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
                                                len(self.Cs_)))
        else:
            coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
            self.Cs_ = Cs[0]
            coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
                                                   len(self.Cs_), -1))
            self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
                                                len(self.Cs_)))
        self.coefs_paths_ = dict(zip(labels, coefs_paths))
        scores = np.reshape(scores, (n_classes, len(folds), -1))
        self.scores_ = dict(zip(labels, scores))
        self.C_ = list()
        self.coef_ = np.empty((n_classes, X.shape[1]))
        self.intercept_ = np.zeros(n_classes)
        # hack to iterate only once for multinomial case.
        if self.multi_class == 'multinomial':
            scores = multi_scores
            coefs_paths = multi_coefs_paths
        for index, label in enumerate(iter_labels):
            if self.multi_class == 'ovr':
                scores = self.scores_[label]
                coefs_paths = self.coefs_paths_[label]
            if self.refit:
                # Pick the C with the best score summed across folds, then
                # refit once on the full data starting from the mean coefs.
                best_index = scores.sum(axis=0).argmax()
                C_ = self.Cs_[best_index]
                self.C_.append(C_)
                if self.multi_class == 'multinomial':
                    coef_init = np.mean(coefs_paths[:, best_index, :, :],
                                        axis=0)
                else:
                    coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
                w, _, _ = logistic_regression_path(
                    X, y, pos_class=label, Cs=[C_], solver=self.solver,
                    fit_intercept=self.fit_intercept, coef=coef_init,
                    max_iter=self.max_iter, tol=self.tol,
                    penalty=self.penalty, copy=False,
                    class_weight=class_weight,
                    multi_class=self.multi_class,
                    verbose=max(0, self.verbose - 1),
                    random_state=self.random_state,
                    check_input=False, max_squared_sum=max_squared_sum,
                    sample_weight=sample_weight)
                w = w[0]
            else:
                # Take the best scores across every fold and the average of all
                # coefficients corresponding to the best scores.
                best_indices = np.argmax(scores, axis=1)
                w = np.mean([coefs_paths[i][best_indices[i]]
                             for i in range(len(folds))], axis=0)
                self.C_.append(np.mean(self.Cs_[best_indices]))
            if self.multi_class == 'multinomial':
                self.C_ = np.tile(self.C_, n_classes)
                self.coef_ = w[:, :X.shape[1]]
                if self.fit_intercept:
                    self.intercept_ = w[:, -1]
            else:
                # The last weight is the intercept when fit_intercept is True.
                self.coef_[index] = w[: X.shape[1]]
                if self.fit_intercept:
                    self.intercept_[index] = w[-1]
        self.C_ = np.asarray(self.C_)
        return self
| {
"content_hash": "36f95aeb1cc3361f94da60921258abf6",
"timestamp": "",
"source": "github",
"line_count": 1675,
"max_line_length": 81,
"avg_line_length": 39.57492537313433,
"alnum_prop": 0.6025223268163167,
"repo_name": "marcocaccin/scikit-learn",
"id": "70086b35543d3712beb647fe6e25425b206d0da1",
"size": "66288",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/logistic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394526"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "5724945"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
# Send an HTML email with an embedded image and a plain-text alternative.
#
# Fixes over the original recipe:
# - Uses the lowercase ``email.mime.*`` module paths, which work on both
#   Python 2.5+ and Python 3 (the old ``email.MIMEMultipart`` style paths
#   were removed in Python 3).
# - Opens the image with a ``with`` block so the handle is always closed.
# - Moves the ``smtplib`` import to the top of the script.
#
# NOTE(review): SMTP credentials are hard-coded below for demonstration
# only; load them from configuration or the environment in real use.
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Define these once; use them twice!
strFrom = 'from@example.com'
strTo = 'to@example.com'
# Create the root message and fill in the from, to, and subject headers
msgRoot = MIMEMultipart('related')
msgRoot['Subject'] = 'test message'
msgRoot['From'] = strFrom
msgRoot['To'] = strTo
msgRoot.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
msgText = MIMEText('This is the alternative plain text message.')
msgAlternative.attach(msgText)
# We reference the image in the IMG SRC attribute by the ID we give it below
msgText = MIMEText('<b>Some <i>HTML</i> text</b> and an image.<br><img src="cid:image1"><br>Nifty!', 'html')
msgAlternative.attach(msgText)
# This example assumes the image is in the current directory
with open('test.jpg', 'rb') as fp:
    msgImage = MIMEImage(fp.read())
# Define the image's ID as referenced above
msgImage.add_header('Content-ID', '<image1>')
msgRoot.attach(msgImage)
# Send the email (this example assumes SMTP authentication is required)
smtp = smtplib.SMTP()
smtp.connect('smtp.example.com')
smtp.login('exampleuser', 'examplepass')
smtp.sendmail(strFrom, strTo, msgRoot.as_string())
smtp.quit()
| {
"content_hash": "05f0e13d9543de57115bd710769074ce",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 108,
"avg_line_length": 35.04651162790697,
"alnum_prop": 0.7577969475779694,
"repo_name": "ActiveState/code",
"id": "75acb55d4d4fc5f273dae137d51593b72b8b4bb1",
"size": "1634",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "recipes/Python/473810_Send_HTML_email_embedded_image_platext/recipe-473810.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""Renewable certificates storage."""
import datetime
import logging
import os
import re
import configobj
import parsedatetime
import pytz
from letsencrypt import constants
from letsencrypt import crypto_util
from letsencrypt import errors
from letsencrypt import error_handler
from letsencrypt import le_util
# Module-level logger for the renewal-storage module.
logger = logging.getLogger(__name__)
# The four lineage member kinds every renewal configuration must reference.
ALL_FOUR = ("cert", "privkey", "chain", "fullchain")
def config_with_defaults(config=None):
    """Overlay the supplied config, if any, on the builtin renewer defaults.

    :param configobj.ConfigObj config: optional overrides; when ``None``,
        an empty configuration is merged (i.e. pure defaults are returned)
    :returns: merged configuration
    :rtype: configobj.ConfigObj
    """
    merged = configobj.ConfigObj(constants.RENEWER_DEFAULTS)
    overrides = configobj.ConfigObj() if config is None else config
    merged.merge(overrides)
    return merged
def add_time_interval(base_time, interval, textparser=parsedatetime.Calendar()):
    """Parse the specified time interval and add it to base_time.

    The interval can be in the English-language format understood by
    parsedatetime, e.g., '10 days', '3 weeks', '6 months', '9 hours', or
    a sequence of such intervals like '6 months 1 week' or '3 days 12
    hours'. A bare integer with no unit is interpreted as a number of
    days.

    :param datetime.datetime base_time: the time the interval is added to
    :param str interval: the time interval to parse
    :returns: base_time plus the interpretation of the time interval
    :rtype: :class:`datetime.datetime`"""
    if interval.strip().isdigit():
        # No unit given: default to days.
        interval += " days"
    # Preserve base_time's timezone when present; otherwise fall back to UTC.
    zone = base_time.tzinfo or pytz.UTC
    parsed, _ = textparser.parseDT(interval, base_time, tzinfo=zone)
    return parsed
def write_renewal_config(filename, target, cli_config):
    """Create or overwrite a renewal config file with the given values.

    :param str filename: absolute path to the config file
    :param dict target: maps each member of ALL_FOUR to its symlink path
    :param .RenewerConfiguration cli_config: parsed command line
        arguments

    :returns: configuration object for the new config file
    :rtype: configobj.ConfigObj
    """
    # create_empty makes ConfigObj create the file when it does not exist.
    config = configobj.ConfigObj(filename, create_empty=True)
    for kind in ALL_FOUR:
        config[kind] = target[kind]
    # XXX: Quick-and-dirty way of copying command-line options into the
    #      configobj so integration testing can proceed; a more general
    #      and correct mechanism is still needed. (new_lineage's config
    #      parameter should ideally be a ConfigObj, though a plain dict
    #      is accepted in practice.)
    renewalparams = vars(cli_config.namespace)
    if renewalparams:
        config["renewalparams"] = renewalparams
        config.comments["renewalparams"] = [
            "", "Options and defaults used in the renewal process"]
    # TODO: add human-readable comments explaining other available
    #       parameters
    logger.debug("Writing new config %s.", filename)
    config.write()
    return config
def update_configuration(lineagename, target, cli_config):
    """Rewrite lineagename's config to contain the specified values.

    The new contents are written to a temporary ".new" file first and
    then renamed over the real config file.

    :param str lineagename: name of the lineage being modified
    :param dict target: maps each member of ALL_FOUR to its symlink path
    :param .RenewerConfiguration cli_config: parsed command line
        arguments

    :returns: configuration object for the updated config file
    :rtype: configobj.ConfigObj
    """
    config_filename = os.path.join(
        cli_config.renewal_configs_dir, lineagename) + ".conf"
    temp_filename = config_filename + ".new"
    # Discard any tempfile left over from an earlier, interrupted run.
    if os.path.exists(temp_filename):
        os.unlink(temp_filename)
    write_renewal_config(temp_filename, target, cli_config)
    os.rename(temp_filename, config_filename)
    return configobj.ConfigObj(config_filename)
def get_link_target(link):
    """Resolve link one level and return an absolute path to its target.

    :param str link: path to a symbolic link

    :returns: absolute path to the target of link
    :rtype: str
    """
    raw_target = os.readlink(link)
    # A relative target is interpreted relative to the link's directory.
    if os.path.isabs(raw_target):
        resolved = raw_target
    else:
        resolved = os.path.join(os.path.dirname(link), raw_target)
    return os.path.abspath(resolved)
class RenewableCert(object): # pylint: disable=too-many-instance-attributes
"""Renewable certificate.
Represents a lineage of certificates that is under the management
of the Let's Encrypt client, indicated by the existence of an
associated renewal configuration file.
Note that the notion of "current version" for a lineage is
maintained on disk in the structure of symbolic links, and is not
explicitly stored in any instance variable in this object. The
RenewableCert object is able to determine information about the
current (or other) version by accessing data on disk, but does not
inherently know any of this information except by examining the
symbolic links as needed. The instance variables mentioned below
point to symlinks that reflect the notion of "current version" of
each managed object, and it is these paths that should be used when
configuring servers to use the certificate managed in a lineage.
These paths are normally within the "live" directory, and their
symlink targets -- the actual cert files -- are normally found
within the "archive" directory.
:ivar str cert: The path to the symlink representing the current
version of the certificate managed by this lineage.
:ivar str privkey: The path to the symlink representing the current
version of the private key managed by this lineage.
:ivar str chain: The path to the symlink representing the current version
of the chain managed by this lineage.
:ivar str fullchain: The path to the symlink representing the
current version of the fullchain (combined chain and cert)
managed by this lineage.
:ivar configobj.ConfigObj configuration: The renewal configuration
options associated with this lineage, obtained from parsing the
renewal configuration file and/or systemwide defaults.
"""
def __init__(self, config_filename, cli_config):
"""Instantiate a RenewableCert object from an existing lineage.
:param str config_filename: the path to the renewal config file
that defines this lineage.
:param .RenewerConfiguration: parsed command line arguments
:raises .CertStorageError: if the configuration file's name didn't end
in ".conf", or the file is missing or broken.
"""
self.cli_config = cli_config
if not config_filename.endswith(".conf"):
raise errors.CertStorageError(
"renewal config file name must end in .conf")
self.lineagename = os.path.basename(
config_filename[:-len(".conf")])
# self.configuration should be used to read parameters that
# may have been chosen based on default values from the
# systemwide renewal configuration; self.configfile should be
# used to make and save changes.
try:
self.configfile = configobj.ConfigObj(config_filename)
except configobj.ConfigObjError:
raise errors.CertStorageError(
"error parsing {0}".format(config_filename))
# TODO: Do we actually use anything from defaults and do we want to
# read further defaults from the systemwide renewal configuration
# file at this stage?
self.configuration = config_with_defaults(self.configfile)
if not all(x in self.configuration for x in ALL_FOUR):
raise errors.CertStorageError(
"renewal config file {0} is missing a required "
"file reference".format(self.configfile))
self.cert = self.configuration["cert"]
self.privkey = self.configuration["privkey"]
self.chain = self.configuration["chain"]
self.fullchain = self.configuration["fullchain"]
self._fix_symlinks()
self._check_symlinks()
def _check_symlinks(self):
"""Raises an exception if a symlink doesn't exist"""
for kind in ALL_FOUR:
link = getattr(self, kind)
if not os.path.islink(link):
raise errors.CertStorageError(
"expected {0} to be a symlink".format(link))
target = get_link_target(link)
if not os.path.exists(target):
raise errors.CertStorageError("target {0} of symlink {1} does "
"not exist".format(target, link))
def _consistent(self):
"""Are the files associated with this lineage self-consistent?
:returns: Whether the files stored in connection with this
lineage appear to be correct and consistent with one
another.
:rtype: bool
"""
# Each element must be referenced with an absolute path
for x in (self.cert, self.privkey, self.chain, self.fullchain):
if not os.path.isabs(x):
logger.debug("Element %s is not referenced with an "
"absolute path.", x)
return False
# Each element must exist and be a symbolic link
for x in (self.cert, self.privkey, self.chain, self.fullchain):
if not os.path.islink(x):
logger.debug("Element %s is not a symbolic link.", x)
return False
for kind in ALL_FOUR:
link = getattr(self, kind)
target = get_link_target(link)
# Each element's link must point within the cert lineage's
# directory within the official archive directory
desired_directory = os.path.join(
self.cli_config.archive_dir, self.lineagename)
if not os.path.samefile(os.path.dirname(target),
desired_directory):
logger.debug("Element's link does not point within the "
"cert lineage's directory within the "
"official archive directory. Link: %s, "
"target directory: %s, "
"archive directory: %s.",
link, os.path.dirname(target), desired_directory)
return False
# The link must point to a file that exists
if not os.path.exists(target):
logger.debug("Link %s points to file %s that does not exist.",
link, target)
return False
# The link must point to a file that follows the archive
# naming convention
pattern = re.compile(r"^{0}([0-9]+)\.pem$".format(kind))
if not pattern.match(os.path.basename(target)):
logger.debug("%s does not follow the archive naming "
"convention.", target)
return False
# It is NOT required that the link's target be a regular
# file (it may itself be a symlink). But we should probably
# do a recursive check that ultimately the target does
# exist?
# XXX: Additional possible consistency checks (e.g.
# cryptographic validation of the chain being a chain,
# the chain matching the cert, and the cert matching
# the subject key)
# XXX: All four of the targets are in the same directory
# (This check is redundant with the check that they
# are all in the desired directory!)
# len(set(os.path.basename(self.current_target(x)
# for x in ALL_FOUR))) == 1
return True
    def _fix(self):
        """Attempt to fix defects or inconsistencies in this lineage.

        .. todo:: Currently unimplemented.
        """
        # TODO: Figure out what kinds of fixes are possible.  For
        #       example, checking if there is a valid version that
        #       we can update the symlinks to.  (Maybe involve
        #       parsing keys and certs to see if they exist and
        #       if a key corresponds to the subject key of a cert?)
    # TODO: In general, the symlink-reading functions below are not
    #       cautious enough about the possibility that links or their
    #       targets may not exist.  (This shouldn't happen, but might
    #       happen as a result of random tampering by a sysadmin, or
    #       filesystem errors, or crashes.)
def _previous_symlinks(self):
"""Returns the kind and path of all symlinks used in recovery.
:returns: list of (kind, symlink) tuples
:rtype: list
"""
previous_symlinks = []
for kind in ALL_FOUR:
link_dir = os.path.dirname(getattr(self, kind))
link_base = "previous_{0}.pem".format(kind)
previous_symlinks.append((kind, os.path.join(link_dir, link_base)))
return previous_symlinks
def _fix_symlinks(self):
"""Fixes symlinks in the event of an incomplete version update.
If there is no problem with the current symlinks, this function
has no effect.
"""
previous_symlinks = self._previous_symlinks()
if all(os.path.exists(link[1]) for link in previous_symlinks):
for kind, previous_link in previous_symlinks:
current_link = getattr(self, kind)
if os.path.lexists(current_link):
os.unlink(current_link)
os.symlink(os.readlink(previous_link), current_link)
for _, link in previous_symlinks:
if os.path.exists(link):
os.unlink(link)
def current_target(self, kind):
"""Returns full path to which the specified item currently points.
:param str kind: the lineage member item ("cert", "privkey",
"chain", or "fullchain")
:returns: The path to the current version of the specified
member.
:rtype: str or None
"""
if kind not in ALL_FOUR:
raise errors.CertStorageError("unknown kind of item")
link = getattr(self, kind)
if not os.path.exists(link):
logger.debug("Expected symlink %s for %s does not exist.",
link, kind)
return None
return get_link_target(link)
def current_version(self, kind):
"""Returns numerical version of the specified item.
For example, if kind is "chain" and the current chain link
points to a file named "chain7.pem", returns the integer 7.
:param str kind: the lineage member item ("cert", "privkey",
"chain", or "fullchain")
:returns: the current version of the specified member.
:rtype: int
"""
if kind not in ALL_FOUR:
raise errors.CertStorageError("unknown kind of item")
pattern = re.compile(r"^{0}([0-9]+)\.pem$".format(kind))
target = self.current_target(kind)
if target is None or not os.path.exists(target):
logger.debug("Current-version target for %s "
"does not exist at %s.", kind, target)
target = ""
matches = pattern.match(os.path.basename(target))
if matches:
return int(matches.groups()[0])
else:
logger.debug("No matches for target %s.", kind)
return None
def version(self, kind, version):
"""The filename that corresponds to the specified version and kind.
.. warning:: The specified version may not exist in this
lineage. There is no guarantee that the file path returned
by this method actually exists.
:param str kind: the lineage member item ("cert", "privkey",
"chain", or "fullchain")
:param int version: the desired version
:returns: The path to the specified version of the specified member.
:rtype: str
"""
if kind not in ALL_FOUR:
raise errors.CertStorageError("unknown kind of item")
where = os.path.dirname(self.current_target(kind))
return os.path.join(where, "{0}{1}.pem".format(kind, version))
def available_versions(self, kind):
"""Which alternative versions of the specified kind of item exist?
The archive directory where the current version is stored is
consulted to obtain the list of alternatives.
:param str kind: the lineage member item (
``cert``, ``privkey``, ``chain``, or ``fullchain``)
:returns: all of the version numbers that currently exist
:rtype: `list` of `int`
"""
if kind not in ALL_FOUR:
raise errors.CertStorageError("unknown kind of item")
where = os.path.dirname(self.current_target(kind))
files = os.listdir(where)
pattern = re.compile(r"^{0}([0-9]+)\.pem$".format(kind))
matches = [pattern.match(f) for f in files]
return sorted([int(m.groups()[0]) for m in matches if m])
def newest_available_version(self, kind):
"""Newest available version of the specified kind of item?
:param str kind: the lineage member item (``cert``,
``privkey``, ``chain``, or ``fullchain``)
:returns: the newest available version of this member
:rtype: int
"""
return max(self.available_versions(kind))
def latest_common_version(self):
"""Newest version for which all items are available?
:returns: the newest available version for which all members
(``cert, ``privkey``, ``chain``, and ``fullchain``) exist
:rtype: int
"""
# TODO: this can raise CertStorageError if there is no version overlap
# (it should probably return None instead)
# TODO: this can raise a spurious AttributeError if the current
# link for any kind is missing (it should probably return None)
versions = [self.available_versions(x) for x in ALL_FOUR]
return max(n for n in versions[0] if all(n in v for v in versions[1:]))
def next_free_version(self):
"""Smallest version newer than all full or partial versions?
:returns: the smallest version number that is larger than any
version of any item currently stored in this lineage
:rtype: int
"""
# TODO: consider locking/mutual exclusion between updating processes
# This isn't self.latest_common_version() + 1 because we don't want
# collide with a version that might exist for one file type but not
# for the others.
return max(self.newest_available_version(x) for x in ALL_FOUR) + 1
def has_pending_deployment(self):
    """Is there a later version of all of the managed items?

    :returns: ``True`` if there is a complete version of this
        lineage with a larger version number than the current
        version, and ``False`` otherwise
    :rtype: bool

    """
    # TODO: consider whether to assume consistency or treat
    # inconsistent/consistent versions differently
    # The deployed state is judged by the *smallest* current version
    # across the four members, so a pending deployment exists only when
    # a complete newer version (common to all members) is available.
    smallest_current = min(self.current_version(x) for x in ALL_FOUR)
    return smallest_current < self.latest_common_version()
def _update_link_to(self, kind, version):
    """Make the specified item point at the specified version.

    (Note that this method doesn't verify that the specified version
    exists.)

    :param str kind: the lineage member item ("cert", "privkey",
        "chain", or "fullchain")
    :param int version: the desired version
    """
    if kind not in ALL_FOUR:
        raise errors.CertStorageError("unknown kind of item")

    link = getattr(self, kind)
    new_name = "{0}{1}.pem".format(kind, version)
    # Keep the target relative: reuse the directory portion of the
    # link's existing (relative) target rather than an absolute path.
    link_directory = os.path.dirname(os.readlink(link))
    # TODO: it could be safer to make the link first under a temporary
    # filename, then unlink the old link, then rename the new link
    # to the old link; this ensures that this process is able to
    # create symlinks.
    # TODO: we might also want to check consistency of related links
    # for the other corresponding items
    os.unlink(link)
    os.symlink(os.path.join(link_directory, new_name), link)
def update_all_links_to(self, version):
    """Change all member objects to point to the specified version.

    :param int version: the desired version
    """
    # On any error inside this block, self._fix_symlinks is invoked by
    # the ErrorHandler (presumably to restore a consistent link state
    # from the "previous" links created below -- confirm in
    # _fix_symlinks).
    with error_handler.ErrorHandler(self._fix_symlinks):
        # First record every member's current target under a companion
        # "previous" symlink...
        previous_links = self._previous_symlinks()
        for kind, link in previous_links:
            os.symlink(self.current_target(kind), link)

        # ...then repoint all four members to the requested version...
        for kind in ALL_FOUR:
            self._update_link_to(kind, version)

        # ...and finally discard the backups once everything succeeded.
        for _, link in previous_links:
            os.unlink(link)
def names(self, version=None):
    """What are the subject names of this certificate?

    (If no version is specified, use the current version.)

    :param int version: the desired version number
    :returns: the subject names
    :rtype: `list` of `str`
    :raises .CertStorageError: if could not find cert file.

    """
    # Resolve which on-disk cert file to inspect.
    if version is None:
        target = self.current_target("cert")
    else:
        target = self.version("cert", version)
    if target is None:
        raise errors.CertStorageError("could not find cert file")
    with open(target) as f:
        # SANs (subject alternative names) are extracted from the PEM data
        return crypto_util.get_sans_from_cert(f.read())
def autodeployment_is_enabled(self):
    """Is automatic deployment enabled for this cert?

    When the renewal configuration carries no explicit ``autodeploy``
    setting, deployment defaults to enabled.

    :returns: True if automatic deployment is enabled
    :rtype: bool
    """
    if "autodeploy" in self.configuration:
        return self.configuration.as_bool("autodeploy")
    return True
def should_autodeploy(self, interactive=False):
    """Should this lineage now automatically deploy a newer version?

    This is a policy question and does not only depend on whether
    there is a newer version of the cert. (This considers whether
    autodeployment is enabled, whether a relevant newer version
    exists, and whether the time interval for autodeployment has
    been reached.)

    :param bool interactive: set to True to examine the question
        regardless of whether the renewal configuration allows
        automated deployment (for interactive use). Default False.

    :returns: whether the lineage now ought to autodeploy an
        existing newer cert version
    :rtype: bool

    """
    if interactive or self.autodeployment_is_enabled():
        if self.has_pending_deployment():
            # "deploy_before_expiry" is a human-readable interval
            # (default "5 days") parsed by add_time_interval below.
            interval = self.configuration.get("deploy_before_expiry",
                                              "5 days")
            expiry = crypto_util.notAfter(self.current_target("cert"))
            # Timezone-aware "now" so it can be compared with expiry.
            now = pytz.UTC.fromutc(datetime.datetime.utcnow())
            # Deploy once we are within `interval` of the current
            # cert's expiry.
            if expiry < add_time_interval(now, interval):
                return True
    return False
def ocsp_revoked(self, version=None):
    # pylint: disable=no-self-use,unused-argument
    """Is the specified cert version revoked according to OCSP?

    Also returns True if the cert version is declared as intended
    to be revoked according to Let's Encrypt OCSP extensions.
    (If no version is specified, uses the current version.)

    This method is not yet implemented and currently always returns
    False.

    :param int version: the desired version number
    :returns: whether the certificate is or will be revoked
    :rtype: bool

    """
    # Stub: the OCSP query and its associated network service are not
    # implemented yet, so every version is reported as not revoked.
    return False
def autorenewal_is_enabled(self):
    """Is automatic renewal enabled for this cert?

    When the renewal configuration carries no explicit ``autorenew``
    setting, renewal defaults to enabled.

    :returns: True if automatic renewal is enabled
    :rtype: bool
    """
    if "autorenew" in self.configuration:
        return self.configuration.as_bool("autorenew")
    return True
def should_autorenew(self, interactive=False):
    """Should we now try to autorenew the most recent cert version?

    This is a policy question and does not only depend on whether
    the cert is expired. (This considers whether autorenewal is
    enabled, whether the cert is revoked, and whether the time
    interval for autorenewal has been reached.)

    Note that this examines the numerically most recent cert version,
    not the currently deployed version.

    :param bool interactive: set to True to examine the question
        regardless of whether the renewal configuration allows
        automated renewal (for interactive use). Default False.

    :returns: whether an attempt should now be made to autorenew the
        most current cert version in this lineage
    :rtype: bool

    """
    if interactive or self.autorenewal_is_enabled():
        # Consider whether to attempt to autorenew this cert now

        # Renewals on the basis of revocation
        if self.ocsp_revoked(self.latest_common_version()):
            logger.debug("Should renew, certificate is revoked.")
            return True

        # Renews some period before expiry time
        default_interval = constants.RENEWER_DEFAULTS["renew_before_expiry"]
        interval = self.configuration.get("renew_before_expiry", default_interval)
        # Expiry of the newest complete version, not the deployed one.
        expiry = crypto_util.notAfter(self.version(
            "cert", self.latest_common_version()))
        now = pytz.UTC.fromutc(datetime.datetime.utcnow())
        if expiry < add_time_interval(now, interval):
            logger.debug("Should renew, less than %s before certificate "
                         "expiry %s.", interval,
                         expiry.strftime("%Y-%m-%d %H:%M:%S %Z"))
            return True
    return False
@classmethod
def new_lineage(cls, lineagename, cert, privkey, chain, cli_config):
    # pylint: disable=too-many-locals
    """Create a new certificate lineage.

    Attempts to create a certificate lineage -- enrolled for
    potential future renewal -- with the (suggested) lineage name
    lineagename, and the associated cert, privkey, and chain (the
    associated fullchain will be created automatically). Optional
    configurator and renewalparams record the configuration that was
    originally used to obtain this cert, so that it can be reused
    later during automated renewal.

    Returns a new RenewableCert object referring to the created
    lineage. (The actual lineage name, as well as all the relevant
    file paths, will be available within this object.)

    :param str lineagename: the suggested name for this lineage
        (normally the current cert's first subject DNS name)
    :param str cert: the initial certificate version in PEM format
    :param str privkey: the private key in PEM format
    :param str chain: the certificate chain in PEM format
    :param .RenewerConfiguration cli_config: parsed command line
        arguments

    :returns: the newly-created RenewableCert object
    :rtype: :class:`storage.RenewableCert`

    """
    # Examine the configuration and find the new lineage's name
    for i in (cli_config.renewal_configs_dir, cli_config.archive_dir,
              cli_config.live_dir):
        if not os.path.exists(i):
            # 0o700: these directories contain private key material.
            # (0o700 is the portable octal spelling -- valid on
            # Python 2.6+ and Python 3, unlike the old 0700 form,
            # which is a syntax error under Python 3.)
            os.makedirs(i, 0o700)
            logger.debug("Creating directory %s.", i)
    config_file, config_filename = le_util.unique_lineage_name(
        cli_config.renewal_configs_dir, lineagename)
    if not config_filename.endswith(".conf"):
        raise errors.CertStorageError(
            "renewal config file name must end in .conf")

    # Determine where on disk everything will go
    # lineagename will now potentially be modified based on which
    # renewal configuration file could actually be created
    lineagename = os.path.basename(config_filename)[:-len(".conf")]
    archive = os.path.join(cli_config.archive_dir, lineagename)
    live_dir = os.path.join(cli_config.live_dir, lineagename)
    if os.path.exists(archive):
        raise errors.CertStorageError(
            "archive directory exists for " + lineagename)
    if os.path.exists(live_dir):
        raise errors.CertStorageError(
            "live directory exists for " + lineagename)
    os.mkdir(archive)
    os.mkdir(live_dir)
    logger.debug("Archive directory %s and live "
                 "directory %s created.", archive, live_dir)
    # Relative so the whole config directory stays relocatable.
    relative_archive = os.path.join("..", "..", "archive", lineagename)

    # Put the data into the appropriate files on disk
    target = dict([(kind, os.path.join(live_dir, kind + ".pem"))
                   for kind in ALL_FOUR])
    for kind in ALL_FOUR:
        os.symlink(os.path.join(relative_archive, kind + "1.pem"),
                   target[kind])
    with open(target["cert"], "w") as f:
        logger.debug("Writing certificate to %s.", target["cert"])
        f.write(cert)
    with open(target["privkey"], "w") as f:
        logger.debug("Writing private key to %s.", target["privkey"])
        f.write(privkey)
        # XXX: Let's make sure to get the file permissions right here
    with open(target["chain"], "w") as f:
        logger.debug("Writing chain to %s.", target["chain"])
        f.write(chain)
    with open(target["fullchain"], "w") as f:
        # assumes that OpenSSL.crypto.dump_certificate includes
        # ending newline character
        logger.debug("Writing full chain to %s.", target["fullchain"])
        f.write(cert + chain)

    # Document what we've done in a new renewal config file
    config_file.close()
    new_config = write_renewal_config(config_filename, target, cli_config)
    return cls(new_config.filename, cli_config)
def save_successor(self, prior_version, new_cert,
                   new_privkey, new_chain, cli_config):
    """Save new cert and chain as a successor of a prior version.

    Returns the new version number that was created.

    .. note:: this function does NOT update links to deploy this
        version

    :param int prior_version: the old version to which this version
        is regarded as a successor (used to choose a privkey, if the
        key has not changed, but otherwise this information is not
        permanently recorded anywhere)
    :param str new_cert: the new certificate, in PEM format
    :param str new_privkey: the new private key, in PEM format,
        or ``None``, if the private key has not changed
    :param str new_chain: the new chain, in PEM format
    :param .RenewerConfiguration cli_config: parsed command line
        arguments
    :returns: the new version number that was created
    :rtype: int

    """
    # XXX: assumes official archive location rather than examining links
    # XXX: consider using os.open for availability of os.O_EXCL
    # XXX: ensure file permissions are correct; also create directories
    # if needed (ensuring their permissions are correct)
    # Figure out what the new version is and hence where to save things
    self.cli_config = cli_config
    target_version = self.next_free_version()
    archive = self.cli_config.archive_dir
    # XXX if anyone ever moves a renewal configuration file, this will
    # break... perhaps prefix should be the dirname of the previous
    # cert.pem?
    prefix = os.path.join(archive, self.lineagename)
    # Archive paths of the four new member files, e.g. cert7.pem.
    target = dict(
        [(kind,
          os.path.join(prefix, "{0}{1}.pem".format(kind, target_version)))
         for kind in ALL_FOUR])

    # Distinguish the cases where the privkey has changed and where it
    # has not changed (in the latter case, making an appropriate symlink
    # to an earlier privkey version)
    if new_privkey is None:
        # The behavior below keeps the prior key by creating a new
        # symlink to the old key or the target of the old key symlink.
        old_privkey = os.path.join(
            prefix, "privkey{0}.pem".format(prior_version))
        if os.path.islink(old_privkey):
            old_privkey = os.readlink(old_privkey)
        else:
            # Relative name so the archive directory stays relocatable.
            old_privkey = "privkey{0}.pem".format(prior_version)
        logger.debug("Writing symlink to old private key, %s.", old_privkey)
        os.symlink(old_privkey, target["privkey"])
    else:
        with open(target["privkey"], "w") as f:
            logger.debug("Writing new private key to %s.", target["privkey"])
            f.write(new_privkey)

    # Save everything else
    with open(target["cert"], "w") as f:
        logger.debug("Writing certificate to %s.", target["cert"])
        f.write(new_cert)
    with open(target["chain"], "w") as f:
        logger.debug("Writing chain to %s.", target["chain"])
        f.write(new_chain)
    with open(target["fullchain"], "w") as f:
        logger.debug("Writing full chain to %s.", target["fullchain"])
        f.write(new_cert + new_chain)

    # Update the renewal config file to reflect the (unchanged) live
    # symlinks, and reload the effective configuration.
    symlinks = dict((kind, self.configuration[kind]) for kind in ALL_FOUR)
    self.configfile = update_configuration(
        self.lineagename, symlinks, cli_config)
    self.configuration = config_with_defaults(self.configfile)

    return target_version
| {
"content_hash": "be7bfeb158bf968f222218da9a090907",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 86,
"avg_line_length": 41.85024154589372,
"alnum_prop": 0.6243218284658895,
"repo_name": "TheBoegl/letsencrypt",
"id": "6786ac7450bee4d35231cbc95d4475776cd364f6",
"size": "34652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letsencrypt/storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "50413"
},
{
"name": "Augeas",
"bytes": "4997"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1388093"
},
{
"name": "Shell",
"bytes": "104220"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``slug`` field to ``dojo.Rank``.

    Auto-generated; the ``models`` dict below is South's frozen snapshot
    of the ORM at generation time and must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration (add ``Rank.slug``)."""
        # Adding field 'Rank.slug'
        db.add_column(u'dojo_rank', 'slug',
                      self.gf('autoslug.fields.AutoSlugField')(default='', unique_with=(), max_length=50, populate_from='title'),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration (drop ``Rank.slug``)."""
        # Deleting field 'Rank.slug'
        db.delete_column(u'dojo_rank', 'slug')

    # Frozen ORM state (generated by South's model freezer).
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'dojo.discipline': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Discipline'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 6, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Student']"}),
            'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'dojo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Dojo']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'title'"}),
            'test_in_order': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'dojo.dojo': {
            'Meta': {'object_name': 'Dojo'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 6, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Student']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owners': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
            'private': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'title'"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'dojo.rank': {
            'Meta': {'ordering': "['order']", 'object_name': 'Rank'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 6, 0, 0)', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'max_length': '2'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'title'"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'dojo.student': {
            'Meta': {'object_name': 'Student'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 6, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'dojo.test': {
            'Meta': {'ordering': "['rank_awarded__order']", 'object_name': 'Test'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 6, 0, 0)', 'null': 'True', 'blank': 'True'}),
            'discipline': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Discipline']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pass_percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rank_awarded': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Rank']"})
        },
        u'dojo.testanswer': {
            'Meta': {'object_name': 'TestAnswer'},
            'answer': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        u'dojo.testattempt': {
            'Meta': {'ordering': "['test', 'test__rank_awarded']", 'object_name': 'TestAttempt'},
            'correct_answers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['dojo.TestQuestion']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 6, 0, 0)', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Student']"}),
            'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Test']"})
        },
        u'dojo.testquestion': {
            'Meta': {'object_name': 'TestQuestion'},
            'answers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dojo.TestAnswer']", 'symmetrical': 'False'}),
            'correct_answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'correct_answer'", 'to': u"orm['dojo.TestAnswer']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.TextField', [], {}),
            'test': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dojo.Test']"})
        }
    }

    complete_apps = ['dojo']
"content_hash": "cfb3a0d243fd317c4515e6a4025f111b",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 187,
"avg_line_length": 72.95238095238095,
"alnum_prop": 0.5452567449956484,
"repo_name": "powellc/django-belts",
"id": "60eb665e5c5acefbd55d4db9cc7ca7030ef6f06e",
"size": "9216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "belts/migrations/0002_auto__add_field_rank_slug.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "149358"
},
{
"name": "JavaScript",
"bytes": "61752"
},
{
"name": "Python",
"bytes": "72345"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging metadata for devmason_server, a Django port of the
# pony-build continuous-integration server.
setup(name='devmason_server',
      version='1.0pre-20091019',
      # Fixed description typo: "of a the" -> "of the".
      description='A Django implementation of the pony-build server.',
      author='Eric Holscher, Jacob Kaplan-Moss',
      url='http://github.com/ericholscher/devmason-server',
      license='BSD',
      packages=['devmason_server'],
      # Runtime dependencies resolved by pip/setuptools at install time.
      install_requires=['django-tagging>=0.3',
                        'django-piston>=0.2.2',
                        'django',
                        'mimeparse>=0.1.2'],
      )
| {
"content_hash": "7fc793e3d2136ee1627504d6ee5b3978",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.563953488372093,
"repo_name": "ericholscher/devmason-server",
"id": "721dc3033bbd6d960ca8c738906019657ee766f4",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "897"
},
{
"name": "Python",
"bytes": "64973"
}
],
"symlink_target": ""
} |
from django.views.generic.base import TemplateView
class SlatesListView(TemplateView):
    """Render the slates list page.

    A plain ``TemplateView``: it only selects the template below and
    adds no extra context.
    """
    template_name = 'slates/slates_list.html'
| {
"content_hash": "4da5b853d7adc11b1b09e7d7d5dbd9fb",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 50,
"avg_line_length": 27,
"alnum_prop": 0.7925925925925926,
"repo_name": "sunpig/django-assets-build-example",
"id": "48e224aaa7944a78d7a359d6317b20bb6a338a89",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/slates/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1318"
},
{
"name": "HTML",
"bytes": "5541"
},
{
"name": "JavaScript",
"bytes": "4934"
},
{
"name": "Makefile",
"bytes": "2724"
},
{
"name": "Python",
"bytes": "10522"
},
{
"name": "Ruby",
"bytes": "8836"
},
{
"name": "Shell",
"bytes": "99450"
}
],
"symlink_target": ""
} |
import ibis.expr.operations as ops
from . import helpers
def substring(translator, expr):
    """Render a Substring op as an Impala ``substr()`` call.

    Impala's substr() is 1-indexed while ibis positions are 0-based,
    hence the ``+ 1`` applied to the start offset.  A literal length is
    inlined directly; a falsy/absent length is omitted entirely.
    """
    arg, start, length = expr.op().args
    sql_arg = translator.translate(arg)
    sql_start = translator.translate(start)

    if length is not None and not isinstance(length.op(), ops.Literal):
        # Non-literal length: translate it like any other expression.
        sql_length = translator.translate(length)
        return f'substr({sql_arg}, {sql_start} + 1, {sql_length})'

    literal_length = None if length is None else length.op().value
    if literal_length:
        return f'substr({sql_arg}, {sql_start} + 1, {literal_length})'
    return f'substr({sql_arg}, {sql_start} + 1)'
def string_find(translator, expr):
    """Render StringFind as Impala ``locate()``.

    locate() is 1-based for both its start position and its result, so
    the start offset gets ``+ 1`` and the result gets ``- 1`` to keep
    ibis' 0-based convention.
    """
    arg, substr, start, _ = expr.op().args
    haystack = translator.translate(arg)
    needle = translator.translate(substr)

    if start is None:
        return f'locate({needle}, {haystack}) - 1'

    if isinstance(start.op(), ops.Literal):
        offset = start.op().value
        if not offset:
            # Falsy literal offset behaves like no offset at all.
            return f'locate({needle}, {haystack}) - 1'
        return f'locate({needle}, {haystack}, {offset + 1}) - 1'

    sql_start = translator.translate(start)
    return f'locate({needle}, {haystack}, {sql_start} + 1) - 1'
def find_in_set(translator, expr):
    """Render FindInSet as Impala ``find_in_set()``.

    find_in_set() returns a 1-based position (0 when absent), so the
    result is shifted by ``- 1`` to match ibis' 0-based convention.
    """
    needle, candidates = expr.op().args
    needle_sql = translator.translate(needle)
    # NOTE(review): reaches into the private ``_arg`` of each list
    # element to pull the literal string value -- confirm no public
    # accessor exists.
    joined = ','.join(item._arg.value for item in candidates)
    return f"find_in_set({needle_sql}, '{joined}') - 1"
def string_join(translator, expr):
    """Render StringJoin as Impala ``concat_ws(sep, s1, s2, ...)``."""
    op = expr.op()
    # ``arg`` is the separator; ``strings`` are the pieces to join.
    arg, strings = op.args
    return helpers.format_call(translator, 'concat_ws', arg, *strings)
def string_like(translator, expr):
    """Render a LIKE pattern match; the op's third arg (escape) is unused."""
    subject, pattern, _ = expr.op().args
    lhs = translator.translate(subject)
    rhs = translator.translate(pattern)
    return f'{lhs} LIKE {rhs}'
def parse_url(translator, expr):
    """Render ParseURL as Impala ``parse_url()``.

    ``extract`` (the URL part name) is interpolated as a quoted literal;
    the optional ``key`` is translated as an expression when present.
    """
    arg, extract, key = expr.op().args
    url_sql = translator.translate(arg)
    if key is not None:
        key_sql = translator.translate(key)
        return f"parse_url({url_sql}, '{extract}', {key_sql})"
    return f"parse_url({url_sql}, '{extract}')"
def startswith(translator, expr):
    """Render StartsWith as a LIKE prefix match (``arg like concat(prefix, '%')``)."""
    arg, prefix = expr.op().args
    subject = translator.translate(arg)
    prefix_sql = translator.translate(prefix)
    return f"{subject} like concat({prefix_sql}, '%')"
def endswith(translator, expr):
    """Render EndsWith as a LIKE suffix match (``arg like concat('%', suffix)``).

    :param translator: expression translator with a ``translate`` method
    :param expr: ibis EndsWith expression; ``op().args`` is ``(arg, end)``
    :returns: SQL string
    """
    # The second op argument is the *suffix*; it was previously bound to
    # a local misleadingly named ``start`` (copy-paste from startswith).
    arg, end = expr.op().args
    arg_formatted = translator.translate(arg)
    end_formatted = translator.translate(end)
    return f"{arg_formatted} like concat('%', {end_formatted})"
| {
"content_hash": "89b0a90573b2148fad028fabd8c40ead",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 70,
"avg_line_length": 29.19,
"alnum_prop": 0.6135662898252826,
"repo_name": "cloudera/ibis",
"id": "06a96e48aa550371111ba958bce0075005cbc5d0",
"size": "2919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/backends/base/sql/registry/string.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Python",
"bytes": "2570944"
},
{
"name": "Shell",
"bytes": "1989"
}
],
"symlink_target": ""
} |
"""
Emit a file suitible to include() in a CMakeLists.txt file
with information from a python interpreter
Compatible for python 2.6 -> 3.4
"""
from __future__ import print_function
import sys, os
# Output destination: stdout by default, or the file named by the first
# command-line argument.
if len(sys.argv) < 2:
    out = sys.stdout
else:
    out = open(sys.argv[1], 'w')

from distutils.sysconfig import get_config_var, get_python_inc, get_python_lib

incdirs = [get_python_inc()]
libdirs = [get_config_var('LIBDIR')]

have_np = 'NO'
try:
    from numpy.distutils.misc_util import get_numpy_include_dirs
    incdirs += get_numpy_include_dirs()
    have_np = 'YES'
except ImportError:
    # numpy is optional; its absence is reported via Python_NUMPY_FOUND.
    pass

# (A duplicate ``libdirs = [get_config_var('LIBDIR')]`` reassignment that
# was a no-op has been removed here.)
# prepend introspected numpy directory so that it is checked before
# system python directory, which may contain a different version
# when virtualenv is used. Debian helpfully symlinks the numpy headers
# as /usr/include/pythonX.Y/numpy :P
libdirs.reverse()
incdirs.reverse()

# location of extension modules relative to prefix (eg. "lib/python3/dist-packages")
moddir = get_python_lib()

# NOTE(review): the comma after Python_DEFINITIONS looks unintended for
# CMake set() syntax -- confirm before relying on that variable.
print('set(Python_DEFINITIONS, "%s")' % get_config_var('BASECFLAGS'), file=out)
print('set(Python_VERSION "%s")' % get_config_var('VERSION'), file=out)
print('set(Python_VERSION_LD "%s")' % (get_config_var('LDVERSION') or get_config_var('VERSION')), file=out)
print('set(Python_INCLUDE_DIRS "%s")' % ';'.join(incdirs), file=out)
print('set(Python_LIBRARY_DIRS "%s")' % ';'.join(libdirs), file=out)
print('set(Python_MODULE_DIR "%s")' % moddir, file=out)
print('set(Python_NUMPY_FOUND %s)' % have_np, file=out)
print('set(Python_VERSION_MAJOR %s)' % sys.version_info[0], file=out)
print('set(Python_VERSION_MINOR %s)' % sys.version_info[1], file=out)
print('set(Python_VERSION_PATCH %s)' % sys.version_info[2], file=out)
print('set(Python_FOUND YES)', file=out)
| {
"content_hash": "bd409db548ea7a160c661f72c1619d8f",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 105,
"avg_line_length": 32.25454545454546,
"alnum_prop": 0.7125140924464487,
"repo_name": "frib-high-level-controls/FLAME",
"id": "ba95c91af05a0a442c60ba9a2d79bde2d4471b8f",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "pyconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1212"
},
{
"name": "C",
"bytes": "618"
},
{
"name": "C++",
"bytes": "385724"
},
{
"name": "CMake",
"bytes": "13830"
},
{
"name": "CSS",
"bytes": "15111"
},
{
"name": "Gnuplot",
"bytes": "14014"
},
{
"name": "HTML",
"bytes": "166105"
},
{
"name": "JavaScript",
"bytes": "95395"
},
{
"name": "Lex",
"bytes": "1714"
},
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "172730"
},
{
"name": "Shell",
"bytes": "825"
},
{
"name": "Yacc",
"bytes": "4111"
}
],
"symlink_target": ""
} |
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.security import Identity
from org.gluu.model.custom.script.type.auth import PersonAuthenticationType
from org.gluu.oxauth.service import UserService, AuthenticationService
from org.gluu.util import StringHelper
class PersonAuthentication(PersonAuthenticationType):
    """Jython custom authentication script for Gluu oxAuth.

    Demonstrates a 3-step flow where failing a step repeats that same
    step (see getNextStep).  Runs under Jython 2.x -- note the Python 2
    ``print`` statements.
    """

    def __init__(self, currentTimeMillis):
        self.currentTimeMillis = currentTimeMillis

    def init(self, customScript, configurationAttributes):
        # No configuration is read; the script is always ready.
        print "Basic (demo reset step). Initialization"
        print "Basic (demo reset step). Initialized successfully"
        return True

    def destroy(self, configurationAttributes):
        print "Basic (demo reset step). Destroy"
        print "Basic (demo reset step). Destroyed successfully"
        return True

    def getApiVersion(self):
        # Custom-script API version implemented by this script.
        return 11

    def getAuthenticationMethodClaims(self, requestParameters):
        return None

    def isValidAuthenticationMethod(self, usageType, configurationAttributes):
        return True

    def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
        return None

    def authenticate(self, configurationAttributes, requestParameters, step):
        """Validate username/password for steps 1-3.

        Records the outcome in the "pass_authentication" working
        parameter so getNextStep can decide whether to repeat the step.
        """
        authenticationService = CdiUtil.bean(AuthenticationService)

        if 1 <= step <= 3:
            print "Basic (demo reset step). Authenticate for step '%s'" % step

            identity = CdiUtil.bean(Identity)
            # Assume failure until authentication succeeds below.
            identity.setWorkingParameter("pass_authentication", False)

            credentials = identity.getCredentials()
            user_name = credentials.getUsername()
            user_password = credentials.getPassword()

            logged_in = False
            if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
                userService = CdiUtil.bean(UserService)
                logged_in = authenticationService.authenticate(user_name, user_password)

            if (not logged_in):
                return False

            identity.setWorkingParameter("pass_authentication", True)
            return True
        else:
            return False

    def getNextStep(self, configurationAttributes, requestParameters, step):
        """Return the step to run next, or -1 to continue normally."""
        print "Basic (demo reset step). Get next step for step '%s'" % step
        identity = CdiUtil.bean(Identity)

        # If the user failed authentication for the current step,
        # redirect back to that same step instead of advancing.
        pass_authentication = identity.getWorkingParameter("pass_authentication")
        if not pass_authentication:
            resultStep = step
            print "Basic (demo reset step). Get next step. Changing step to '%s'" % resultStep
            return resultStep

        return -1

    def prepareForStep(self, configurationAttributes, requestParameters, step):
        print "Basic (demo reset step). Prepare for step '%s'" % step
        if 1 <= step <= 3:
            return True
        else:
            return False

    def getExtraParametersForStep(self, configurationAttributes, step):
        return None

    def getCountAuthenticationSteps(self, configurationAttributes):
        # Three identical username/password steps.
        return 3

    def getPageForStep(self, configurationAttributes, step):
        # Empty string: use the default login page for every step.
        return ""

    def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
        print "Get external logout URL call"
        return None

    def logout(self, configurationAttributes, requestParameters):
        return True
| {
"content_hash": "0a4e8ba3ecefb911b624d6b664b77214",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 107,
"avg_line_length": 37.56842105263158,
"alnum_prop": 0.6626506024096386,
"repo_name": "GluuFederation/oxAuth",
"id": "5a03b4d483765e9e3b7633676ccfabf17d93b945",
"size": "3735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/integrations/basic.reset_to_step/BasicResetToStepExternalAuthenticator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "78"
},
{
"name": "CSS",
"bytes": "91820"
},
{
"name": "HTML",
"bytes": "689156"
},
{
"name": "Java",
"bytes": "7932142"
},
{
"name": "JavaScript",
"bytes": "1475711"
},
{
"name": "Mustache",
"bytes": "2244"
},
{
"name": "Python",
"bytes": "1023958"
},
{
"name": "Shell",
"bytes": "302"
}
],
"symlink_target": ""
} |
import os
import sys

# Settings module used when the environment does not already provide one.
DEFAULT_SETTINGS_MODULE = "sandboxes.fancypages.sandbox.settings"

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", DEFAULT_SETTINGS_MODULE)

    # Imported lazily so the settings module is configured first.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| {
"content_hash": "1fa20e5889f0a6e8a30be10662ec12fa",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 74,
"avg_line_length": 25.8,
"alnum_prop": 0.7015503875968992,
"repo_name": "socradev/django-fancypages",
"id": "5189e7610ba8298439613c5201823e600cdd5eb0",
"size": "280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandboxes/fancypages/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "187646"
},
{
"name": "HTML",
"bytes": "64967"
},
{
"name": "JavaScript",
"bytes": "561457"
},
{
"name": "Makefile",
"bytes": "1282"
},
{
"name": "Python",
"bytes": "526740"
}
],
"symlink_target": ""
} |
"""
test_dashboard:
==========
A module intended for use with Nose.
"""
from __future__ import absolute_import
from unittest import TestCase
from _plotly_utils.exceptions import PlotlyError
import chart_studio.dashboard_objs.dashboard_objs as dashboard
class TestDashboard(TestCase):
def test_invalid_path(self):
my_box = {
"type": "box",
"boxType": "plot",
"fileId": "AdamKulidjian:327",
"shareKey": None,
"title": "box 1",
}
dash = dashboard.Dashboard()
message = (
"Invalid path. Your 'path' list must only contain "
"the strings 'first' and 'second'."
)
self.assertRaisesRegexp(PlotlyError, message, dash._insert, my_box, "third")
def test_box_id_none(self):
my_box = {
"type": "box",
"boxType": "plot",
"fileId": "AdamKulidjian:327",
"shareKey": None,
"title": "box 1",
}
dash = dashboard.Dashboard()
dash.insert(my_box, "above", None)
message = (
"Make sure the box_id is specfied if there is at least "
"one box in your dashboard."
)
self.assertRaisesRegexp(
PlotlyError, message, dash.insert, my_box, "above", None
)
def test_id_not_valid(self):
my_box = {
"type": "box",
"boxType": "plot",
"fileId": "AdamKulidjian:327",
"shareKey": None,
"title": "box 1",
}
message = (
"Your box_id must be a number in your dashboard. To view a "
"representation of your dashboard run get_preview()."
)
dash = dashboard.Dashboard()
dash.insert(my_box, "above", 1)
# insert box
self.assertRaisesRegexp(PlotlyError, message, dash.insert, my_box, "above", 0)
# get box by id
self.assertRaisesRegexp(PlotlyError, message, dash.get_box, 0)
# remove box
self.assertRaisesRegexp(PlotlyError, message, dash.remove, 0)
def test_invalid_side(self):
my_box = {
"type": "box",
"boxType": "plot",
"fileId": "AdamKulidjian:327",
"shareKey": None,
"title": "box 1",
}
message = (
"If there is at least one box in your dashboard, you "
"must specify a valid side value. You must choose from "
"'above', 'below', 'left', and 'right'."
)
dash = dashboard.Dashboard()
dash.insert(my_box, "above", 0)
self.assertRaisesRegexp(
PlotlyError, message, dash.insert, my_box, "somewhere", 1
)
def test_dashboard_dict(self):
my_box = {
"type": "box",
"boxType": "plot",
"fileId": "AdamKulidjian:327",
"shareKey": None,
"title": "box 1",
}
dash = dashboard.Dashboard()
dash.insert(my_box)
dash.insert(my_box, "above", 1)
expected_dashboard = {
"layout": {
"direction": "vertical",
"first": {
"direction": "vertical",
"first": {
"boxType": "plot",
"fileId": "AdamKulidjian:327",
"shareKey": None,
"title": "box 1",
"type": "box",
},
"second": {
"boxType": "plot",
"fileId": "AdamKulidjian:327",
"shareKey": None,
"title": "box 1",
"type": "box",
},
"size": 50,
"sizeUnit": "%",
"type": "split",
},
"second": {"boxType": "empty", "type": "box"},
"size": 1500,
"sizeUnit": "px",
"type": "split",
},
"settings": {},
"version": 2,
}
self.assertEqual(dash["layout"], expected_dashboard["layout"])
| {
"content_hash": "85c7e162e4248f1796971d79241bbb78",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 86,
"avg_line_length": 28.58108108108108,
"alnum_prop": 0.45602836879432623,
"repo_name": "plotly/python-api",
"id": "623420941a200ce9d931fa4528dc3e27324932ca",
"size": "4230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/chart-studio/chart_studio/tests/test_plot_ly/test_dashboard/test_dashboard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""forms.py: appsearch forms"""
import operator
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms import ValidationError
from django.forms.formsets import BaseFormSet
import dateutil.parser
class ModelSelectionForm(forms.Form):
    """
    Default Model selection form.

    Choices come from the configurations registered in ``registry`` for the
    given user, keyed by content type id rather than model name or module
    path.
    """

    model = forms.ChoiceField(label="Search For")

    def __init__(self, registry, user, *args, **kwargs):
        super(ModelSelectionForm, self).__init__(*args, **kwargs)
        self.registry = registry
        self.configurations = registry.get_configurations(user=user)
        model_choices = [
            (config._content_type.id, config.verbose_name)
            for config in self.configurations
        ]
        self.fields["model"].choices = BLANK_CHOICE_DASH + model_choices

    def clean_model(self):
        """Resolve the submitted content type id to its model class."""
        ct_id = self.cleaned_data["model"]
        try:
            model = ContentType.objects.get(id=ct_id).model_class()
        except ContentType.DoesNotExist as e:
            raise ValidationError("Invalid choice - {}".format(e))
        if model not in self.registry:
            raise ValidationError("Invalid choice")
        return model

    def get_selected_configuration(self):
        """Return the registry configuration for the validated model choice."""
        return self.registry[self.cleaned_data["model"]]

    def get_selected_model(self):
        """Return the model class for the validated model choice."""
        return self.get_selected_configuration().model
class ConstraintForm(forms.Form):
    """
    Using an additional constructor parameter ``configuration``, an instance of
    ``registry.ModelSearch``, the form dynamically populates the choices for the ``field`` field.
    """

    type = forms.ChoiceField(
        label="Constraint",
        choices=[
            ("and", "AND"),
            ("or", "OR"),
        ],
    )

    # Dynamically populated list of fields available for filtering
    field = forms.ChoiceField(label="Filter by", choices=[])

    # Dynamically populated list of valid operators for the chosen ``field``
    operator = forms.ChoiceField(label="Constraint type", choices=[])

    term = forms.CharField(label="Search term", required=False)
    end_term = forms.CharField(label="End term", required=False)

    def __init__(self, configuration, *args, **kwargs):
        """
        Receives the configuration for the model to represent (potentially ``None`` if the original
        ``ModelSelectionForm`` is blank or invalid).
        """
        super(ConstraintForm, self).__init__(*args, **kwargs)

        self.configuration = configuration
        if configuration:
            self.fields["field"].choices = configuration.get_searchable_field_choices()

    def _clean_fields(self):
        """
        Workaround for displaying stripped info in the ``operator`` select, and running validation
        against reversed (name, value) choices.

        Because the frontend strips away the queryset language from the operator choices (leaving
        behind doubled 2-tuples of UI text such as ("= equal", "= equal")), the default field-level
        validation of the ``operator`` field will fail by default, since "iexact" is valid but
        "= equal" is not. To solve the problem, this method sets the choices appropriately in
        reverse: ("= equal", "iexact"). When field validation inspects the value, everything will
        check out. During that time, the ``clean_operator()`` method will be called to further
        process the ``operator`` value back to "iexact" (the reversed, faked UI text).

        Once all of the trickery is done, the choices are mapped once more to ("= equal", "= equal")
        so that when the template is rendered with this form, the choices are as they were before.
        """
        field_field = self.fields["field"]
        field_hash = field_field.widget.value_from_datadict(self.data, {}, self.add_prefix("field"))
        operators = self.configuration.get_operator_choices(hash=field_hash)

        # FIX: materialize as lists. On Python 3, ``map()`` returns a one-shot
        # iterator; assigning it to ``choices`` would leave the choices empty
        # after the first pass, silently breaking validation and re-rendering.
        self.fields["operator"].choices = [
            [ui_text, orm_value] for orm_value, ui_text in operators
        ]

        # Allow default validation to occur, including the "clean_operator" method below.
        super(ConstraintForm, self)._clean_fields()

        # Set the operators back to the flat choices of frontend-only values
        self.fields["operator"].choices = [
            (ui_text, ui_text) for orm_value, ui_text in operators
        ]

    def clean_type(self):
        """Convert type into an ``operator.and_`` or ``operator.or_`` reference."""
        # Local renamed from ``type`` to avoid shadowing the builtin.
        constraint_type = self.cleaned_data["type"]
        if constraint_type == "and":
            constraint_type = operator.and_
        elif constraint_type == "or":
            constraint_type = operator.or_
        return constraint_type

    def clean_field(self):
        """Convert ``field`` hash into ORM path tuple."""
        return self.configuration.reverse_field_hash(self.cleaned_data["field"])

    def clean_operator(self):
        """Convert operator into ORM query type such as "icontains" """
        # The default cleaned_value will be the text from the UI, so the choices (which have been
        # reversed for default validation to accept the text as a valid choice) are converted to a
        # dictionary for accessing the intended ORM queryset language value.
        # Input: "= equal"
        # Output: "iexact"
        # Local renamed from ``operator`` to avoid shadowing the imported module.
        ui_value = self.cleaned_data["operator"]
        return dict(self.fields["operator"].choices)[ui_value]

    def clean_term(self):  # noqa: C901
        """Normalizes the ``term`` field to what makes sense for the operator."""
        if "field" not in self.cleaned_data or "operator" not in self.cleaned_data:
            return self.cleaned_data["term"]

        classification = self.configuration.get_field_classification(self.cleaned_data["field"])
        orm_operator = self.cleaned_data["operator"]
        term = self.cleaned_data["term"].strip()

        field_type = self.configuration.field_types[self.cleaned_data["field"]]
        if field_type.choices:
            # The field's database values aren't the display values, but the display values are
            # what the user will search for. e.g., choices=[(1, 'Bob'), (2, 'Mary')]
            choices = {k.lower(): v for v, k in field_type.choices}
            if term.lower() in choices:
                term = choices[term.lower()]
        else:
            # Numbers and strings don't need processing, but the other types should be inspected.
            if classification == "date":
                try:
                    term = dateutil.parser.parse(term)
                except (TypeError, ValueError):
                    raise ValidationError("Unable to parse a date from '{}'".format(term))
            elif classification == "boolean":
                if term.lower() in ("true", "yes"):
                    term = True
                elif term.lower() in ("false", "no"):
                    term = False
                else:
                    raise ValidationError("Boolean value must be either true/false or yes/no.")
            elif classification == "number":
                try:
                    float(term)
                # FIX: float() raises ValueError (not TypeError) on a
                # non-numeric string, so the original check never fired.
                except (TypeError, ValueError):
                    raise ValidationError("Value must be numeric.")

        if orm_operator not in ("isnull", "!isnull") and term in [None, ""]:
            raise ValidationError("This field is required.")

        return term

    def clean_end_term(self):
        """
        Normalize the ``end_term`` field for range queries.

        The ``end_term`` will merge itself into ``term`` if appropriate, making the cleaned value
        of ``term`` a list of the two values suitable for direct use in a queryset "range" lookup.
        Consequently, ``end_term`` will always clean to the empty string.
        """
        if (
            "field" not in self.cleaned_data
            or "operator" not in self.cleaned_data
            or "term" not in self.cleaned_data
        ):
            return self.cleaned_data["end_term"]

        classification = self.configuration.get_field_classification(self.cleaned_data["field"])
        orm_operator = self.cleaned_data["operator"]
        begin_term = self.cleaned_data["term"]
        term = self.cleaned_data["end_term"]

        if orm_operator == "range":
            # FIX: surface parse failures as form errors (mirroring
            # clean_term) instead of letting ValueError propagate uncaught.
            if classification == "date":
                try:
                    term = dateutil.parser.parse(term)
                except (TypeError, ValueError):
                    raise ValidationError("Unable to parse a date from '{}'".format(term))
            elif classification == "number":
                try:
                    term = int(term)
                except (TypeError, ValueError):
                    raise ValidationError("Value must be numeric.")
            else:
                raise ValidationError("Unknown range type %r." % classification)
            self.cleaned_data["term"] = [begin_term, term]

        return ""
class ConstraintFormset(BaseFormSet):
    """Formset of ``ConstraintForm``s sharing one model configuration."""

    def __init__(self, configuration, *args, **kwargs):
        """Remember the configuration until the member forms are built."""
        self.configuration = configuration
        super(ConstraintFormset, self).__init__(*args, **kwargs)

    def _construct_form(self, i, **kwargs):
        """Inject the model configuration into each member form."""
        kwargs["configuration"] = self.configuration
        return super(ConstraintFormset, self)._construct_form(i, **kwargs)
| {
"content_hash": "cd23dd1974df5e51a1bf1ffd1c8a6538",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 100,
"avg_line_length": 39.32786885245902,
"alnum_prop": 0.6217173822426011,
"repo_name": "pivotal-energy-solutions/django-appsearch",
"id": "b286c3d61d2ee153959235c6193a347828176f06",
"size": "9620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appsearch/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13049"
},
{
"name": "JavaScript",
"bytes": "8225"
},
{
"name": "Python",
"bytes": "78018"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
} |
import logging
import random
from django.conf import settings
from django.db import transaction, IntegrityError
from django.utils import timezone
from share import exceptions
from share.harvest.exceptions import HarvesterConcurrencyError
from share.models import (
HarvestJob,
IngestJob,
NormalizedData,
RawDatum,
)
from share.models.core import FormattedMetadataRecord
from share.models.ingest import RawDatumJob
from share.regulate import Regulator
from share.search import SearchIndexer
from share.search.messages import MessageType
from share.util import chunked
logger = logging.getLogger(__name__)
class JobConsumer:
    """Abstract base for locking and consuming queued jobs.

    Subclasses must set ``Job`` (the job model class) and ``lock_field``
    (the field used for advisory locking, so two workers never consume
    jobs for the same resource concurrently) and implement
    ``_consume_job`` / ``_current_versions``.
    """

    Job = None
    lock_field = None

    def __init__(self, task=None):
        # ``task`` is the celery task driving this consumer, if any; several
        # methods behave differently when running synchronously (task=None).
        if self.Job is None or self.lock_field is None:
            raise NotImplementedError
        self.task = task

    def _consume_job(self, job, **kwargs):
        # Subclass hook: do the actual work for one locked, prepared job.
        raise NotImplementedError

    def _current_versions(self, job):
        """Get up-to-date values for the job's `*_version` fields

        Dict from field name to version number
        """
        raise NotImplementedError

    def consume(self, job_id=None, exhaust=True, ignore_disabled=False, superfluous=False, force=False, **kwargs):
        """Consume the given job, or consume an available job if no job is specified.

        Parameters:
            job_id (int, optional): Consume the given job. Defaults to None.
                If the given job cannot be locked, the task will retry indefinitely.
                If the given job belongs to a disabled or deleted Source or SourceConfig, the task will fail.
            exhaust (bool, optional): If True and there are queued jobs, start another task. Defaults to True.
                Used to prevent a backlog. If we have a valid job, spin off another task to eat through
                the rest of the queue.
            ignore_disabled (bool, optional): Consume jobs from disabled source configs and/or deleted sources. Defaults to False.
            superfluous (bool, optional): Consuming a job should be idempotent, and subsequent runs may
                skip doing work that has already been done. If superfluous=True, however, will do all
                work whether or not it's already been done. Default False.
            force (bool, optional): Passed through to ``_consume_job`` (semantics are subclass-specific).

            Additional keyword arguments passed to _consume_job, along with superfluous and force
        """
        with self._locked_job(job_id, ignore_disabled) as job:
            if job is None:
                if job_id is None:
                    logger.info('No %ss are currently available', self.Job.__name__)
                    return
                else:
                    # If an id was given to us, we should have gotten a job
                    job = self.Job.objects.get(id=job_id)  # Force the failure
                    raise Exception('Failed to load {} but then found {!r}.'.format(job_id, job))  # Should never be reached

            assert self.task or not exhaust, 'Cannot pass exhaust=True unless running in an async context'
            if exhaust and job_id is None:
                if force:
                    logger.warning('propagating force=True until queue exhaustion')

                # Spin off a sibling task so the queue keeps draining while
                # this worker handles the job it just locked.
                logger.debug('Spawning another task to consume %s', self.Job.__name__)
                res = self.task.apply_async(self.task.request.args, self.task.request.kwargs)
                logger.info('Spawned %r', res)

            if self._prepare_job(job, superfluous=superfluous):
                logger.info('Consuming %r', job)
                with job.handle():
                    self._consume_job(job, **kwargs, superfluous=superfluous, force=force)

    def _prepare_job(self, job, superfluous):
        # Returns True when the job should actually be consumed; False when it
        # was skipped (already done, marked skipped, or obsolete versions).
        if job.status == self.Job.STATUS.skipped:
            # Need some way to short-circuit a superfluous retry loop
            logger.warning('%r has been marked skipped. Change its status to allow re-running it', job)
            return False

        if self.task and self.task.request.id:
            # Additional attributes for the celery backend
            # Allows for better analytics of currently running tasks
            self.task.update_state(meta={
                'job_id': job.id,
                'source': job.source_config.source.long_title,
                'source_config': job.source_config.label,
            })

            job.task_id = self.task.request.id
            job.save(update_fields=('task_id',))

        if job.completions > 0 and job.status == self.Job.STATUS.succeeded:
            if not superfluous:
                job.skip(job.SkipReasons.duplicated)
                logger.warning('%r has already been consumed. Force a re-run with superfluous=True', job)
                return False
            logger.info('%r has already been consumed. Re-running superfluously', job)

        if not self._update_versions(job):
            job.skip(job.SkipReasons.obsolete)
            return False

        return True

    def _filter_ready(self, qs):
        # Base readiness filter; subclasses narrow this further.
        return qs.filter(
            status__in=self.Job.READY_STATUSES,
        ).exclude(
            claimed=True
        )

    def _locked_job(self, job_id, ignore_disabled=False):
        """Return a context manager yielding a locked job (or None).

        With an explicit ``job_id`` the readiness filter is NOT applied, so a
        specific job can be re-run regardless of its status.
        """
        qs = self.Job.objects.all()
        if job_id is not None:
            logger.debug('Loading %s %d', self.Job.__name__, job_id)
            qs = qs.filter(id=job_id)
        else:
            logger.debug('job_id was not specified, searching for an available job.')

            if not ignore_disabled:
                qs = qs.exclude(
                    source_config__disabled=True,
                ).exclude(
                    source_config__source__is_deleted=True
                )

            qs = self._filter_ready(qs).unlocked(self.lock_field)

        return qs.lock_first(self.lock_field)

    def _update_versions(self, job):
        """Update version fields to the values from self.current_versions

        Return True if successful, else False.
        """
        current_versions = self._current_versions(job)
        if all(getattr(job, f) == v for f, v in current_versions.items()):
            # No updates required
            return True

        if job.completions > 0:
            logger.warning('%r is outdated but has previously completed, skipping...', job)
            return False

        try:
            with transaction.atomic():
                for f, v in current_versions.items():
                    setattr(job, f, v)
                job.save()
            logger.warning('%r has been updated to the versions: %s', job, current_versions)
            return True
        except IntegrityError:
            # A job row with the newer versions already exists (unique
            # constraint) -- this one is obsolete.
            logger.warning('A newer version of %r already exists, skipping...', job)
            return False
class HarvestJobConsumer(JobConsumer):
    """Consumes ``HarvestJob``s: runs the source's harvester over the job's
    date range, records the raw data gathered, and (optionally) schedules
    ingest jobs for each datum."""

    Job = HarvestJob
    lock_field = 'source_config'

    def _filter_ready(self, qs):
        """Additionally require the job's harvest window to have opened."""
        qs = super()._filter_ready(qs)
        return qs.filter(
            end_date__lte=timezone.now().date(),
            source_config__harvest_after__lte=timezone.now().time(),
        )

    def _current_versions(self, job):
        return {
            'source_config_version': job.source_config.version,
            'harvester_version': job.source_config.harvester.version,
        }

    def _consume_job(self, job, force, superfluous, limit=None, ingest=True):
        """Harvest the job's date range; schedule ingest for new data.

        With superfluous=True, already-seen (not newly created) data is
        re-ingested as well. With ingest=False the harvest still runs but
        nothing is scheduled.
        """
        try:
            if ingest:
                datum_gen = (datum for datum in self._harvest(job, force, limit) if datum.created or superfluous)
                for chunk in chunked(datum_gen, 500):
                    self._bulk_schedule_ingest(job, chunk)
            else:
                # Drain the generator so the harvest (and its bookkeeping in
                # _harvest's finally block) still happens.
                for _ in self._harvest(job, force, limit):
                    pass
        except HarvesterConcurrencyError as e:
            if not self.task:
                raise
            # If job_id was specified there's a chance that the advisory lock was not, in fact, acquired.
            # If so, retry indefinitely to preserve existing functionality.
            # Use random to add jitter to help break up locking issues
            # Kinda hacky, allow a stupidly large number of retries as there is no options for infinite
            raise self.task.retry(
                exc=e,
                max_retries=99999,
                countdown=(random.random() + 1) * min(settings.CELERY_RETRY_BACKOFF_BASE ** self.task.request.retries, 60 * 15)
            )

    def _harvest(self, job, force, limit):
        """Generator over harvested data; links each datum to the job.

        The RawDatumJob rows are created in the ``finally`` block so the
        linkage survives even when harvesting raises part-way through.
        """
        error = None
        datum_ids = []
        logger.info('Harvesting %r', job)
        harvester = job.source_config.get_harvester()

        try:
            for datum in harvester.harvest_date_range(job.start_date, job.end_date, limit=limit, force=force):
                datum_ids.append(datum.id)
                yield datum
        except Exception as e:
            error = e
            raise error
        finally:
            try:
                RawDatumJob.objects.bulk_create([
                    RawDatumJob(job=job, datum_id=datum_id)
                    for datum_id in datum_ids
                ])
            except Exception as e:
                logger.exception('Failed to connect %r to raw data', job)
                # Avoid shadowing the original error
                if not error:
                    raise e

    def _bulk_schedule_ingest(self, job, datums):
        """Create IngestJobs for ``datums`` and queue them (when permitted)."""
        # HACK to allow scheduling ingest tasks without cyclical imports
        from share.tasks import ingest

        job_kwargs = {
            'source_config': job.source_config,
            'source_config_version': job.source_config.version,
            'transformer_version': job.source_config.transformer.version,
            'regulator_version': Regulator.VERSION,
        }
        created_jobs = IngestJob.objects.bulk_get_or_create(
            [IngestJob(raw_id=datum.id, suid_id=datum.suid_id, **job_kwargs) for datum in datums]
        )

        if not settings.INGEST_ONLY_CANONICAL_DEFAULT or job.source_config.source.canonical:
            # FIX: loop variable renamed from ``job``, which shadowed the
            # HarvestJob parameter of this method.
            for created_job in created_jobs:
                ingest.delay(job_id=created_job.id)
class IngestJobConsumer(JobConsumer):
    """Consumes ``IngestJob``s: transforms and regulates a suid's most recent
    raw datum into NormalizedData, saves formatted metadata records, and
    queues search indexing."""

    Job = IngestJob
    lock_field = 'suid'

    # NOTE(review): defined but not referenced in this class as shown --
    # presumably used by callers or retry logic elsewhere; confirm.
    MAX_RETRIES = 5

    def __init__(self, *args, only_canonical=False, **kwargs):
        super().__init__(*args, **kwargs)
        # When True, only consume jobs from canonical sources (see _filter_ready).
        self.only_canonical = only_canonical

    def consume(self, job_id=None, **kwargs):
        # TEMPORARY HACK: The query to find an unclaimed job (when job_id isn't given)
        # is crazy-slow to the point that workers are barely getting anything else done
        # and the urgent ingest queue is backing up. All urgent tasks have a job_id,
        # so we can skip those without a job_id and catch up in the task queue without
        # negatively affecting OSF.
        # REMINDER: when you remove this, also un-skip tests in
        # tests/share/tasks/test_job_consumers
        if job_id is None:
            task = self.task
            logger.warning('Skipping ingest task with job_id=None (task_id: %s)', task.request.id if task else None)
            return
        return super().consume(job_id=job_id, **kwargs)

    def _current_versions(self, job):
        return {
            'source_config_version': job.source_config.version,
            'transformer_version': job.source_config.transformer.version,
            'regulator_version': Regulator.VERSION,
        }

    def _filter_ready(self, qs):
        qs = super()._filter_ready(qs)
        if self.only_canonical:
            qs = qs.filter(
                source_config__source__canonical=True,
            )
        return qs

    def _consume_job(self, job, superfluous, force, apply_changes=True, index=True, urgent=False,
                     pls_format_metadata=True, metadata_formats=None):
        """Run the transform/regulate pipeline for the job's suid.

        Skips transform/regulate when a NormalizedData for the most recent
        raw datum already exists (unless superfluous=True), then saves
        formatted metadata records and queues indexing.
        """
        datum = None
        graph = None

        most_recent_raw = job.suid.most_recent_raw_datum()

        # Check whether we've already done transform/regulate
        if not superfluous:
            datum = job.ingested_normalized_data.filter(raw=most_recent_raw).order_by('-created_at').first()

        if superfluous or datum is None:
            graph = self._transform(job, most_recent_raw)
            if not graph:
                # Transform failed or produced no output; job already
                # marked failed / raw flagged inside _transform.
                return
            graph = self._regulate(job, graph)
            if not graph:
                return
            datum = NormalizedData.objects.create(
                data=graph.to_jsonld(),
                source=job.suid.source_config.source.user,
                raw=most_recent_raw,
            )
            job.ingested_normalized_data.add(datum)

        if pls_format_metadata:
            records = FormattedMetadataRecord.objects.save_formatted_records(
                job.suid,
                record_formats=metadata_formats,
                normalized_datum=datum,
            )
            # TODO consider whether to handle the possible but rare-to-nonexistent case where
            # `records` is empty this time but there were records in the past -- would need to
            # remove the previous from the index
            if records and index:
                self._queue_for_indexing(job.suid, urgent)

    def _transform(self, job, raw):
        """Transform ``raw`` into a graph; returns None on failure/empty."""
        transformer = job.suid.source_config.get_transformer()

        try:
            graph = transformer.transform(raw)
        except exceptions.TransformError as e:
            job.fail(e)
            return None

        if not graph:
            if not raw.normalizeddata_set.exists():
                # Remember that this datum legitimately produces nothing.
                logger.warning('Graph was empty for %s, setting no_output to True', raw)
                RawDatum.objects.filter(id=raw.id).update(no_output=True)
            else:
                logger.warning('Graph was empty for %s, but a normalized data already exists for it', raw)
            return None

        return graph

    def _regulate(self, job, graph):
        # Regulator mutates ``graph`` in place; returns None on failure.
        try:
            Regulator(job).regulate(graph)
            return graph
        except exceptions.RegulateError as e:
            job.fail(e)
            return None

    def _queue_for_indexing(self, suid, urgent):
        """Send an INDEX_SUID message so the suid gets (re)indexed."""
        indexer = SearchIndexer(self.task.app) if self.task else SearchIndexer()
        indexer.send_messages(MessageType.INDEX_SUID, [suid.id], urgent=urgent)
| {
"content_hash": "5d06b1c58e96126ca5274109eb184cd5",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 130,
"avg_line_length": 39.985955056179776,
"alnum_prop": 0.5939585528626624,
"repo_name": "aaxelb/SHARE",
"id": "bbf391c0709efc57c72c06be0d1ced35054444dd",
"size": "14235",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "share/tasks/jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3321"
},
{
"name": "Dockerfile",
"bytes": "1143"
},
{
"name": "Gherkin",
"bytes": "4346"
},
{
"name": "HTML",
"bytes": "4834"
},
{
"name": "Python",
"bytes": "1450482"
},
{
"name": "Shell",
"bytes": "408"
}
],
"symlink_target": ""
} |
#
# GaloisBenchmark.py
#
# Author(s):
# Alessio Parma <alessio.parma@gmail.com>
#
# Copyright (c) 2012-2016 Alessio Parma <alessio.parma@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
from Common import tag
from Galois.Starter import runSimulations
parameters = [(4, 64), (8, 96), (16, 128), (32, 256)]
def run():
    """Run the Galois benchmark for every (machineCount, frameCount) pair,
    appending one CSV row per run to galois-benchmark-<tag>.csv."""
    outputName = "galois-benchmark-{0}.csv".format(tag)
    output = open(outputName, "w")
    for (machineCount, frameCount) in parameters:
        print("### Running Galois with (mc = {0}, fc = {1}) ###".format(machineCount, frameCount))
        start = time.time()
        usedMemory = runSimulations(machineCount, frameCount)
        end = time.time()
        # Wall-clock duration in minutes.
        execTime = (end-start)/60.0
        # CSV row: machine count; minutes elapsed; MB used.
        # NOTE(review): frameCount is not recorded in the row -- confirm
        # whether that is intentional.
        output.write("{0};{1};{2}\n".format(machineCount, execTime, int(usedMemory)))
        # Flush after each run so partial results survive a crash.
        output.flush()
        print("### Execution time: {0} minutes ###".format(execTime))
        print("### Used memory: {0} MB ###".format(usedMemory))
        print("")
output.close() | {
"content_hash": "35a6c386356aa3ee91f7e3691ba85d41",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 98,
"avg_line_length": 43.297872340425535,
"alnum_prop": 0.7007371007371007,
"repo_name": "pomma89/Dessert",
"id": "ce9fe3eb3030a7e54c223670219f81af0b30c428",
"size": "2037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dessert.Benchmarks/GaloisBenchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "42"
},
{
"name": "Boo",
"bytes": "6758"
},
{
"name": "C#",
"bytes": "1543699"
},
{
"name": "F#",
"bytes": "41883"
},
{
"name": "PowerShell",
"bytes": "13940"
},
{
"name": "Python",
"bytes": "157072"
},
{
"name": "Shell",
"bytes": "30"
},
{
"name": "Visual Basic",
"bytes": "30288"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import shop.models
class Migration(migrations.Migration):
    # Auto-generated migration: points Product.image at an overwriting
    # storage backend and a generated upload filename. Migration files are
    # frozen by convention -- do not edit the operations.

    dependencies = [
        ('shop', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='image',
            field=models.ImageField(blank=True, null=True, storage=shop.models.OverwriteFileSystemStorage(), upload_to=shop.models.generate_product_filename),
        ),
    ]
| {
"content_hash": "f07ef7c4e2501f1585fa786bc455a6b2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 158,
"avg_line_length": 25.842105263157894,
"alnum_prop": 0.6476578411405295,
"repo_name": "uppsaladatavetare/foobar-api",
"id": "16bc97b57448a0b3114520b02f103aa831386b67",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/shop/migrations/0002_auto_20151028_1415.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3317"
},
{
"name": "HTML",
"bytes": "10880"
},
{
"name": "JavaScript",
"bytes": "10604"
},
{
"name": "Makefile",
"bytes": "796"
},
{
"name": "Python",
"bytes": "318730"
}
],
"symlink_target": ""
} |
"""
WSGI config for web_dev_final_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "augmented-pandemic.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| {
"content_hash": "6092c3b845659522235e91574b59ac90",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 28.22222222222222,
"alnum_prop": 0.7933070866141733,
"repo_name": "shintouki/augmented-pandemic",
"id": "77ceae98f4f514195549f7bd116a4f06407744f3",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "augmented-pandemic/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2353"
},
{
"name": "HTML",
"bytes": "19968"
},
{
"name": "JavaScript",
"bytes": "53264"
},
{
"name": "Python",
"bytes": "37545"
}
],
"symlink_target": ""
} |
import os

# Template variable values; keys are wrapped in braces ("{cube_sdk_version}")
# further below before substitution. (Python 2 script.)
vars = {
    'cube_sdk_version': '1.0.43-SNAPSHOT'
}

# Files (relative to this script) regenerated from their template/ copies.
files = [
    'README.md',
    'README-cn.md',
    'core/pom.xml',
    'core/gradle.properties',
]

current_dir = os.path.dirname(os.path.realpath(__file__))
print current_dir  # Python 2 print statement

# Read templates from template/, write results next to this script.
src_dir = current_dir + '/template/'
dst_dir = current_dir + '/'
def update_var_for_file(file, vars):
    """Copy ``src_dir + file`` to ``dst_dir + file``, replacing every
    occurrence of each key of ``vars`` with its value, line by line.

    :param file: path of the file, relative to ``src_dir``/``dst_dir``
    :param vars: mapping of placeholder text -> replacement text
    """
    src_file = src_dir + file
    dst_file = dst_dir + file
    print("update_var_for_file: %s => %s" % (src_file, dst_file))
    # `with` guarantees both handles are closed even if an I/O error occurs
    # (the original leaked them on error), and .items() works on Python 2
    # and 3 alike, unlike the former .iteritems().
    with open(src_file) as infile, open(dst_file, 'w') as outfile:
        for line in infile:
            for src, target in vars.items():
                line = line.replace(src, target)
            outfile.write(line)
# Wrap every variable name in braces — the template files use `{name}`
# placeholders. (.items() replaces the Python-2-only .iteritems().)
real_vars = {'{' + src + '}': target for src, target in vars.items()}
for f in files:
    update_var_for_file(f, real_vars)
| {
"content_hash": "93db9dd9e230c77bedbb6dd0c7dff775",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 64,
"avg_line_length": 24.216216216216218,
"alnum_prop": 0.5736607142857143,
"repo_name": "muxiaolin/cube-sdk-dev",
"id": "44d3e6c14a24bbf0fe4e3931bf5bfe3a3edc831f",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build-template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "422130"
},
{
"name": "Python",
"bytes": "896"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
"""Test forms."""
from app.public.forms import LoginForm
from app.user.forms import RegisterForm
class TestRegisterForm:
    """Tests for the user registration form."""

    def test_validate_user_already_registered(self, user):
        """Validation fails when the username is already taken."""
        register_form = RegisterForm(
            username=user.username,
            email='foo@bar.com',
            password='example',
            confirm='example',
        )
        assert register_form.validate() is False
        assert 'Username already registered' in register_form.username.errors

    def test_validate_email_already_registered(self, user):
        """Validation fails when the email address is already taken."""
        register_form = RegisterForm(
            username='unique',
            email=user.email,
            password='example',
            confirm='example',
        )
        assert register_form.validate() is False
        assert 'Email already registered' in register_form.email.errors

    def test_validate_success(self, db):
        """Validation succeeds for a fresh username and email."""
        register_form = RegisterForm(
            username='newusername',
            email='new@test.test',
            password='example',
            confirm='example',
        )
        assert register_form.validate() is True
class TestLoginForm:
    """Tests for the login form."""

    def test_validate_success(self, user):
        """A known user with the right password logs in."""
        user.set_password('example')
        user.save()
        login_form = LoginForm(username=user.username, password='example')
        assert login_form.validate() is True
        assert login_form.user == user

    def test_validate_unknown_username(self, db):
        """An unrecognised username is rejected."""
        login_form = LoginForm(username='unknown', password='example')
        assert login_form.validate() is False
        assert 'Unknown username' in login_form.username.errors
        assert login_form.user is None

    def test_validate_invalid_password(self, user):
        """A wrong password is rejected."""
        user.set_password('example')
        user.save()
        login_form = LoginForm(username=user.username, password='wrongpassword')
        assert login_form.validate() is False
        assert 'Invalid password' in login_form.password.errors

    def test_validate_inactive_user(self, user):
        """A deactivated account cannot log in even with valid credentials."""
        user.active = False
        user.set_password('example')
        user.save()
        # Correct username and password, but user is not activated
        login_form = LoginForm(username=user.username, password='example')
        assert login_form.validate() is False
        assert 'User not activated' in login_form.username.errors
| {
"content_hash": "0ce8a1ec166bb181e35f06508577a68b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 74,
"avg_line_length": 36.417910447761194,
"alnum_prop": 0.6331967213114754,
"repo_name": "iulian4ik/HeroesOfProgramming",
"id": "c701fb750f115b28db5d02b45bd16211e56764c0",
"size": "2464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "8805"
},
{
"name": "JavaScript",
"bytes": "240856"
},
{
"name": "Python",
"bytes": "27891"
}
],
"symlink_target": ""
} |
from ._base import *

# DJANGO ######################################################################
ALLOWED_HOSTS = ('*', )

CACHES['default'].update({'BACKEND': 'redis_lock.django_cache.RedisCache'})

CSRF_COOKIE_SECURE = False  # Don't require HTTPS for CSRF cookie
SESSION_COOKIE_SECURE = False  # Don't require HTTPS for session cookie

# Use a dedicated test database name for both the regular and TEST settings.
_DATABASE_NAME = 'test_' + DATABASES['default']['NAME']
DATABASES['default'].update({
    'NAME': _DATABASE_NAME,
    'TEST': {
        'NAME': _DATABASE_NAME,
        # See: https://docs.djangoproject.com/en/1.7/ref/settings/#serialize
        'SERIALIZE': False,
    },
})

INSTALLED_APPS += (
    'fluent_pages.pagetypes.fluentpage',
    'icekit.tests',
    'icekit_events.tests',
    'glamkit_collections.contrib.work_creator.plugins.artwork',
    'glamkit_collections.contrib.work_creator.plugins.film',
    'glamkit_collections.contrib.work_creator.plugins.game',
    'glamkit_collections.contrib.work_creator.plugins.moving_image',
    'glamkit_collections.contrib.work_creator.plugins.organization',
    'glamkit_collections.contrib.work_creator.plugins.person',
)

ROOT_URLCONF = 'icekit.tests.urls'

# Make the icekit test templates take precedence over the project templates.
# (The original line ended with a stray trailing comma that built and
# discarded a one-tuple; removed — behaviour is unchanged.)
TEMPLATES_DJANGO['DIRS'].insert(
    0, os.path.join(ICEKIT_DIR, 'tests', 'templates'))

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

TIME_ZONE = 'Australia/Sydney'  # Default: America/Chicago

DDF_FILL_NULLABLE_FIELDS = False
DDF_IGNORE_FIELDS = ['*_ptr']  # Ignore django-polymorphic pointer fields

DEBUG = True

# Disable all django-compressor feature to speed up test runs
COMPRESS_ENABLED = False
COMPRESS_OFFLINE = False
COMPRESS_PRECOMPILERS = []
COMPRESS_CSS_FILTERS = []

# ICEKIT ######################################################################
# RESPONSE_PAGE_PLUGINS = ['ImagePlugin', ]

# HAYSTACK ####################################################################
# HAYSTACK_CONNECTIONS = {
#     'default': {
#         'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
#     },
# }

# TRAVIS ######################################################################
if 'TRAVIS' in os.environ:
    NOSE_ARGS.remove('--with-progressive')
    # Unconfigure django-hosts
    INSTALLED_APPS = tuple([
        app for app in INSTALLED_APPS if app != 'django_hosts'
    ])
    MIDDLEWARE_CLASSES = tuple([
        classname for classname in MIDDLEWARE_CLASSES
        if 'django_hosts' not in classname
    ])
| {
"content_hash": "6c79e338c99340605d33f10d100db8b0",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 30.525641025641026,
"alnum_prop": 0.6157076858462831,
"repo_name": "ic-labs/django-icekit",
"id": "5374edf32e59aeabd8e8d174caee05fbc44f6271",
"size": "2381",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "icekit/project/settings/_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
from murano.db import models
from murano.db import session as db_session
from murano.services import actions
from murano.services import states
class SessionServices(object):
    """Database-level operations on environment configuration sessions."""

    @staticmethod
    def get_sessions(environment_id, state=None):
        """Get list of sessions for specified environment.

        :param environment_id: Environment Id
        :param state: murano.services.states.EnvironmentStatus
        :return: Sessions for specified Environment, if SessionState is
        not defined all sessions for specified environment is returned.
        """
        unit = db_session.get_session()
        # Here we duplicate logic for reducing calls to database
        # Checks for validation is same as in validate.
        query = unit.query(models.Session).filter(
            # Get all session for this environment
            models.Session.environment_id == environment_id,
            # Only sessions with same version as current env version are valid
        )
        if state:
            # in this state, if state is not specified return in all states
            query = query.filter(models.Session.state == state)
        # Newest version first; most recently updated first within a version.
        return query.order_by(models.Session.version.desc(),
                              models.Session.updated.desc()).all()

    @staticmethod
    def create(environment_id, user_id):
        """Creates session object for specific environment for specified user.

        :param environment_id: Environment Id
        :param user_id: User Id
        :return: Created session (state OPENED, persisted to the database)
        """
        unit = db_session.get_session()
        environment = unit.query(models.Environment).get(environment_id)
        session = models.Session()
        session.environment_id = environment.id
        session.user_id = user_id
        session.state = states.SessionState.OPENED
        # used for checking if other sessions was deployed before this one
        session.version = environment.version
        # all changes to environment is stored here, and translated to
        # environment only after deployment completed
        session.description = environment.description
        with unit.begin():
            unit.add(session)
        return session

    @staticmethod
    def validate(session):
        """Validates session.

        A session is valid only if no other session for the same
        environment was already deployed (environment version moved on)
        or is currently in the DEPLOYING state.

        :param session: Session for validation
        :return: True if the session may still be deployed, else False
        """
        unit = db_session.get_session()
        # if environment version is higher than version on which current
        # session is created then other session was already deployed
        current_env = unit.query(models.Environment).\
            get(session.environment_id)
        if current_env.version > session.version:
            return False
        # if other session is deploying now current session is invalid
        other_is_deploying = unit.query(models.Session).filter_by(
            environment_id=session.environment_id,
            state=states.SessionState.DEPLOYING
        ).count() > 0
        if session.state == states.SessionState.OPENED and other_is_deploying:
            return False
        return True

    @staticmethod
    def deploy(session, environment, unit, context):
        """Prepares and deploys environment.

        Prepares environment for deployment and sends the deployment
        command to the orchestration engine.

        :param session: session that is going to be deployed
        :param environment: environment the session belongs to
        :param unit: SQLAlchemy session
        :param context: execution context passed through to the
            orchestration engine (the original docstring named this
            ``token``; the actual parameter is ``context``)
        """
        # `Objects is None` in the description marks the environment as
        # deleted; presumably submitting a None action name makes the
        # engine tear it down — confirm against ActionServices.submit_task.
        deleted = session.description['Objects'] is None
        action_name = None if deleted else 'deploy'
        actions.ActionServices.submit_task(
            action_name, environment.id,
            {}, environment, session,
            context, unit)
| {
"content_hash": "ae204b649226fd92055e90aa1bd1598e",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 36.83177570093458,
"alnum_prop": 0.6536412078152753,
"repo_name": "openstack/murano",
"id": "3167eaeb0ed1028e4e2da530accc3f36fbe47fe1",
"size": "4555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/db/services/sessions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "2026"
},
{
"name": "PowerShell",
"bytes": "2966"
},
{
"name": "Python",
"bytes": "1817159"
},
{
"name": "Shell",
"bytes": "37531"
}
],
"symlink_target": ""
} |
"""
Test script for the 'cmd' module
Original by Michael Schneider
"""
import cmd
import sys
import re
import unittest
import io
from test import support
class samplecmdclass(cmd.Cmd):
    """
    Instance the sampleclass:
    >>> mycmd = samplecmdclass()

    Test for the function parseline():
    >>> mycmd.parseline("")
    (None, None, '')
    >>> mycmd.parseline("?")
    ('help', '', 'help ')
    >>> mycmd.parseline("?help")
    ('help', 'help', 'help help')
    >>> mycmd.parseline("!")
    ('shell', '', 'shell ')
    >>> mycmd.parseline("!command")
    ('shell', 'command', 'shell command')
    >>> mycmd.parseline("func")
    ('func', '', 'func')
    >>> mycmd.parseline("func arg1")
    ('func', 'arg1', 'func arg1')

    Test for the function onecmd():
    >>> mycmd.onecmd("")
    >>> mycmd.onecmd("add 4 5")
    9
    >>> mycmd.onecmd("")
    9
    >>> mycmd.onecmd("test")
    *** Unknown syntax: test

    Test for the function emptyline():
    >>> mycmd.emptyline()
    *** Unknown syntax: test

    Test for the function default():
    >>> mycmd.default("default")
    *** Unknown syntax: default

    Test for the function completedefault():
    >>> mycmd.completedefault()
    This is the completedefault methode
    >>> mycmd.completenames("a")
    ['add']

    Test for the function completenames():
    >>> mycmd.completenames("12")
    []
    >>> mycmd.completenames("help")
    ['help']

    Test for the function complete_help():
    >>> mycmd.complete_help("a")
    ['add']
    >>> mycmd.complete_help("he")
    ['help']
    >>> mycmd.complete_help("12")
    []
    >>> sorted(mycmd.complete_help(""))
    ['add', 'exit', 'help', 'shell']

    Test for the function do_help():
    >>> mycmd.do_help("testet")
    *** No help on testet
    >>> mycmd.do_help("add")
    help text for add
    >>> mycmd.onecmd("help add")
    help text for add
    >>> mycmd.do_help("")
    <BLANKLINE>
    Documented commands (type help <topic>):
    ========================================
    add  help
    <BLANKLINE>
    Undocumented commands:
    ======================
    exit  shell
    <BLANKLINE>

    Test for the function print_topics():
    >>> mycmd.print_topics("header", ["command1", "command2"], 2 ,10)
    header
    ======
    command1
    command2
    <BLANKLINE>

    Test for the function columnize():
    >>> mycmd.columnize([str(i) for i in range(20)])
    0  1  2  3  4  5  6  7  8  9  10  11  12  13  14  15  16  17  18  19
    >>> mycmd.columnize([str(i) for i in range(20)], 10)
    0  7   14
    1  8   15
    2  9   16
    3  10  17
    4  11  18
    5  12  19
    6  13

    This is an interactive test, put some commands in the cmdqueue attribute
    and let it execute
    This test includes the preloop(), postloop(), default(), emptyline(),
    parseline(), do_help() functions
    >>> mycmd.use_rawinput=0
    >>> mycmd.cmdqueue=["", "add", "add 4 5", "help", "help add","exit"]
    >>> mycmd.cmdloop()
    Hello from preloop
    help text for add
    *** invalid number of arguments
    9
    <BLANKLINE>
    Documented commands (type help <topic>):
    ========================================
    add  help
    <BLANKLINE>
    Undocumented commands:
    ======================
    exit  shell
    <BLANKLINE>
    help text for add
    Hello from postloop
    """

    # --- hooks and commands exercised by the doctests above ---

    def preloop(self):
        # Hook run once before the command loop starts.
        print("Hello from preloop")

    def postloop(self):
        # Hook run once after the command loop finishes.
        print("Hello from postloop")

    def completedefault(self, *ignored):
        # Fallback completer used when a command has no complete_* method.
        print("This is the completedefault methode")

    def complete_command(self):
        # Dedicated completer stub (not reached by the doctests above).
        print("complete command")

    def do_shell(self, s):
        # Handler for '!' lines; intentionally a no-op here.
        pass

    def do_add(self, s):
        # Add exactly two integer arguments and print their sum.
        l = s.split()
        if len(l) != 2:
            print("*** invalid number of arguments")
            return
        try:
            l = [int(i) for i in l]
        except ValueError:
            print("*** arguments should be numbers")
            return
        print(l[0]+l[1])

    def help_add(self):
        # Help topic for 'add' (this is what makes it a "documented" command).
        print("help text for add")
        return

    def do_exit(self, arg):
        # Returning a true value terminates cmdloop().
        return True
class TestAlternateInput(unittest.TestCase):
    """Exercise Cmd.cmdloop() driven by StringIO stand-ins for stdin/stdout."""

    class simplecmd(cmd.Cmd):
        # Minimal interpreter: `print <arg>` echoes its argument; EOF exits.
        def do_print(self, args):
            print(args, file=self.stdout)

        def do_EOF(self, args):
            return True

    class simplecmd2(simplecmd):
        # Variant that reports EOF on stdout before stopping the loop.
        def do_EOF(self, args):
            print('*** Unknown syntax: EOF', file=self.stdout)
            return True

    def test_file_with_missing_final_nl(self):
        """A command on the last line runs even without a trailing newline."""
        # Locals renamed: the originals shadowed the `cmd` module and the
        # `input` builtin.
        command_stream = io.StringIO("print test\nprint test2")
        captured = io.StringIO()
        interpreter = self.simplecmd(stdin=command_stream, stdout=captured)
        interpreter.use_rawinput = False
        interpreter.cmdloop()
        self.assertMultiLineEqual(captured.getvalue(),
                                  ("(Cmd) test\n"
                                   "(Cmd) test2\n"
                                   "(Cmd) "))

    def test_input_reset_at_EOF(self):
        """After EOF, rebinding stdin/stdout allows a fresh cmdloop() run."""
        command_stream = io.StringIO("print test\nprint test2")
        captured = io.StringIO()
        interpreter = self.simplecmd2(stdin=command_stream, stdout=captured)
        interpreter.use_rawinput = False
        interpreter.cmdloop()
        self.assertMultiLineEqual(captured.getvalue(),
                                  ("(Cmd) test\n"
                                   "(Cmd) test2\n"
                                   "(Cmd) *** Unknown syntax: EOF\n"))
        # Re-run the same interpreter with new streams; it must not remember
        # the previous EOF.
        command_stream = io.StringIO("print \n\n")
        captured = io.StringIO()
        interpreter.stdin = command_stream
        interpreter.stdout = captured
        interpreter.cmdloop()
        self.assertMultiLineEqual(captured.getvalue(),
                                  ("(Cmd) \n"
                                   "(Cmd) \n"
                                   "(Cmd) *** Unknown syntax: EOF\n"))
def test_main(verbose=None):
    # Run both the doctests embedded in samplecmdclass and the unittest
    # class. `test_cmd` is this very module, imported so run_doctest can
    # scan its docstrings.
    from test import test_cmd
    support.run_doctest(test_cmd, verbose)
    support.run_unittest(TestAlternateInput)
def test_coverage(coverdir):
    """Re-run the test suite under the `trace` module and write a line
    coverage report for the `cmd` module into *coverdir*."""
    trace_module = support.import_module('trace')
    coverage_tracer = trace_module.Trace(
        ignoredirs=[sys.base_prefix, sys.base_exec_prefix],
        trace=0, count=1)
    # Reload `cmd` so its import-time lines are counted, then run the suite.
    coverage_tracer.run('import importlib; importlib.reload(cmd); test_main()')
    coverage_results = coverage_tracer.results()
    print("Writing coverage results...")
    coverage_results.write_results(show_missing=True, summary=True,
                                   coverdir=coverdir)
if __name__ == "__main__":
if "-c" in sys.argv:
test_coverage('/tmp/cmd.cover')
elif "-i" in sys.argv:
samplecmdclass().cmdloop()
else:
test_main()
| {
"content_hash": "8ed7a71469bc3b19350440888b67221a",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 76,
"avg_line_length": 25.765432098765434,
"alnum_prop": 0.5547037214502476,
"repo_name": "batermj/algorithm-challenger",
"id": "dd8981f8935b96a3462245fc2bb67478388a72e4",
"size": "6261",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_cmd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
} |
import os
from config import *
#Zdope_command = "/usr/local/bin/Zdope"
def get_dope_score(pdb_file):
    """Run the external Zdope scorer on *pdb_file* and return its score.

    :param pdb_file: path to the PDB file to score
    :return: the score parsed from the second-to-last output line, which
        is expected to look like ``<label>: <float>``
    """
    # `with` closes the pipe even if parsing fails (the original leaked it).
    with os.popen("{0} {1}".format(Zdope_command, pdb_file)) as result:
        result_lines = [l.strip() for l in result]
    score_line = result_lines[-2]
    # Lines were already stripped above, so split on the colon directly.
    return float(score_line.split(":")[1])
| {
"content_hash": "da8264bd2e3683475220b130c0f9f52b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 30,
"alnum_prop": 0.65,
"repo_name": "Bolt64/ProtAnneal",
"id": "b6e3462d9229c50762ac328b8fe8d887b680375c",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/zdope_score.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37078"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import numpy as np
import tensorflow as tf
from tfrecords_io import get_padded_batch
from tfrecords_io import get_spliced_batch
from tf_datasets import SequenceDataset
tf.logging.set_verbosity(tf.logging.INFO)
class TfrecordsIoTest(tf.test.TestCase):
  """Smoke tests for the tfrecords reader utilities.

  NOTE(review): these tests read utterance lists from FLAGS.config_dir and
  the matching <utt_id>.tfrecords files from FLAGS.data_dir, so prepared
  data must exist on disk; they log tensor shapes rather than asserting
  exact values.
  """

  def testReadPaddedBatchTfrecords(self):
    """Test reading padded sequences from tfrecords."""
    name = 'valid'
    # Each line of the .lst file is "<utt_id> <inputs_path> <labels_path>".
    config_file = open(os.path.join(FLAGS.config_dir, name + ".lst"))
    tfrecords_lst = []
    for line in config_file:
      utt_id, inputs_path, labels_path = line.strip().split()
      tfrecords_name = os.path.join(FLAGS.data_dir, name,
                                    utt_id + ".tfrecords")
      tfrecords_lst.append(tfrecords_name)
    with tf.Graph().as_default():
      inputs, labels, lengths = get_padded_batch(
          tfrecords_lst, FLAGS.batch_size, FLAGS.input_dim,
          FLAGS.output_dim, num_enqueuing_threads=FLAGS.num_threads,
          num_epochs=FLAGS.num_epochs, infer=False)
      init = tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer())
      sess = tf.Session()
      sess.run(init)
      # TF1 queue-runner plumbing: threads fill the input queues until the
      # epoch limit raises OutOfRangeError.
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      try:
        while not coord.should_stop():
          # Print an overview fairly often.
          tr_inputs, tr_labels, tr_lengths = sess.run([
              inputs, labels, lengths])
          tf.logging.info('inputs shape : '+ str(tr_inputs.shape))
          tf.logging.info('labels shape : ' + str(tr_labels.shape))
          tf.logging.info('actual lengths : ' + str(tr_lengths))
      except tf.errors.OutOfRangeError:
        tf.logging.info('Done training -- epoch limit reached')
      finally:
        # When done, ask the threads to stop.
        coord.request_stop()
      # Wait for threads to finish.
      coord.join(threads)
      sess.close()

  def testReadSplicedBatchTfrecords(self):
    """Test reading spliced mini-batch from tfrecords."""
    name = 'valid'
    config_file = open(os.path.join(FLAGS.config_dir, name + ".lst"))
    tfrecords_lst = []
    for line in config_file:
      utt_id, inputs_path, labels_path = line.strip().split()
      tfrecords_name = os.path.join(FLAGS.data_dir, name,
                                    utt_id + ".tfrecords")
      tfrecords_lst.append(tfrecords_name)
    with tf.Graph().as_default():
      # Same pipeline as above, but frames are spliced rather than padded,
      # so no per-sequence lengths tensor is produced.
      inputs, labels = get_spliced_batch(
          tfrecords_lst, FLAGS.batch_size, FLAGS.input_dim,
          FLAGS.output_dim, num_enqueuing_threads=FLAGS.num_threads,
          num_epochs=FLAGS.num_epochs)
      init = tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer())
      sess = tf.Session()
      sess.run(init)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      try:
        while not coord.should_stop():
          # Print an overview fairly often.
          tr_inputs, tr_labels = sess.run([inputs, labels])
          tf.logging.info('inputs shape : '+ str(tr_inputs.shape))
          tf.logging.info('labels shape : ' + str(tr_labels.shape))
      except tf.errors.OutOfRangeError:
        tf.logging.info('Done training -- epoch limit reached')
      finally:
        # When done, ask the threads to stop.
        coord.request_stop()
      # Wait for threads to finish.
      coord.join(threads)
      sess.close()

  def testTfDatasetsForTraining(self):
    """Test reading tfrecords using tf.contrib.data.Dataset for training."""
    dataset_valid = SequenceDataset(
        subset="valid",
        config_dir=FLAGS.config_dir,
        data_dir=FLAGS.data_dir,
        batch_size=FLAGS.batch_size,
        input_size=FLAGS.input_dim,
        output_size=FLAGS.output_dim,
        num_threads=FLAGS.num_threads,
        use_bucket=True,
        infer=False,
        name="sequence_dataset")()
    # An initializable iterator lets us rewind the dataset between epochs.
    iterator = dataset_valid.batched_dataset.make_initializable_iterator()
    (input_sequence, input_sequence_length,
     target_sequence, target_sequence_length) = iterator.get_next()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # Compute for 2 epochs
    for epoch in range(2):
      sess.run(iterator.initializer)
      while True:
        try:
          input_seq, input_seq_len, target_seq, target_seq_len = sess.run(
              [input_sequence,
               input_sequence_length,
               target_sequence,
               target_sequence_length])
          tf.logging.info('inputs shape : '+ str(input_seq.shape))
          tf.logging.info('actual inputs length : '+ str(input_seq_len))
          tf.logging.info('labels shape : ' + str(target_seq.shape))
          tf.logging.info('actual labels length : '+ str(target_seq_len))
        except tf.errors.OutOfRangeError:
          break

  def testTfDatasetsForInference(self):
    """Test reading tfrecords using tf.contrib.data.Dataset for inference."""
    dataset_test = SequenceDataset(
        subset="test",
        config_dir=FLAGS.config_dir,
        data_dir=FLAGS.data_dir,
        batch_size=1,
        input_size=FLAGS.input_dim,
        output_size=FLAGS.output_dim,
        infer=True,
        name="sequence_dataset")()
    # One-shot iterator: a single pass, no labels in inference mode.
    iterator = dataset_test.batched_dataset.make_one_shot_iterator()
    input_sequence, input_sequence_length = iterator.get_next()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    while True:
      try:
        input_seq, input_seq_len = sess.run(
            [input_sequence, input_sequence_length])
        tf.logging.info('inputs shape : '+ str(input_seq.shape))
        tf.logging.info('actual inputs length : '+ str(input_seq_len))
      except tf.errors.OutOfRangeError:
        break
if __name__ == '__main__':
  # Flag spec: (name, type, default, help text) — registered in a loop
  # instead of seven separate add_argument calls.
  _flag_specs = [
      ('--batch_size', int, 30, 'Mini-batch size.'),
      ('--input_dim', int, 145, 'The dimension of inputs.'),
      ('--output_dim', int, 75, 'The dimension of outputs.'),
      ('--num_threads', int, 4,
       'The num of threads to read tfrecords files.'),
      ('--num_epochs', int, 1,
       'The num of epochs to read tfrecords files.'),
      ('--data_dir', str, 'data/',
       'Directory of train, val and test data.'),
      ('--config_dir', str, 'config/',
       'Directory to load train, val and test lists.'),
  ]
  parser = argparse.ArgumentParser()
  for flag_name, flag_type, flag_default, flag_help in _flag_specs:
    parser.add_argument(flag_name, type=flag_type, default=flag_default,
                        help=flag_help)
  FLAGS, unparsed = parser.parse_known_args()
  tf.test.main()
| {
"content_hash": "eb5509c0e53daea6874506a832b471d1",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 84,
"avg_line_length": 35.81363636363636,
"alnum_prop": 0.5501967254727758,
"repo_name": "npuichigo/voicenet",
"id": "f8b8b37548588dfdcedf5504a6b3c9cf29be295b",
"size": "8520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/io_funcs/tfrecords_io_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2285523"
},
{
"name": "C++",
"bytes": "357550"
},
{
"name": "M4",
"bytes": "8246"
},
{
"name": "Makefile",
"bytes": "2371358"
},
{
"name": "Objective-C",
"bytes": "23129"
},
{
"name": "Python",
"bytes": "118685"
},
{
"name": "Shell",
"bytes": "59299"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ImageboardConfig(AppConfig):
    """Django application configuration for the imageboard app."""
    # Dotted app label Django uses to register this application.
    name = 'imageboard'
# Pre-Django-3.2 hook telling Django which AppConfig to use by default.
default_app_config = 'imageboard.apps.ImageboardConfig'
| {
"content_hash": "25abcb7e6d7f146b15ecd6979c89c16d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 19.125,
"alnum_prop": 0.7777777777777778,
"repo_name": "andrewnsk/dorokhin.moscow",
"id": "c27bb2f954eb55cf06759e4ed8fb2d2f9f25d8f4",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imageboard/apps.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "461"
},
{
"name": "HTML",
"bytes": "153184"
},
{
"name": "Python",
"bytes": "127702"
},
{
"name": "Shell",
"bytes": "930"
}
],
"symlink_target": ""
} |
from pathlib import Path
from typing import Optional
from semver import VersionInfo
import virtool.github
def format_hmm_release(
    updated: Optional[dict], release: dict, installed: dict
) -> Optional[dict]:
    """Format an updated HMM release and flag whether it is newer than the
    installed one.

    :param updated: the release data fetched from GitHub, or ``None``
    :param release: the previously known release data
    :param installed: the currently installed release data
    :return: the formatted release with a ``newer`` flag, or ``None``
    """
    # The release dict will only be replaced if there is a 200 response from
    # GitHub. A 304 indicates the release has not changed and `None` is
    # returned from `get_release()` — propagate that.
    if updated is None:
        return None

    formatted = virtool.github.format_release(updated)

    if release is None or installed is None:
        # Nothing to compare against: treat the update as newer.
        newer = True
    elif installed:
        newer = (VersionInfo.parse(formatted["name"].lstrip("v"))
                 > VersionInfo.parse(installed["name"].lstrip("v")))
    else:
        # `installed` is an empty dict: mirror the original truthiness check.
        newer = False

    formatted["newer"] = bool(newer)
    return formatted
def hmm_data_exists(file_path: Path) -> bool:
    """
    Checks if HMM data exists in the local data path.

    :param file_path: Path to where `profiles.hmm` should be
    :return: True if both the `hmm` directory and `profiles.hmm` exist, else False
    """
    containing_dir_exists = file_path.parent.is_dir()
    profiles_file_exists = file_path.is_file()
    return containing_dir_exists and profiles_file_exists
| {
"content_hash": "1c3b83193555de6ffd89f379f2a9c9b9",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 112,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6570135746606335,
"repo_name": "igboyes/virtool",
"id": "30348a5eff5674f2c3b226d9237d7cf5d7f82519",
"size": "1105",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "virtool/hmm/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "961"
},
{
"name": "HTML",
"bytes": "44858"
},
{
"name": "Python",
"bytes": "1316464"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
#
import sys,os
# make the local QuSpin checkout (two levels up) importable ahead of any
# installed copy
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
#
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import spin_basis_1d # Hilbert space spin basis
from quspin.tools.measurements import diag_ensemble
import numpy as np # generic math functions
#
L=12 # system size
# coupling strengths
J=1.0 # spin-spin coupling
h=0.8945 # x-field strength
g=0.945 # z-field strength
# create site-coupling lists
J_zz=[[J,i,(i+1)%L] for i in range(L)] # PBC
x_field=[[h,i] for i in range(L)]
z_field=[[g,i] for i in range(L)]
# create static and dynamic lists
static_1=[["x",x_field],["z",z_field]] # H1: transverse + longitudinal fields only
static_2=[["zz",J_zz],["x",x_field],["z",z_field]] # H2: adds the zz interaction
dynamic=[]
# create spin-1/2 basis (zero-momentum, positive-parity symmetry sector)
basis=spin_basis_1d(L,kblock=0,pblock=1)
# set up Hamiltonian
H1=hamiltonian(static_1,dynamic,basis=basis,dtype=np.float64)
H2=hamiltonian(static_2,dynamic,basis=basis,dtype=np.float64)
# compute eigensystems of H1 and H2
E1,V1=H1.eigh()
psi1=V1[:,14] # pick any state as initial state
E2,V2=H2.eigh()
#
# calculate long-time (diagonal ensemble) expectations of H1 and its temporal fluctuations
Diag_Ens=diag_ensemble(L,psi1,E2,V2,Obs=H1,delta_t_Obs=True)
print(Diag_Ens['Obs_pure'],Diag_Ens['delta_t_Obs_pure'])
"content_hash": "86775e868c793f9dcbc26dc62a587132",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 35.513513513513516,
"alnum_prop": 0.7267884322678844,
"repo_name": "weinbe58/QuSpin",
"id": "4389f6f5459e0e0c071f9a1a752f88ad60f2ddc4",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx/doc_examples/diag_ens-example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1064"
},
{
"name": "C",
"bytes": "2687"
},
{
"name": "C++",
"bytes": "266965"
},
{
"name": "Jupyter Notebook",
"bytes": "1058"
},
{
"name": "Makefile",
"bytes": "608"
},
{
"name": "Python",
"bytes": "1534241"
},
{
"name": "Shell",
"bytes": "933"
}
],
"symlink_target": ""
} |
"""Functions concerning tournament graphs.
A `tournament graph`_ is a complete oriented graph. In other words, it
is a directed graph in which there is exactly one directed edge joining
each pair of distinct nodes. For each function in this module that
accepts a graph as input, you must provide a tournament graph. The
responsibility is on the caller to ensure that the graph is a tournament
graph.
To access the functions in this module, you must access them through the
:mod:`networkx.algorithms.tournament` module::
>>> import networkx as nx
>>> from networkx.algorithms import tournament
>>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
>>> tournament.is_tournament(G)
True
.. _tournament graph: https://en.wikipedia.org/wiki/Tournament_%28graph_theory%29
"""
from itertools import combinations
import random
import networkx as nx
from networkx.utils import arbitrary_element
from networkx.utils import is_path
from networkx.utils import not_implemented_for
__all__ = ['hamiltonian_path', 'is_reachable', 'is_strongly_connected',
'is_tournament', 'random_tournament', 'score_sequence']
def index_satisfying(iterable, condition):
    """Returns the index of the first element in ``iterable`` that
    satisfies the given condition.

    If no such element is found (that is, when the iterable is
    exhausted), this returns the length of the iterable (that is, one
    greater than the last index of the iterable).

    ``iterable`` must not be empty. If ``iterable`` is empty, this
    function raises :exc:`ValueError`.
    """
    # Track how many elements were consumed so that an empty iterable can
    # be distinguished from one in which no element matched.
    consumed = 0
    for index, element in enumerate(iterable):
        if condition(element):
            return index
        consumed = index + 1
    if consumed == 0:
        raise ValueError('iterable must be non-empty')
    # No element satisfied the condition: return the iterable's length.
    return consumed
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def is_tournament(G):
    """Returns ``True`` if and only if ``G`` is a tournament.

    A tournament is a directed graph, with neither self-loops nor
    multi-edges, in which there is exactly one directed edge joining
    each pair of distinct nodes.

    Parameters
    ----------
    G : NetworkX graph
        A directed graph representing a tournament.

    Returns
    -------
    bool
        Whether the given graph is a tournament graph.

    Notes
    -----
    Some definitions require a self-loop on each node, but that is not
    the convention used here.
    """
    # Self-loops immediately disqualify the graph.
    if G.number_of_selfloops() != 0:
        return False
    # For every unordered pair of distinct nodes, exactly one of the two
    # possible orientations must be present.
    return all((v in G[u]) != (u in G[v]) for u, v in combinations(G, 2))
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def hamiltonian_path(G):
    """Returns a Hamiltonian path in the given tournament graph.

    Each tournament has a Hamiltonian path. If furthermore, the
    tournament is strongly connected, then the returned Hamiltonian path
    is a Hamiltonian cycle (by joining the endpoints of the path).

    Parameters
    ----------
    G : NetworkX graph
        A directed graph representing a tournament.

    Returns
    -------
    list
        A list of nodes forming a Hamiltonian path in ``G`` (empty when
        ``G`` has no nodes).

    Notes
    -----
    This is a recursive implementation with an asymptotic running time
    of `O(n^2)`, ignoring multiplicative polylogarithmic factors, where
    `n` is the number of nodes in the graph.
    """
    if len(G) == 0:
        return []
    if len(G) == 1:
        return [arbitrary_element(G)]
    v = arbitrary_element(G)
    # Recurse on the tournament induced by all nodes but `v`; removing a
    # node from a tournament leaves a tournament.
    hampath = hamiltonian_path(G.subgraph(set(G) - {v}))
    # Get the index of the first node in the path that does *not* have
    # an edge to `v`, then insert `v` before that node.
    index = index_satisfying(hampath, lambda u: v not in G[u])
    hampath.insert(index, v)
    return hampath
def random_tournament(n):
    r"""Returns a random tournament graph on ``n`` nodes.

    Parameters
    ----------
    n : int
        The number of nodes in the returned graph.

    Returns
    -------
    DiGraph
        A directed graph that is a tournament on ``n`` nodes: each pair
        of distinct nodes is joined by exactly one directed edge with
        uniformly random orientation.

    Notes
    -----
    This algorithm adds, for each pair of distinct nodes, an edge with
    uniformly random orientation. In other words, `\binom{n}{2}` flips
    of an unbiased coin decide the orientations of the edges in the
    graph.
    """
    # Flip an unbiased coin for each pair of distinct nodes.
    coins = (random.random() for i in range((n * (n - 1)) // 2))
    pairs = combinations(range(n), 2)
    # Orient each pair (u, v) according to its coin flip.
    edges = ((u, v) if r < 0.5 else (v, u) for (u, v), r in zip(pairs, coins))
    return nx.DiGraph(edges)
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def score_sequence(G):
    """Returns the score sequence for the given tournament graph.

    The score sequence is the sorted list of the out-degrees of the
    nodes of the graph.

    Parameters
    ----------
    G : NetworkX graph
        A directed graph representing a tournament.

    Returns
    -------
    list
        A sorted list of the out-degrees of the nodes of ``G``.
    """
    # Collect every node's out-degree, then sort ascending in place.
    degrees = [degree for _, degree in G.out_degree()]
    degrees.sort()
    return degrees
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def tournament_matrix(G):
    r"""Returns the tournament matrix for the given tournament graph.

    This function requires SciPy.

    The *tournament matrix* of a tournament graph with edge set *E* is
    the matrix *T* defined by

    .. math::

        T_{i j} =
        \begin{cases}
            +1 & \text{if } (i, j) \in E \\
            -1 & \text{if } (j, i) \in E \\
            0 & \text{if } i == j.
        \end{cases}

    An equivalent definition is `T = A - A^T`, where *A* is the
    adjacency matrix of the graph ``G``.

    Parameters
    ----------
    G : NetworkX graph
        A directed graph representing a tournament.

    Returns
    -------
    SciPy sparse matrix
        The tournament matrix of the tournament graph ``G``.

    Raises
    ------
    ImportError
        If SciPy is not available.
    """
    # Compute T = A - A^T directly on the sparse adjacency matrix.
    A = nx.adjacency_matrix(G)
    return A - A.T
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def is_reachable(G, s, t):
    """Decides whether there is a path from ``s`` to ``t`` in the
    tournament.

    This function is more theoretically efficient than the reachability
    checks in the shortest path algorithms in
    :mod:`networkx.algorithms.shortest_paths`.

    The given graph **must** be a tournament, otherwise this function's
    behavior is undefined.

    Parameters
    ----------
    G : NetworkX graph
        A directed graph representing a tournament.

    s : node
        A node in the graph.

    t : node
        A node in the graph.

    Returns
    -------
    bool
        Whether there is a path from ``s`` to ``t`` in ``G``.

    Notes
    -----
    Although this function is more theoretically efficient than the
    generic shortest path functions, a speedup requires the use of
    parallelism. Though it may in the future, the current implementation
    does not use parallelism, thus you may not see much of a speedup.

    This algorithm comes from [1].

    References
    ----------
    .. [1] Tantau, Till.
           "A note on the complexity of the reachability problem for
           tournaments."
           *Electronic Colloquium on Computational Complexity*. 2001.
           <http://eccc.hpi-web.de/report/2001/092/>
    """
    def two_neighborhood(G, v):
        """Returns the set of nodes at distance at most two from ``v``.

        ``G`` must be a graph and ``v`` a node in that graph.

        The returned set includes the nodes at distance zero (that is,
        the node ``v`` itself), the nodes at distance one (that is, the
        out-neighbors of ``v``), and the nodes at distance two.
        """
        # TODO This is trivially parallelizable.
        # Bug fix: ``is_path`` expects the candidate path as a single
        # list argument, ``is_path(G, [v, z, x])``; the nodes were
        # previously passed as separate positional arguments, which
        # raises TypeError at runtime.
        return {x for x in G
                if x == v or x in G[v] or any(is_path(G, [v, z, x])
                                              for z in G)}

    def is_closed(G, nodes):
        """Decides whether the given set of nodes is closed.

        A set *S* of nodes is *closed* if for each node *u* in the graph
        not in *S* and for each node *v* in *S*, there is an edge from
        *u* to *v*.
        """
        # TODO This is trivially parallelizable.
        return all(v in G[u] for u in set(G) - nodes for v in nodes)

    # TODO This is trivially parallelizable.
    # `t` is unreachable from `s` exactly when some closed set contains
    # `s` but not `t`; it suffices to test the two-neighborhoods.
    neighborhoods = [two_neighborhood(G, v) for v in G]
    return all(not (is_closed(G, S) and s in S and t not in S)
               for S in neighborhoods)
@not_implemented_for('undirected')
@not_implemented_for('multigraph')
def is_strongly_connected(G):
    """Decides whether the given tournament is strongly connected.

    This function is more theoretically efficient than the
    :func:`~networkx.algorithms.components.is_strongly_connected`
    function.

    The given graph **must** be a tournament, otherwise this function's
    behavior is undefined.

    Parameters
    ----------
    G : NetworkX graph
        A directed graph representing a tournament.

    Returns
    -------
    bool
        Whether the tournament is strongly connected.

    Notes
    -----
    Although this function is more theoretically efficient than the
    generic strong connectivity function, a speedup requires the use of
    parallelism. Though it may in the future, the current implementation
    does not use parallelism, thus you may not see much of a speedup.

    This algorithm comes from [1].

    References
    ----------
    .. [1] Tantau, Till.
           "A note on the complexity of the reachability problem for
           tournaments."
           *Electronic Colloquium on Computational Complexity*. 2001.
           <http://eccc.hpi-web.de/report/2001/092/>
    """
    # TODO This is trivially parallelizable.
    # Strong connectivity requires every ordered pair of nodes to be
    # joined by a directed path; bail out on the first failure.
    for source in G:
        for target in G:
            if not is_reachable(G, source, target):
                return False
    return True
| {
"content_hash": "d0770a5b9569ad10311bc5ff685d0426",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 81,
"avg_line_length": 29.970674486803517,
"alnum_prop": 0.6430528375733855,
"repo_name": "Sixshaman/networkx",
"id": "dfcb3d115e657961db6914b743714a1c042b7089",
"size": "10434",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "networkx/algorithms/tournament.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3238984"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_log import log as logging
from murano.common.i18n import _
from murano import version
# Options controlling how the murano-api WSGI pipeline is loaded via
# PasteDeploy; registered under the [paste_deploy] group below.
paste_deploy_opts = [
    cfg.StrOpt('flavor', help='Paste flavor'),
    cfg.StrOpt('config_file', help='Path to Paste config file'),
]

# Listen address/port for the API server; registered as CLI options below.
bind_opts = [
    cfg.StrOpt('bind-host', default='0.0.0.0',
               help='Address to bind the Murano API server to.'),
    cfg.PortOpt('bind-port',
                default=8082,
                # Fixed help-text typo: "Port the bind" -> "Port to bind".
                help='Port to bind the Murano API server to.'),
]
# RabbitMQ broker used for murano-engine <-> guest-agent communication;
# registered under the [rabbitmq] group below.
rabbit_opts = [
    cfg.StrOpt('host', default='localhost',
               help='The RabbitMQ broker address which used for communication '
                    'with Murano guest agents.'),
    cfg.PortOpt('port',
                default=5672,
                help='The RabbitMQ broker port.'),
    cfg.StrOpt('login', default='guest',
               help='The RabbitMQ login.'),
    cfg.StrOpt('password', default='guest',
               help='The RabbitMQ password.'),
    cfg.StrOpt('virtual_host', default='/',
               help='The RabbitMQ virtual host.'),
    cfg.BoolOpt('ssl', default=False,
                help='Boolean flag to enable SSL communication through the '
                     'RabbitMQ broker between murano-engine and guest agents.'),
    cfg.StrOpt('ca_certs', default='',
               help='SSL cert file (valid only if SSL enabled).')
]

# Heat client connection settings; registered under the [heat] group.
heat_opts = [
    cfg.StrOpt('url', help='Optional heat endpoint override'),
    cfg.BoolOpt('insecure', default=False,
                help='This option explicitly allows Murano to perform '
                     '"insecure" SSL connections and transfers with Heat API.'),
    cfg.StrOpt('ca_file',
               help='(SSL) Tells Murano to use the specified certificate file '
                    'to verify the peer running Heat API.'),
    cfg.StrOpt('cert_file',
               help='(SSL) Tells Murano to use the specified client '
                    'certificate file when communicating with Heat.'),
    cfg.StrOpt('key_file', help='(SSL/SSH) Private key file name to '
                                'communicate with Heat API.'),
    cfg.StrOpt('endpoint_type', default='publicURL',
               help='Heat endpoint type.'),
    cfg.ListOpt('stack_tags', default=['murano'],
                help='List of tags to be assigned to heat stacks created '
                     'during environment deployment.')
]

# Mistral client connection settings; registered under the [mistral] group.
mistral_opts = [
    cfg.StrOpt('url', help='Optional mistral endpoint override'),
    cfg.StrOpt('endpoint_type', default='publicURL',
               help='Mistral endpoint type.'),
    cfg.StrOpt('service_type', default='workflowv2',
               help='Mistral service type.'),
    cfg.BoolOpt('insecure', default=False,
                help='This option explicitly allows Murano to perform '
                     '"insecure" SSL connections and transfers with Mistral.'),
    cfg.StrOpt('ca_cert',
               help='(SSL) Tells Murano to use the specified client '
                    'certificate file when communicating with Mistral.')
]

# Neutron client connection settings; registered under the [neutron] group.
neutron_opts = [
    cfg.StrOpt('url', help='Optional neutron endpoint override'),
    cfg.BoolOpt('insecure', default=False,
                help='This option explicitly allows Murano to perform '
                     '"insecure" SSL connections and transfers with Neutron API.'),
    cfg.StrOpt('ca_cert',
               help='(SSL) Tells Murano to use the specified client '
                    'certificate file when communicating with Neutron.'),
    cfg.StrOpt('endpoint_type', default='publicURL',
               help='Neutron endpoint type.')
]
# Murano API client settings used by murano-engine itself; registered
# under the [murano] group below.
murano_opts = [
    cfg.StrOpt('url', help='Optional murano url in format '
                           'like http://0.0.0.0:8082 used by Murano engine'),
    cfg.BoolOpt('insecure', default=False,
                help='This option explicitly allows Murano to perform '
                     '"insecure" SSL connections and transfers used by '
                     'Murano engine.'),
    cfg.StrOpt('cacert',
               help='(SSL) Tells Murano to use the specified client '
                    'certificate file when communicating with Murano API '
                    'used by Murano engine.'),
    cfg.StrOpt('cert_file',
               help='(SSL) Tells Murano to use the specified client '
                    'certificate file when communicating with Murano '
                    'used by Murano engine.'),
    cfg.StrOpt('key_file', help='(SSL/SSH) Private key file name '
                                'to communicate with Murano API used by '
                                'Murano engine.'),
    cfg.StrOpt('endpoint_type', default='publicURL',
               help='Murano endpoint type used by Murano engine.'),
    cfg.ListOpt('enabled_plugins',
                help="List of enabled Extension Plugins. "
                     "Remove or leave commented to enable all installed "
                     "plugins.")
]

# Environment networking limits and defaults; registered under the
# [networking] group below.
networking_opts = [
    cfg.IntOpt('max_environments', default=250,
               help='Maximum number of environments that use a single router '
                    'per tenant'),
    cfg.IntOpt('max_hosts', default=250,
               help='Maximum number of VMs per environment'),
    cfg.StrOpt('env_ip_template', default='10.0.0.0',
               help='Template IP address for generating environment '
                    'subnet cidrs'),
    cfg.ListOpt('default_dns', default=[],
                help='List of default DNS nameservers to be assigned to '
                     'created Networks'),
    cfg.StrOpt('external_network', default='ext-net',
               help='ID or name of the external network for routers '
                    'to connect to'),
    cfg.StrOpt('router_name', default='murano-default-router',
               help='Name of the router that going to be used in order to '
                    'join all networks created by Murano'),
    cfg.BoolOpt('create_router', default=True,
                help='This option will create a router when one with '
                     '"router_name" does not exist'),
    cfg.StrOpt('network_config_file', default='netconfig.yaml',
               help='If provided networking configuration will be taken '
                    'from this file')
]

# Statistics collection; registered under the [stats] group below.
stats_opts = [
    cfg.IntOpt('period', default=5,
               help=_('Statistics collection interval in minutes.'
                      'Default value is 5 minutes.'))
]

# Deployment-engine behavior; registered under the [engine] group below.
engine_opts = [
    cfg.BoolOpt('disable_murano_agent', default=False,
                help=_('Disallow the use of murano-agent')),
    cfg.StrOpt('class_configs', default='/etc/murano/class-configs',
               help=_('Path to class configuration files')),
    cfg.BoolOpt('use_trusts', default=True,
                help=_("Create resources using trust token rather "
                       "than user's token")),
    cfg.BoolOpt('enable_model_policy_enforcer', default=False,
                help=_('Enable model policy enforcer using Congress')),
    cfg.IntOpt('agent_timeout', default=3600,
               help=_('Time for waiting for a response from murano agent '
                      'during the deployment')),
]

# TODO(sjmc7): move into engine opts?
metadata_dir = [
    cfg.StrOpt('metadata-dir', default='./meta',
               help='Metadata dir')
]
# Package cache/pagination/storage settings; registered below under the
# 'packages_opts' group.
packages_opts = [
    cfg.StrOpt('packages_cache',
               help='Location (directory) for Murano package cache.'),
    cfg.BoolOpt('enable_packages_cache', default=True,
                help=_('Enables murano-engine to persist on disk '
                       'packages downloaded during deployments. '
                       'The packages would be re-used for consequent '
                       'deployments.')),
    cfg.ListOpt('load_packages_from', default=[],
                # Help-text fix: added the missing "from" ("loaded only API").
                help=_('List of directories to load local packages from. '
                       'If not provided, packages will be loaded only '
                       'from API')),
    cfg.IntOpt('package_size_limit', default=5,
               help='Maximum application package size, Mb'),
    cfg.IntOpt('limit_param_default', default=20,
               help='Default value for package pagination in API.'),
    cfg.IntOpt('api_limit_max', default=100,
               help='Maximum number of packages to be returned in a single '
                    'pagination request'),
    cfg.StrOpt('packages_service', default='murano',
               help=_('The service to store murano packages: murano (stands '
                      'for legacy behavior using murano-api) or glance '
                      '(stands for Glance V3 artifact repository)'))
]
# Glance client connection settings; registered under the [glance] group.
glance_opts = [
    cfg.StrOpt('url', help='Optional murano url in format '
                           'like http://0.0.0.0:9292 used by Glance API'),
    cfg.BoolOpt('insecure', default=False,
                help='This option explicitly allows Murano to perform '
                     '"insecure" SSL connections and transfers with Glance API.'),
    cfg.StrOpt('ca_file',
               help='(SSL) Tells Murano to use the specified certificate file '
                    'to verify the peer running Glance API.'),
    cfg.StrOpt('cert_file',
               help='(SSL) Tells Murano to use the specified client '
                    'certificate file when communicating with Glance.'),
    cfg.StrOpt('key_file', help='(SSL/SSH) Private key file name to '
                                'communicate with Glance API.'),
    cfg.StrOpt('endpoint_type', default='publicURL',
               help='Glance endpoint type.')
]

# Optional file server location, registered at the default group level.
file_server = [
    cfg.StrOpt('file_server', default='',
               help='Set a file server.')
]

# Single option (not a list): default region for endpoint discovery.
home_region = cfg.StrOpt(
    'home_region', default=None,
    help="Default region name used to get services endpoints.")
# Register every option list on the global config object, each under its
# own group (CLI options are registered without a group).
CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_cli_opts(bind_opts)
CONF.register_opts(rabbit_opts, group='rabbitmq')
CONF.register_opts(heat_opts, group='heat')
CONF.register_opts(mistral_opts, group='mistral')
CONF.register_opts(neutron_opts, group='neutron')
CONF.register_opts(murano_opts, group='murano')
CONF.register_opts(engine_opts, group='engine')
CONF.register_opts(file_server)
CONF.register_opt(home_region)
CONF.register_cli_opts(metadata_dir)
# NOTE(review): unlike the other groups, this one keeps the '_opts' suffix
# ('packages_opts' rather than 'packages') -- confirm against the sample
# config files before considering a rename.
CONF.register_opts(packages_opts, group='packages_opts')
CONF.register_opts(stats_opts, group='stats')
CONF.register_opts(networking_opts, group='networking')
CONF.register_opts(glance_opts, group='glance')
def parse_args(args=None, usage=None, default_config_files=None):
    """Register logging options and populate the global CONF object.

    :param args: command line arguments to parse (defaults to sys.argv)
    :param usage: optional usage string shown by --help
    :param default_config_files: optional list of config files to read
        when none are given on the command line
    """
    logging.register_options(CONF)
    CONF(args=args,
         project='murano',
         version=version.version_string,
         usage=usage,
         default_config_files=default_config_files)
| {
"content_hash": "eee34294ebf8118550e764b0e859e400",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 79,
"avg_line_length": 37.89716312056738,
"alnum_prop": 0.5979227098343782,
"repo_name": "olivierlemasle/murano",
"id": "63d3e8babe0f65b08e7a450d59e09b5678615e60",
"size": "11317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "152"
},
{
"name": "Mako",
"bytes": "1013"
},
{
"name": "PowerShell",
"bytes": "2772"
},
{
"name": "Puppet",
"bytes": "86"
},
{
"name": "Python",
"bytes": "1267810"
},
{
"name": "Ruby",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "25578"
}
],
"symlink_target": ""
} |
from __future__ import division
from numpy import sqrt, mean, absolute, real, conj
def rms_flat(a):
    """
    Return the root mean square of all the elements of *a*, flattened out.
    """
    # RMS = sqrt(E[|a|^2]); `absolute` also handles complex input.
    squared_magnitudes = absolute(a) ** 2
    return sqrt(mean(squared_magnitudes))
def rms_fft(spectrum):
    """
    Use Parseval's theorem to find the RMS value of a signal from its fft,
    without wasting time doing an inverse FFT.

    For a signal x, these should produce the same result, to within numerical
    accuracy:

    rms_flat(x) ~= rms_fft(fft(x))
    """
    # Parseval: sum(|x|^2) == sum(|X|^2) / N, so the time-domain RMS equals
    # the RMS of the spectrum divided by sqrt(N).  (rms_flat inlined here.)
    n = len(spectrum)
    return sqrt(mean(absolute(spectrum) ** 2)) / sqrt(n)
def rms_rfft(spectrum, n=None):
    """
    Use Parseval's theorem to find the RMS value of an even-length signal
    from its rfft, without wasting time doing an inverse real FFT.

    spectrum is produced as spectrum = numpy.fft.rfft(signal)

    For a signal x with an even number of samples, these should produce the
    same result, to within numerical accuracy:

    rms_flat(x) ~= rms_rfft(rfft(x))

    If len(x) is odd, n must be included, or the result will only be
    approximate, due to the ambiguity of rfft for odd lengths.
    """
    if n is None:
        # rfft of an even-length signal has n/2 + 1 bins; invert that.
        n = (len(spectrum) - 1) * 2
    sq = real(spectrum * conj(spectrum))
    # Renamed the accumulator (was `mean`) so it no longer shadows the
    # numpy `mean` imported at module level.
    if n % 2:  # odd-length: every bin except DC appears twice in the full FFT
        mean_sq = (sq[0] + 2*sum(sq[1:]))/n
    else:  # even-length: DC and Nyquist bins appear exactly once
        mean_sq = (sq[0] + 2*sum(sq[1:-1]) + sq[-1])/n
    root = sqrt(mean_sq)
    return root/sqrt(n)
if __name__ == "__main__":
    # Sanity-check demo: the time-domain RMS should match the values
    # recovered from the full FFT and the real FFT via Parseval's theorem.
    from numpy.random import randn
    from numpy.fft import fft, ifft, rfft, irfft
    n = 17  # deliberately odd to exercise the rfft length ambiguity
    x = randn(n)
    X = fft(x)
    rX = rfft(x)
    # These three should agree to numerical accuracy.
    print(rms_flat(x))
    print(rms_flat(ifft(X)))
    print(rms_fft(X))
    print()
    # Accurate for odd n:
    print(rms_flat(irfft(rX, n)))
    print(rms_rfft(rX, n))
    print()
    # Only approximate for odd n:
    print(rms_flat(irfft(rX)))
    print(rms_rfft(rX))
| {
"content_hash": "0fd19abc36bc55231352fc76cc1089ce",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 77,
"avg_line_length": 25.98611111111111,
"alnum_prop": 0.6130411544628541,
"repo_name": "sthenc/bss_tester",
"id": "c3d55d7a2707efee4d298a70b3055837801a641c",
"size": "1895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parseval_functions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45332"
}
],
"symlink_target": ""
} |
"""
`Unit tests for cargo.expressions.Case`
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
2016 Jared Lunde © The MIT License (MIT)
http://github.com/jaredlunde
"""
import unittest
from cargo import fields, ORM, safe
from cargo.expressions import Case
from unit_tests import configure
from unit_tests.configure import new_field
class TestCase(unittest.TestCase):
    """Unit tests for the SQL CASE expression builder."""

    def test_el(self):
        """A later el() call replaces the previously set ELSE clause."""
        fielda = new_field(table='foo', name='bar')
        case = Case(fielda == 1, 'one', fielda == 2, 'two', el='three')
        caseb = Case()
        caseb.when(fielda == 1, 'one')
        caseb.when(fielda == 2, 'two')
        caseb.el('four')
        caseb.el('three')  # overrides the previous ELSE value
        self.assertEqual(case.string % case.params,
                         'CASE WHEN foo.bar = 1 THEN one WHEN foo.bar = 2 ' +
                         'THEN two ELSE three END')
        self.assertEqual(case.string % case.params,
                         caseb.string % caseb.params)

    def test_alias(self):
        """An alias is appended after END."""
        fielda = new_field(table='foo', name='bar')
        case = Case(fielda == 1, 'one', fielda == 2, 'two', el='three',
                    alias='foo_alias')
        self.assertEqual(case.string % case.params,
                         'CASE WHEN foo.bar = 1 THEN one WHEN foo.bar = 2 ' +
                         'THEN two ELSE three END foo_alias')

    def test_when(self):
        """WHEN pairs can be given in the constructor, in one when() call,
        or in several when() calls -- all must render identically.
        """
        fielda = new_field(table='foo', name='bar')
        case = Case(fielda == 1, 'one', fielda == 2, 'two')
        caseb = Case()
        caseb.when(fielda == 1, 'one', fielda == 2, 'two')
        casec = Case()
        casec.when(fielda == 1, 'one')
        casec.when(fielda == 2, 'two')
        self.assertEqual(case.string % case.params,
                         'CASE WHEN foo.bar = 1 THEN one WHEN foo.bar = 2 ' +
                         'THEN two END')
        # Bug fix: the three-argument assertEqual(a, b, c) treated `casec`
        # as the failure *message*, so it was never actually compared.
        self.assertEqual(case.string % case.params,
                         caseb.string % caseb.params)
        self.assertEqual(case.string % case.params,
                         casec.string % casec.params)

    def test_use_field_name(self):
        """use_field_name renders bare column names without the table."""
        fielda = new_field(table='foo', name='bar')
        case = Case(fielda == 1, 'one', fielda == 2, 'two',
                    use_field_name=True)
        caseb = Case(use_field_name=True)
        caseb.when(fielda == 1, 'one', fielda == 2, 'two')
        casec = Case(use_field_name=True)
        casec.when(fielda == 1, 'one')
        casec.when(fielda == 2, 'two')
        self.assertEqual(case.string % case.params,
                         'CASE WHEN bar = 1 THEN one WHEN bar = 2 ' +
                         'THEN two END')
        # Bug fix: same three-argument assertEqual misuse as in test_when;
        # compare caseb and casec each in their own assertion.
        self.assertEqual(case.string % case.params,
                         caseb.string % caseb.params)
        self.assertEqual(case.string % case.params,
                         casec.string % casec.params)

    def test_select(self):
        """A Case can be used directly in a SELECT, aliased or not."""
        orm = ORM()
        q = orm.select(Case(safe('1=1'), 'one',
                            safe('1=2'), 'two',
                            el='three', alias="foo"))
        self.assertIsInstance(q, list)
        self.assertEqual(q[0].foo, 'one')
        q = orm.select(Case(safe('1=1'), 'one',
                            safe('1=2'), 'two',
                            el='three'))
        self.assertIsInstance(q, list)
        # Without an alias the column is exposed as `case`.
        self.assertEqual(q[0].case, 'one')
if __name__ == '__main__':
    # Unit test: run this suite directly via the project's test runner.
    configure.run_tests(TestCase, failfast=True, verbosity=2)
| {
"content_hash": "1b74e5d49c402940cf3bdaca2f8dfd3c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 35.208333333333336,
"alnum_prop": 0.49644970414201184,
"repo_name": "jaredlunde/cargo-orm",
"id": "5745ffb6f33fb9d0be2d8c52ed52cc0e61908096",
"size": "3453",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "unit_tests/expressions/Case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1155740"
},
{
"name": "Shell",
"bytes": "288"
}
],
"symlink_target": ""
} |
"""
Management class for basic VM operations.
"""
import functools
import os
import time
from eventlet import timeout as etimeout
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from os_win.utils.io import ioutils
from os_win import utilsfactory
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from nova.api.metadata import base as instance_metadata
import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import utils
from nova.virt import configdrive
from nova.virt import hardware
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)

# Hyper-V driver options; registered under the 'hyperv' group below.
hyperv_opts = [
    cfg.BoolOpt('limit_cpu_features',
                default=False,
                help='Required for live migration among '
                     'hosts with different CPU features'),
    cfg.BoolOpt('config_drive_inject_password',
                default=False,
                help='Sets the admin password in the config drive image'),
    cfg.StrOpt('qemu_img_cmd',
               default="qemu-img.exe",
               help='Path of qemu-img command which is used to convert '
                    'between different image types'),
    cfg.BoolOpt('config_drive_cdrom',
                default=False,
                help='Attaches the Config Drive image as a cdrom drive '
                     'instead of a disk drive'),
    cfg.BoolOpt('enable_instance_metrics_collection',
                default=False,
                help='Enables metrics collections for an instance by using '
                     'Hyper-V\'s metric APIs. Collected data can by retrieved '
                     'by other apps and services, e.g.: Ceilometer. '
                     'Requires Hyper-V / Windows Server 2012 and above'),
    cfg.FloatOpt('dynamic_memory_ratio',
                 default=1.0,
                 help='Enables dynamic memory allocation (ballooning) when '
                      'set to a value greater than 1. The value expresses '
                      'the ratio between the total RAM assigned to an '
                      'instance and its startup RAM amount. For example a '
                      'ratio of 2.0 for an instance with 1024MB of RAM '
                      'implies 512MB of RAM allocated at startup'),
    cfg.IntOpt('wait_soft_reboot_seconds',
               default=60,
               help='Number of seconds to wait for instance to shut down after'
                    ' soft reboot request is made. We fall back to hard reboot'
                    ' if instance does not shutdown within this window.'),
]

CONF = nova.conf.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('network_api_class', 'nova.network')

# Polling step (seconds) used while waiting for soft shutdown.
SHUTDOWN_TIME_INCREMENT = 5
REBOOT_TYPE_SOFT = 'SOFT'
REBOOT_TYPE_HARD = 'HARD'

# Image-property value -> internal Hyper-V VM generation constant.
VM_GENERATIONS = {
    constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1,
    constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2
}

# Disk controller type used for the boot/ephemeral disks per VM generation.
VM_GENERATIONS_CONTROLLER_TYPES = {
    constants.VM_GEN_1: constants.CTRL_TYPE_IDE,
    constants.VM_GEN_2: constants.CTRL_TYPE_SCSI
}
def check_admin_permissions(function):
    """Decorator that verifies Windows admin permissions before invoking
    the wrapped VMOps method.
    """
    @functools.wraps(function)
    def _checked(self, *args, **kwargs):
        # Verify the account's rights on every call, then delegate.
        self._vmutils.check_admin_permissions()
        return function(self, *args, **kwargs)
    return _checked
class VMOps(object):
    # Maps the configured network API class to the Hyper-V VIF driver
    # implementation path; resolved in _load_vif_driver_class().
    _vif_driver_class_map = {
        'nova.network.neutronv2.api.API':
            'nova.virt.hyperv.vif.HyperVNeutronVIFDriver',
        'nova.network.api.API':
            'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
    }

    # The console log is stored in two files, each should have at most half of
    # the maximum console log size.
    _MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2
    def __init__(self):
        """Instantiate the os-win utility helpers and companion ops
        objects used by the VM operations below.
        """
        self._vmutils = utilsfactory.get_vmutils()
        self._metricsutils = utilsfactory.get_metricsutils()
        self._vhdutils = utilsfactory.get_vhdutils()
        self._hostutils = utilsfactory.get_hostutils()
        self._pathutils = pathutils.PathUtils()
        self._volumeops = volumeops.VolumeOps()
        self._imagecache = imagecache.ImageCache()
        # Resolved from _vif_driver_class_map based on CONF.network_api_class.
        self._vif_driver = None
        self._load_vif_driver_class()
        # Maps instance name -> serial console log writer.
        self._vm_log_writers = {}
def _load_vif_driver_class(self):
try:
class_name = self._vif_driver_class_map[CONF.network_api_class]
self._vif_driver = importutils.import_object(class_name)
except KeyError:
raise TypeError(_("VIF driver not found for "
"network_api_class: %s") %
CONF.network_api_class)
def list_instance_uuids(self):
instance_uuids = []
for (instance_name, notes) in self._vmutils.list_instance_notes():
if notes and uuidutils.is_uuid_like(notes[0]):
instance_uuids.append(str(notes[0]))
else:
LOG.debug("Notes not found or not resembling a GUID for "
"instance: %s" % instance_name)
return instance_uuids
    def list_instances(self):
        """Return the names of all VMs present on this host."""
        return self._vmutils.list_instances()
    def get_info(self, instance):
        """Get information about the VM.

        :param instance: nova instance object
        :returns: hardware.InstanceInfo with power state, memory usage,
            CPU count and uptime
        :raises: exception.InstanceNotFound if the VM does not exist
        """
        LOG.debug("get_info called for instance", instance=instance)

        instance_name = instance.name
        if not self._vmutils.vm_exists(instance_name):
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        info = self._vmutils.get_vm_summary_info(instance_name)

        # Translate the Hyper-V EnabledState value to a nova power state.
        state = constants.HYPERV_POWER_STATE[info['EnabledState']]
        return hardware.InstanceInfo(state=state,
                                     max_mem_kb=info['MemoryUsage'],
                                     mem_kb=info['MemoryUsage'],
                                     num_cpu=info['NumberOfProcessors'],
                                     cpu_time_ns=info['UpTime'])
    def _create_root_vhd(self, context, instance):
        """Create the instance's root disk from the cached base image.

        Depending on CONF.use_cow_images, either a differencing (COW)
        disk backed by the cached image is created, or the cached image
        is copied and resized to the flavor's root size. Returns the
        path of the new root VHD/VHDX; on failure the partially created
        file is removed and the exception re-raised.
        """
        base_vhd_path = self._imagecache.get_cached_image(context, instance)
        base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
        base_vhd_size = base_vhd_info['VirtualSize']
        format_ext = base_vhd_path.split('.')[-1]
        root_vhd_path = self._pathutils.get_root_vhd_path(instance.name,
                                                          format_ext)
        root_vhd_size = instance.root_gb * units.Gi
        try:
            if CONF.use_cow_images:
                LOG.debug("Creating differencing VHD. Parent: "
                          "%(base_vhd_path)s, Target: %(root_vhd_path)s",
                          {'base_vhd_path': base_vhd_path,
                           'root_vhd_path': root_vhd_path},
                          instance=instance)
                self._vhdutils.create_differencing_vhd(root_vhd_path,
                                                       base_vhd_path)
                vhd_type = self._vhdutils.get_vhd_format(base_vhd_path)
                if vhd_type == constants.DISK_FORMAT_VHD:
                    # The base image has already been resized. As differencing
                    # vhdx images support it, the root image will be resized
                    # instead if needed.
                    return root_vhd_path
            else:
                LOG.debug("Copying VHD image %(base_vhd_path)s to target: "
                          "%(root_vhd_path)s",
                          {'base_vhd_path': base_vhd_path,
                           'root_vhd_path': root_vhd_path},
                          instance=instance)
                self._pathutils.copyfile(base_vhd_path, root_vhd_path)
            # Translate the requested file size to the VHD's internal size
            # (the container format adds overhead) before comparing/resizing.
            root_vhd_internal_size = (
                self._vhdutils.get_internal_vhd_size_by_file_size(
                    base_vhd_path, root_vhd_size))
            if self._is_resize_needed(root_vhd_path, base_vhd_size,
                                      root_vhd_internal_size,
                                      instance):
                self._vhdutils.resize_vhd(root_vhd_path,
                                          root_vhd_internal_size,
                                          is_file_max_size=False)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Don't leave a half-created root disk behind.
                if self._pathutils.exists(root_vhd_path):
                    self._pathutils.remove(root_vhd_path)
        return root_vhd_path
def _is_resize_needed(self, vhd_path, old_size, new_size, instance):
if new_size < old_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=new_size, image_size=old_size)
elif new_size > old_size:
LOG.debug("Resizing VHD %(vhd_path)s to new "
"size %(new_size)s" %
{'new_size': new_size,
'vhd_path': vhd_path},
instance=instance)
return True
return False
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()
eph_vhd_path = self._pathutils.get_ephemeral_vhd_path(
instance.name, vhd_format)
self._vhdutils.create_dynamic_vhd(eph_vhd_path, eph_vhd_size)
return eph_vhd_path
    @check_admin_permissions
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it.

        Builds the root and ephemeral disks (unless booting from a
        volume), creates the VM, optionally attaches a config drive, and
        powers the instance on. On any failure the instance is destroyed
        and the exception re-raised.
        """
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        # Boot-from-volume instances get no local root disk.
        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)
        vm_gen = self.get_image_vm_generation(
            instance.uuid, root_vhd_path, image_meta)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path, vm_gen)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)
                self.attach_config_drive(instance, configdrive_path, vm_gen)

            self.power_on(instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll back everything created so far.
                self.destroy(instance)
    def create_instance(self, instance, network_info, block_device_info,
                        root_vhd_path, eph_vhd_path, vm_gen):
        """Define the VM, attach its disks and NICs, and wire up the
        serial console pipe.

        ``root_vhd_path``/``eph_vhd_path`` may be None when the
        corresponding disk does not exist (e.g. boot from volume).
        """
        instance_name = instance.name
        instance_path = os.path.join(CONF.instances_path, instance_name)

        self._vmutils.create_vm(instance_name,
                                instance.memory_mb,
                                instance.vcpus,
                                CONF.hyperv.limit_cpu_features,
                                CONF.hyperv.dynamic_memory_ratio,
                                vm_gen,
                                instance_path,
                                [instance.uuid])

        self._vmutils.create_scsi_controller(instance_name)
        # Boot/ephemeral disks go on IDE for gen-1 VMs, SCSI for gen-2.
        controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]

        ctrl_disk_addr = 0
        if root_vhd_path:
            self._attach_drive(instance_name, root_vhd_path, 0, ctrl_disk_addr,
                               controller_type)
            ctrl_disk_addr = 1

        if eph_vhd_path:
            self._attach_drive(instance_name, eph_vhd_path, 0, ctrl_disk_addr,
                               controller_type)

        # If ebs_root is False, the first volume will be attached to SCSI
        # controller. Generation 2 VMs only has a SCSI controller.
        ebs_root = vm_gen is not constants.VM_GEN_2 and root_vhd_path is None
        self._volumeops.attach_volumes(block_device_info,
                                       instance_name,
                                       ebs_root)

        for vif in network_info:
            LOG.debug('Creating nic for instance', instance=instance)
            self._vmutils.create_nic(instance_name,
                                     vif['id'],
                                     vif['address'])
            self._vif_driver.plug(instance, vif)

        if CONF.hyperv.enable_instance_metrics_collection:
            self._metricsutils.enable_vm_metrics_collection(instance_name)

        self._create_vm_com_port_pipe(instance)
def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr,
controller_type, drive_type=constants.DISK):
if controller_type == constants.CTRL_TYPE_SCSI:
self._vmutils.attach_scsi_drive(instance_name, path, drive_type)
else:
self._vmutils.attach_ide_drive(instance_name, path, drive_addr,
ctrl_disk_addr, drive_type)
def get_image_vm_generation(self, instance_id, root_vhd_path, image_meta):
default_vm_gen = self._hostutils.get_default_vm_generation()
image_prop_vm = image_meta.properties.get(
'hw_machine_type', default_vm_gen)
if image_prop_vm not in self._hostutils.get_supported_vm_types():
reason = _LE('Requested VM Generation %s is not supported on '
'this OS.') % image_prop_vm
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
vm_gen = VM_GENERATIONS[image_prop_vm]
if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
self._vhdutils.get_vhd_format(
root_vhd_path) == constants.DISK_FORMAT_VHD):
reason = _LE('Requested VM Generation %s, but provided VHD '
'instead of VHDX.') % vm_gen
raise exception.InstanceUnacceptable(instance_id=instance_id,
reason=reason)
return vm_gen
    def _create_config_drive(self, instance, injected_files, admin_password,
                             network_info):
        """Build a config drive image for the instance and return its path.

        Creates an ISO config drive in the instance directory. If
        CONF.hyperv.config_drive_cdrom is False, the ISO is converted with
        qemu-img to a VHD (attachable as a regular disk) and the ISO is
        removed.

        :raises ConfigDriveUnsupportedFormat: if a format other than
            iso9660 is configured.
        :returns: path of the created config drive (.iso or .vhd).
        """
        if CONF.config_drive_format != 'iso9660':
            raise exception.ConfigDriveUnsupportedFormat(
                format=CONF.config_drive_format)
        LOG.info(_LI('Using config drive for instance'), instance=instance)
        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            extra_md['admin_pass'] = admin_password
        inst_md = instance_metadata.InstanceMetadata(instance,
                                                     content=injected_files,
                                                     extra_md=extra_md,
                                                     network_info=network_info)
        instance_path = self._pathutils.get_instance_dir(
            instance.name)
        configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
        LOG.info(_LI('Creating config drive at %(path)s'),
                 {'path': configdrive_path_iso}, instance=instance)
        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Creating config drive failed with '
                                  'error: %s'),
                              e, instance=instance)
        if not CONF.hyperv.config_drive_cdrom:
            # Attaching as a CDROM is disabled: convert the raw ISO to a
            # VHD ('vpc' format) so it can be attached as a plain disk.
            configdrive_path = os.path.join(instance_path,
                                            'configdrive.vhd')
            utils.execute(CONF.hyperv.qemu_img_cmd,
                          'convert',
                          '-f',
                          'raw',
                          '-O',
                          'vpc',
                          configdrive_path_iso,
                          configdrive_path,
                          attempts=1)
            self._pathutils.remove(configdrive_path_iso)
        else:
            configdrive_path = configdrive_path_iso
        return configdrive_path
def attach_config_drive(self, instance, configdrive_path, vm_gen):
configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):]
# Do the attach here and if there is a certain file format that isn't
# supported in constants.DISK_FORMAT_MAP then bomb out.
try:
drive_type = constants.DISK_FORMAT_MAP[configdrive_ext]
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._attach_drive(instance.name, configdrive_path, 1, 0,
controller_type, drive_type)
except KeyError:
raise exception.InvalidDiskFormat(disk_format=configdrive_ext)
def _delete_disk_files(self, instance_name):
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
    def destroy(self, instance, network_info=None, block_device_info=None,
                destroy_disks=True):
        """Tear down the instance's VM, NICs, volume connections and disks.

        :param instance: nova instance to destroy.
        :param network_info: vifs to unplug, if any.
        :param block_device_info: volume connections to disconnect.
        :param destroy_disks: if True, also remove the instance directory
            with all of its disk files.
        """
        instance_name = instance.name
        LOG.info(_LI("Got request to destroy instance"), instance=instance)
        try:
            if self._vmutils.vm_exists(instance_name):
                # Stop the VM first.
                self._vmutils.stop_vm_jobs(instance_name)
                self.power_off(instance)
                if network_info:
                    for vif in network_info:
                        self._vif_driver.unplug(instance, vif)
                self._vmutils.destroy_vm(instance_name)
                self._volumeops.disconnect_volumes(block_device_info)
            else:
                # No VM at the hypervisor level; disk files may still exist
                # and are handled below.
                LOG.debug("Instance not found", instance=instance)
            if destroy_disks:
                self._delete_disk_files(instance_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to destroy instance: %s'),
                              instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug("Rebooting instance", instance=instance)
if reboot_type == REBOOT_TYPE_SOFT:
if self._soft_shutdown(instance):
self.power_on(instance)
return
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_REBOOT)
    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

        Repeatedly asks the guest to shut down, waiting up to
        ``retry_interval`` seconds per attempt, until it powers off or the
        total ``timeout`` budget is spent.

        NOTE(review): the ``timeout`` default is evaluated once at class
        definition time, so config changes made after import are not
        picked up -- confirm this is intended.

        :return: True if the instance was shutdown within time limit,
                 False otherwise.
        """
        LOG.debug("Performing Soft shutdown on instance", instance=instance)
        while timeout > 0:
            # Perform a soft shutdown on the instance.
            # Wait maximum timeout for the instance to be shutdown.
            # If it was not shutdown, retry until it succeeds or a maximum of
            # time waited is equal to timeout.
            wait_time = min(retry_interval, timeout)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          timeout, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info(_LI("Soft shutdown succeeded."),
                             instance=instance)
                    return True
            except os_win_exc.HyperVException as e:
                # Exception is raised when trying to shutdown the instance
                # while it is still booting.  Sleep before retrying so the
                # guest gets a chance to finish booting.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)
            timeout -= retry_interval
        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
                    instance=instance)
        return False
def pause(self, instance):
"""Pause VM instance."""
LOG.debug("Pause instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug("Unpause instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug("Suspend instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug("Resume instance", instance=instance)
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
LOG.debug("Power off instance", instance=instance)
if retry_interval <= 0:
retry_interval = SHUTDOWN_TIME_INCREMENT
try:
if timeout and self._soft_shutdown(instance,
timeout,
retry_interval):
return
self._set_vm_state(instance,
os_win_const.HYPERV_VM_STATE_DISABLED)
except os_win_exc.HyperVVMNotFoundException:
# The manager can call the stop API after receiving instance
# power off events. If this is triggered when the instance
# is being deleted, it might attempt to power off an unexisting
# instance. We'll just pass in this case.
LOG.debug("Instance not found. Skipping power off",
instance=instance)
def power_on(self, instance, block_device_info=None):
"""Power on the specified instance."""
LOG.debug("Power on instance", instance=instance)
if block_device_info:
self._volumeops.fix_instance_volume_disk_paths(instance.name,
block_device_info)
self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    def _set_vm_state(self, instance, req_state):
        """Change the VM's power state and manage its console log writer.

        The console log is deleted when the VM is stopped or rebooted, and
        the serial log writer is (re)started when it is enabled or
        rebooted. Failures are logged and re-raised.
        """
        instance_name = instance.name
        instance_uuid = instance.uuid
        try:
            self._vmutils.set_vm_state(instance_name, req_state)
            if req_state in (os_win_const.HYPERV_VM_STATE_DISABLED,
                             os_win_const.HYPERV_VM_STATE_REBOOT):
                self._delete_vm_console_log(instance)
            if req_state in (os_win_const.HYPERV_VM_STATE_ENABLED,
                             os_win_const.HYPERV_VM_STATE_REBOOT):
                self.log_vm_serial_output(instance_name,
                                          instance_uuid)
            LOG.debug("Successfully changed state of VM %(instance_name)s"
                      " to: %(req_state)s", {'instance_name': instance_name,
                                             'req_state': req_state})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to change vm state of %(instance_name)s"
                              " to %(req_state)s"),
                          {'instance_name': instance_name,
                           'req_state': req_state})
def _get_vm_state(self, instance_name):
summary_info = self._vmutils.get_vm_summary_info(instance_name)
return summary_info['EnabledState']
    def _wait_for_power_off(self, instance_name, time_limit):
        """Waiting for a VM to be in a disabled state.

        Polls the VM state every SHUTDOWN_TIME_INCREMENT seconds until it
        becomes disabled or ``time_limit`` seconds elapse.

        :return: True if the instance is shutdown within time_limit,
                 False otherwise.
        """
        desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED]
        def _check_vm_status(instance_name):
            # Raising LoopingCallDone is how a FixedIntervalLoopingCall
            # signals successful completion.
            if self._get_vm_state(instance_name) in desired_vm_states:
                raise loopingcall.LoopingCallDone()
        periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status,
                                                             instance_name)
        try:
            # add a timeout to the periodic call.
            periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT)
            etimeout.with_timeout(time_limit, periodic_call.wait)
        except etimeout.Timeout:
            # VM did not shutdown in the expected time_limit.
            return False
        finally:
            # stop the periodic call, in case of exceptions or Timeout.
            periodic_call.stop()
        return True
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self.power_on(instance, block_device_info)
    def log_vm_serial_output(self, instance_name, instance_uuid):
        """Start a background writer copying the VM's serial console output.

        A background IOThread reads from the VM's named pipe and appends to
        the console log file; if a writer for this instance is already
        active, nothing is started.
        """
        console_log_path = self._pathutils.get_vm_console_log_paths(
            instance_name)[0]
        pipe_path = r'\\.\pipe\%s' % instance_uuid
        # Serialize on the pipe path so concurrent calls cannot start
        # duplicate log writers for the same VM.
        @utils.synchronized(pipe_path)
        def log_serial_output():
            vm_log_writer = self._vm_log_writers.get(instance_uuid)
            if vm_log_writer and vm_log_writer.is_active():
                LOG.debug("Instance %s log writer is already running.",
                          instance_name)
            else:
                vm_log_writer = ioutils.IOThread(
                    pipe_path, console_log_path,
                    self._MAX_CONSOLE_LOG_FILE_SIZE)
                vm_log_writer.start()
                self._vm_log_writers[instance_uuid] = vm_log_writer
        log_serial_output()
def get_console_output(self, instance):
console_log_paths = (
self._pathutils.get_vm_console_log_paths(instance.name))
try:
instance_log = ''
# Start with the oldest console log file.
for console_log_path in console_log_paths[::-1]:
if os.path.exists(console_log_path):
with open(console_log_path, 'rb') as fp:
instance_log += fp.read()
return instance_log
except IOError as err:
raise exception.ConsoleLogOutputException(
instance_id=instance.uuid, reason=six.text_type(err))
def _delete_vm_console_log(self, instance):
console_log_files = self._pathutils.get_vm_console_log_paths(
instance.name)
vm_log_writer = self._vm_log_writers.get(instance.uuid)
if vm_log_writer:
vm_log_writer.join()
for log_file in console_log_files:
fileutils.delete_if_exists(log_file)
def copy_vm_console_logs(self, vm_name, dest_host):
local_log_paths = self._pathutils.get_vm_console_log_paths(
vm_name)
remote_log_paths = self._pathutils.get_vm_console_log_paths(
vm_name, remote_server=dest_host)
for local_log_path, remote_log_path in zip(local_log_paths,
remote_log_paths):
if self._pathutils.exists(local_log_path):
self._pathutils.copy(local_log_path,
remote_log_path)
def _create_vm_com_port_pipe(self, instance):
# Creates a pipe to the COM 0 serial port of the specified vm.
pipe_path = r'\\.\pipe\%s' % instance.uuid
self._vmutils.get_vm_serial_port_connection(
instance.name, update_connection=pipe_path)
    def restart_vm_log_writers(self):
        # Restart the VM console log writers after nova compute restarts.
        active_instances = self._vmutils.get_active_instances()
        for instance_name in active_instances:
            instance_path = self._pathutils.get_instance_dir(instance_name)
            # Skip instances that are not created by Nova
            if not os.path.exists(instance_path):
                continue
            vm_serial_conn = self._vmutils.get_vm_serial_port_connection(
                instance_name)
            if vm_serial_conn:
                # The pipe path ends with the instance uuid (see
                # _create_vm_com_port_pipe), so recover the uuid from the
                # connection path's last component.
                instance_uuid = os.path.basename(vm_serial_conn)
                self.log_vm_serial_output(instance_name, instance_uuid)
def copy_vm_dvd_disks(self, vm_name, dest_host):
dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name)
dest_path = self._pathutils.get_instance_dir(
vm_name, remote_server=dest_host)
for path in dvd_disk_paths:
self._pathutils.copyfile(path, dest_path)
def _check_hotplug_available(self, instance):
"""Check whether attaching an interface is possible for the given
instance.
:returns: True if attaching / detaching interfaces is possible for the
given instance.
"""
vm_state = self._get_vm_state(instance.name)
if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED:
# can attach / detach interface to stopped VMs.
return True
if not self._hostutils.check_min_windows_version(10, 0):
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("vNIC hot plugging is supported only in newer "
"versions than Windows Hyper-V / Server 2012 R2.")
return False
if (self._vmutils.get_vm_generation(instance.name) ==
constants.VM_GEN_1):
# TODO(claudiub): add set log level to error after string freeze.
LOG.debug("Cannot hot plug vNIC to a first generation VM.",
instance=instance)
return False
return True
    def attach_interface(self, instance, vif):
        """Hot-plug a vNIC for the given vif into the instance.

        :raises InterfaceAttachFailed: if hot plugging is not available
            for this instance (see _check_hotplug_available).
        """
        if not self._check_hotplug_available(instance):
            raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid)
        LOG.debug('Attaching vif: %s', vif['id'], instance=instance)
        self._vmutils.create_nic(instance.name, vif['id'], vif['address'])
        self._vif_driver.plug(instance, vif)
    def detach_interface(self, instance, vif):
        """Hot-unplug the vNIC for the given vif from the instance.

        :raises InterfaceDetachFailed: if hot unplugging is not available
            or the VM no longer exists.
        """
        try:
            if not self._check_hotplug_available(instance):
                raise exception.InterfaceDetachFailed(
                    instance_uuid=instance.uuid)
            LOG.debug('Detaching vif: %s', vif['id'], instance=instance)
            self._vif_driver.unplug(instance, vif)
            self._vmutils.destroy_nic(instance.name, vif['id'])
        except os_win_exc.HyperVVMNotFoundException:
            # TODO(claudiub): add set log level to error after string freeze.
            LOG.debug("Instance not found during detach interface. It "
                      "might have been destroyed beforehand.",
                      instance=instance)
            raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
| {
"content_hash": "8baba9630252b7bd21af1cba59dd2fe5",
"timestamp": "",
"source": "github",
"line_count": 755,
"max_line_length": 79,
"avg_line_length": 42.8887417218543,
"alnum_prop": 0.5627065254315803,
"repo_name": "dims/nova",
"id": "ebf83f59fca2a669284068dec87ab7309d55a233",
"size": "33056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/hyperv/vmops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16952469"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "317320"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2017 Russell Harkanson
See the file LICENSE.txt for copying permission.
""" | {
"content_hash": "65bad07259fdbe646a606a1554e2043f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 18.8,
"alnum_prop": 0.7446808510638298,
"repo_name": "rharkanson/pyriscope",
"id": "6d574f246546c91ddcf7d3f63fd85766cef03c0d",
"size": "94",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyriscope/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21524"
}
],
"symlink_target": ""
} |
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import datetime
import shutil
import gzip
import numpy as np
from clawpack.geoclaw.surge.storm import Storm
import clawpack.clawutil as clawutil
from clawpack.geoclaw import topotools
# Time Conversions
def days2seconds(days):
    """Convert a duration in days to seconds (days * 3600 s/h * 24 h/d)."""
    return (days * 3600.0) * 24.0
# directory for storing topo and dtopo files:
CLAW = os.environ['CLAW']
DATA = os.path.join(os.environ.get('DATA_DIR', os.getcwd()))
# ------------------------------
def setrun(claw_pkg='geoclaw'):
    """
    Define the parameters used for running Clawpack.

    Sets up the computational domain, output control, time stepping, AMR
    parameters and gauges for the Hurricane Noel surge simulation, then
    delegates GeoClaw-specific settings to setgeo().

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.
    OUTPUT:
        rundata - object of class ClawRunData
    """
    from clawpack.clawutil import data
    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)
    # ------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    # (or to amr2ez.data for AMR)
    # ------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated
    # Set single grid parameters first.
    # See below for AMR parameters.
    # ---------------
    # Spatial domain:
    # ---------------
    # Number of space dimensions:
    clawdata.num_dim = num_dim
    # Lower and upper edge of computational domain:
    clawdata.lower[0] = -89.83      # west longitude
    clawdata.upper[0] = -28.62      # east longitude
    clawdata.lower[1] = 12.96       # south latitude
    clawdata.upper[1] = 63.80       # north latitude
    # Number of grid cells:
    degree_factor = 4  # (0.25º,0.25º) ~ (25237.5 m, 27693.2 m) resolution
    clawdata.num_cells[0] = int(clawdata.upper[0] - clawdata.lower[0]) \
        * degree_factor
    clawdata.num_cells[1] = int(clawdata.upper[1] - clawdata.lower[1]) \
        * degree_factor
    # ---------------
    # Size of system:
    # ---------------
    # Number of equations in the system:
    clawdata.num_eqn = 3
    # Number of auxiliary variables in the aux array (initialized in setaux)
    # First three are from shallow GeoClaw, fourth is friction and last 3 are
    # storm fields
    clawdata.num_aux = 3 + 1 + 3
    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2
    # -------------
    # Initial time:
    # -------------
    # NOTE(review): t = 0 presumably corresponds to the storm reference time
    # (time_offset set in setgeo); the run starts 3 days before it.
    clawdata.t0 = -days2seconds(3)
    # Restart from checkpoint file of a previous run?
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.
    clawdata.restart = False  # True to restart from prior results
    clawdata.restart_file = 'fort.chk00006'  # File to use for restart data
    # -------------
    # Output times:
    # --------------
    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    # The solution at initial time t0 is always written in addition.
    clawdata.output_style = 1
    if clawdata.output_style == 1:
        # Output nout frames at equally spaced times up to tfinal:
        clawdata.tfinal = days2seconds(5)
        recurrence = 4
        clawdata.num_output_times = int((clawdata.tfinal - clawdata.t0) *
                                        recurrence / (60**2 * 24))
        clawdata.output_t0 = True  # output at initial (or restart) time?
    elif clawdata.output_style == 2:
        # Specify a list of output times.
        clawdata.output_times = [0.5, 1.0]
    elif clawdata.output_style == 3:
        # Output every iout timesteps with a total of ntot time steps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 1
        clawdata.output_t0 = True
    clawdata.output_format = 'ascii'  # 'ascii' or 'binary'
    clawdata.output_q_components = 'all'  # could be list such as [True,True]
    clawdata.output_aux_components = 'all'
    clawdata.output_aux_onlyonce = False  # output aux arrays only at t0
    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------
    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity. Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 0
    # --------------
    # Time stepping:
    # --------------
    # if dt_variable==1: variable time steps used based on cfl_desired,
    # if dt_variable==0: fixed time steps dt = dt_initial will always be used.
    clawdata.dt_variable = True
    # Initial time step for variable dt.
    # If dt_variable==0 then dt=dt_initial for all steps:
    clawdata.dt_initial = 0.016
    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99
    # Desired Courant number if variable dt used, and max to allow without
    # retaking step with a smaller dt:
    clawdata.cfl_desired = 0.75
    clawdata.cfl_max = 1.0
    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 15000
    # ------------------
    # Method to be used:
    # ------------------
    # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
    clawdata.order = 1
    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'
    # For unsplit method, transverse_waves can be
    # 0 or 'none' ==> donor cell (only normal solver used)
    # 1 or 'increment' ==> corner transport of waves
    # 2 or 'all' ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 1
    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3
    # List of limiters to use for each wave family:
    # Required: len(limiter) == num_waves
    # Some options:
    # 0 or 'none' ==> no limiter (Lax-Wendroff)
    # 1 or 'minmod' ==> minmod
    # 2 or 'superbee' ==> superbee
    # 3 or 'mc' ==> MC limiter
    # 4 or 'vanleer' ==> van Leer
    clawdata.limiter = ['mc', 'mc', 'mc']
    clawdata.use_fwaves = True  # True ==> use f-wave version of algorithms
    # Source terms splitting:
    # src_split == 0 or 'none'
    # ==> no source term (src routine never called)
    # src_split == 1 or 'godunov'
    # ==> Godunov (1st order) splitting used,
    # src_split == 2 or 'strang'
    # ==> Strang (2nd order) splitting used, not recommended.
    clawdata.source_split = 'godunov'
    # --------------------
    # Boundary conditions:
    # --------------------
    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2
    # Choice of BCs at xlower and xupper:
    # 0 => user specified (must modify bcN.f to use this option)
    # 1 => extrapolation (non-reflecting outflow)
    # 2 => periodic (must specify this at both boundaries)
    # 3 => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap'
    clawdata.bc_upper[0] = 'extrap'
    clawdata.bc_lower[1] = 'extrap'
    clawdata.bc_upper[1] = 'extrap'
    # Specify when checkpoint files should be created that can be
    # used to restart a computation.
    clawdata.checkpt_style = 0
    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass
    elif np.abs(clawdata.checkpt_style) == 1:
        # Checkpoint only at tfinal.
        pass
    elif np.abs(clawdata.checkpt_style) == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1, 0.15]
    elif np.abs(clawdata.checkpt_style) == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5
    # ---------------
    # AMR parameters:
    # ---------------
    amrdata = rundata.amrdata
    # max number of refinement levels:
    amrdata.amr_levels_max = 5
    # List of refinement ratios at each level (length at least mxnest-1)
    amrdata.refinement_ratios_x = [2, 2, 2, 6, 16]
    amrdata.refinement_ratios_y = [2, 2, 2, 6, 16]
    amrdata.refinement_ratios_t = [2, 2, 2, 6, 16]
    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length maux, each element of which is one of:
    # 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft', 'center', 'center',
                        'center', 'center']
    # Flag using refinement routine flag2refine rather than richardson error
    amrdata.flag_richardson = False  # use Richardson?
    amrdata.flag2refine = True
    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3
    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2
    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.700000
    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0
    # ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False  # print domain flags
    amrdata.eprint = False  # print err est flags
    amrdata.edebug = False  # even more err est flags
    amrdata.gprint = False  # grid bisection/clustering
    amrdata.nprint = False  # proper nesting output
    amrdata.pprint = False  # proj. of tagged points
    amrdata.rprint = False  # print regridding summary
    amrdata.sprint = False  # space/memory output
    amrdata.tprint = False  # time step reporting each level
    amrdata.uprint = False  # update/upbnd reporting
    # More AMR parameters can be set -- see the defaults in pyclaw/data.py
    # == setregions.data values ==
    regions = rundata.regiondata.regions
    # to specify regions of refinement append lines of the form
    # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
    # Noel gauges from NOAA stations and newspapers
    rundata.gaugedata.gauges.append([1, -80.16, 25.73,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([2, -81.81, 24.54,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([3, -67.94, 18.09,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([4, -67.05, 17.97,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([5, -74.90, 23.65,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    rundata.gaugedata.gauges.append([6, -75.44, 24.29,
                                     rundata.clawdata.t0,
                                     rundata.clawdata.tfinal])
    # Force the gauges to also record the wind and pressure fields
    rundata.gaugedata.aux_out_fields = [4, 5, 6]
    # ------------------------------------------------------------------
    # GeoClaw specific parameters:
    # ------------------------------------------------------------------
    rundata = setgeo(rundata)
    return rundata
# end of function setrun
# ----------------------
# -------------------
def setgeo(rundata):
    """
    Set GeoClaw specific runtime parameters.

    Configures physics, refinement criteria, topography, storm surge and
    variable friction data on ``rundata`` and returns it. Also downloads
    the ATCF best-track file for Hurricane Noel (AL162007) and converts it
    to the GeoClaw storm format.
    """
    geo_data = rundata.geo_data
    # == Physics ==
    geo_data.gravity = 9.81
    geo_data.coordinate_system = 2
    geo_data.earth_radius = 6367.5e3
    geo_data.rho = 1025.0
    geo_data.rho_air = 1.15
    geo_data.ambient_pressure = 101.3e3
    # == Forcing Options
    geo_data.coriolis_forcing = True
    geo_data.friction_forcing = True
    geo_data.friction_depth = 1e10
    # == Algorithm and Initial Conditions ==
    geo_data.sea_level = 0.0
    geo_data.dry_tolerance = 1.e-2
    # Refinement Criteria
    refine_data = rundata.refinement_data
    refine_data.wave_tolerance = 1.0
    refine_data.speed_tolerance = [1.0, 2.0, 3.0, 4.0]
    refine_data.deep_depth = 300.0
    refine_data.max_level_deep = 4
    refine_data.variable_dt_refinement_ratios = True
    # == settopo.data values ==
    topo_data = rundata.topo_data
    topo_data.topofiles = []
    # for topography, append lines of the form
    # [topotype, minlevel, maxlevel, t1, t2, fname]
    # See regions for control over these regions, need better bathy data for
    # the smaller domains
    # use .tt3 file from data directory
    topo_path = os.path.join(DATA, 'topo_for_noel.tt3')
    topo_data.topofiles.append([3, 1, 5, rundata.clawdata.t0,
                                rundata.clawdata.tfinal, topo_path])
    # == setfixedgrids.data values ==
    rundata.fixed_grid_data.fixedgrids = []
    # for fixed grids append lines of the form
    # [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
    # ioutarrivaltimes,ioutsurfacemax]
    # ================
    # Set Surge Data
    # ================
    data = rundata.surge_data
    # Source term controls
    data.wind_forcing = True
    data.drag_law = 1
    data.pressure_forcing = True
    data.display_landfall_time = True
    # AMR parameters, m/s and m respectively
    data.wind_refine = [20.0, 40.0, 60.0]
    data.R_refine = [60.0e3, 40e3, 20e3]
    # Storm parameters - Parameterized storm (Holland 1980)
    data.storm_specification_type = 'holland80'  # (type 1)
    data.storm_file = os.path.expandvars(os.path.join(os.getcwd(),
                                         'noel.storm'))
    # Convert ATCF data to GeoClaw format
    clawutil.data.get_remote_file(
        "http://ftp.nhc.noaa.gov/atcf/archive/2007/bal162007.dat.gz")
    atcf_path = os.path.join(DATA, "bal162007.dat")
    # Note that the get_remote_file function does not support gzip files which
    # are not also tar files. The following code handles this
    with gzip.open(".".join((atcf_path, 'gz')), 'rb') as atcf_file, \
            open(atcf_path, 'w') as atcf_unzipped_file:
        atcf_unzipped_file.write(atcf_file.read().decode('ascii'))
    noel = Storm(path=atcf_path, file_format="ATCF")
    # Calculate landfall time - Need to specify as the file above does not
    noel.time_offset = datetime.datetime(2007, 10, 31, 18)
    noel.write(data.storm_file, file_format='geoclaw')
    # =======================
    # Set Variable Friction
    # =======================
    data = rundata.friction_data
    # Variable friction
    data.variable_friction = True
    # Region based friction
    # Entire domain
    # NOTE: np.inf is used instead of the legacy np.infty alias, which was
    # deprecated and removed in NumPy 2.0 (the values are identical).
    data.friction_regions.append([rundata.clawdata.lower,
                                  rundata.clawdata.upper,
                                  [np.inf, 0.0, -np.inf],
                                  [0.030, 0.022]])
    # La-Tex Shelf
    data.friction_regions.append([(-98, 25.25), (-90, 30),
                                  [np.inf, -10.0, -200.0, -np.inf],
                                  [0.030, 0.012, 0.022]])
    return rundata
# end of function setgeo
# ----------------------
if __name__ == '__main__':
    # Build the run-time parameter object and write all Clawpack data files.
    import sys
    rundata = setrun(sys.argv[1]) if len(sys.argv) == 2 else setrun()
    rundata.write()
| {
"content_hash": "f30204b2a25b2f5f418cb0e6b20ba4b8",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 98,
"avg_line_length": 33.90315789473684,
"alnum_prop": 0.5869349230004968,
"repo_name": "mandli/surge-examples",
"id": "500f330e450e95ca6c35d75c41207dab741e2ea4",
"size": "16124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "noel/setrun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "375523"
},
{
"name": "Makefile",
"bytes": "74863"
},
{
"name": "Python",
"bytes": "1273452"
}
],
"symlink_target": ""
} |
from typing import List, Tuple
from phi import math
from . import GridCell
from ._geom import Geometry
from ..math import Tensor, expand
from ..math._shape import shape_stack, Shape, INSTANCE_DIM, non_channel
from ..math._magic_ops import variable_attributes, copy_with
from ..math.magic import slicing_dict
class GeometryStack(Geometry):
def __init__(self, geometries: Tensor):
self.geometries = geometries
self._shape = shape_stack(geometries.shape, *[g.shape for g in geometries])
def unstack(self, dimension) -> tuple:
if dimension == self.geometries.shape.name:
return tuple(self.geometries)
else:
# return GeometryStack([g.unstack(dimension) for g in self.geometries], self.geometries.shape)
raise NotImplementedError()
@property
def center(self):
centers = [g.center for g in self.geometries]
return math.stack(centers, self.geometries.shape)
@property
def spatial_rank(self) -> int:
return next(iter(self.geometries)).spatial_rank
@property
def shape(self) -> Shape:
return self._shape
@property
def volume(self) -> math.Tensor:
if self.geometries.shape.type == INSTANCE_DIM:
raise NotImplementedError("instance dimensions not yet supported")
return math.stack([g.volume for g in self.geometries], self.geometries.shape)
@property
def shape_type(self) -> Tensor:
types = [g.shape_type for g in self.geometries]
return math.stack(types, self.geometries.shape)
def lies_inside(self, location: math.Tensor):
if self.geometries.shape in location.shape:
location = location.unstack(self.geometries.shape.name)
else:
location = [location] * len(self.geometries)
inside = [g.lies_inside(loc) for g, loc in zip(self.geometries, location)]
return math.stack(inside, self.geometries.shape)
def approximate_signed_distance(self, location: math.Tensor):
raise NotImplementedError()
def bounding_radius(self):
radii = [expand(g.bounding_radius(), non_channel(g)) for g in self.geometries]
return math.stack(radii, self.geometries.shape)
def bounding_half_extent(self):
values = [expand(g.bounding_half_extent(), non_channel(g)) for g in self.geometries]
return math.stack(values, self.geometries.shape)
def shifted(self, delta: math.Tensor):
deltas = delta.dimension(self.geometries.shape).unstack(len(self.geometries))
geometries = [g.shifted(d) for g, d in zip(self.geometries, deltas)]
return GeometryStack(math.layout(geometries, self.geometries.shape))
def rotated(self, angle):
geometries = [g.rotated(angle) for g in self.geometries]
return GeometryStack(math.layout(geometries, self.geometries.shape))
def push(self, positions: Tensor, outward: bool = True, shift_amount: float = 0) -> Tensor:
raise NotImplementedError('GeometryStack.push() is not yet implemented.')
def __eq__(self, other):
return isinstance(other, GeometryStack) \
and self._shape == other.shape \
and self.geometries.shape == other.stack_dim \
and self.geometries == other.geometries
def shallow_equals(self, other):
if self is other:
return True
if not isinstance(other, GeometryStack) or self._shape != other.shape:
return False
if self.geometries.shape != other.geometries.shape:
return False
return all(g1.shallow_equals(g2) for g1, g2 in zip(self.geometries, other.geometries))
def __hash__(self):
    """Hash delegates to the layout holding the stacked geometries."""
    return hash(self.geometries)
def __getitem__(self, item):
    """Slice the stack; unwrap to the single geometry when only one remains."""
    selected = self.geometries[slicing_dict(self, item)]
    if selected.shape.volume > 1:
        return GeometryStack(selected)
    return next(iter(selected))
| {
"content_hash": "318d814e76659462586796f29e227b2b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 106,
"avg_line_length": 38.49514563106796,
"alnum_prop": 0.6567465321563682,
"repo_name": "tum-pbs/PhiFlow",
"id": "7c972392b7a084a73c5f1c9b50c412dcce6dc8cd",
"size": "3965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phi/geom/_stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "86714"
},
{
"name": "Python",
"bytes": "1413858"
}
],
"symlink_target": ""
} |
from django.urls import path
from django.views.generic.base import RedirectView
from ..views import facultad
# URL routes for the faculty ("facultad") section. Order matters: more
# specific prefixes must come before the parameterized catch-alls below.
urlpatterns = [
    # Course listing front page
    path('',
         facultad.HomePageView.as_view(),
         name='home'),
    # Legacy URL: permanently hosted under /facultad/, so redirect there.
    path('materias/',
         RedirectView.as_view(url='/facultad/'),
         name='materias'),
    # Bulk-load courses into the user's record.
    path('materias/cargar/',
         facultad.CargarMateriasView.as_view(),
         name='cargar-materias'),
    # Course list for one degree plan, keyed by plan code.
    path('materias/<str:plancarrera>/',
         facultad.PlanCarreraView.as_view(),
         name='materias-carrera'),
    # Detail page for a single course, keyed by course code.
    path('materia/<str:codigo>/',
         facultad.MateriaView.as_view(),
         name='materia'),
]
| {
"content_hash": "ab5987599599f7d6afe2d51c5ec940b5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 50,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.6107784431137725,
"repo_name": "maru/fiubar",
"id": "c23cb87b779d1dfb8550ff8d4b4dc78c00ae7669",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fiubar/facultad/urls/facultad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15115"
},
{
"name": "Dockerfile",
"bytes": "85"
},
{
"name": "HTML",
"bytes": "68752"
},
{
"name": "JavaScript",
"bytes": "20080"
},
{
"name": "Python",
"bytes": "233798"
},
{
"name": "Shell",
"bytes": "286"
}
],
"symlink_target": ""
} |
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class OneloadResolver(UrlResolver):
    """URLResolver plugin that resolves oneload.co / oneload.com video links
    to a direct download URL."""
    name = "oneload"
    domains = ["oneload.co", "oneload.com"]
    # Raw string so regex escapes (\., etc.) are not treated as invalid
    # string escape sequences (a DeprecationWarning, and an error in
    # future Python versions).
    pattern = r"(?://|\.)(oneload\.(?:co|com))/([a-zA-Z0-9]+)"

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Resolve the direct media URL for `media_id`.

        Submits the site's "Free Download" form and scrapes the resulting
        page for the download link.

        Raises:
            ResolverError: if no downloadable source is found.
        """
        web_url = self.get_url(host, media_id)
        headers = {'User-Agent': common.FF_USER_AGENT, 'Referer': web_url}
        # Mimic the free-download form submission the site expects.
        form_data = {'op': 'download2', 'id': media_id, 'rand': '', 'referer': web_url, 'method_free': 'Free Download', 'method_premium': '', 'adblock_detected': '0'}
        html = self.net.http_POST(web_url, form_data=form_data, headers=headers).content
        if html:
            # Escape the literal dot in "oneload\.co" so '.' cannot match an
            # arbitrary character and pick up look-alike hosts.
            source = re.search(r"""href=["'](.+?oneload\.co:\d+/d/\w+/([^"']+)).+?>\2</a>""", html)
            if source:
                return source.group(1) + helpers.append_headers(headers)
        raise ResolverError('Video not found')

    def get_url(self, host, media_id):
        """Build the canonical page URL for `media_id` on oneload.co."""
        return self._default_get_url(host, media_id, template='https://oneload.co/{media_id}')
| {
"content_hash": "27f43328dfb16c3159a7e5a514d07e64",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 166,
"avg_line_length": 43.707317073170735,
"alnum_prop": 0.6629464285714286,
"repo_name": "dbiesecke/dbiesecke.github.io",
"id": "bbbb957f4f1d6c5857f54d1068e13df83aae3ef9",
"size": "1792",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "repo/script.module.urlresolver/lib/urlresolver/plugins/oneload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23106"
},
{
"name": "HTML",
"bytes": "1689379"
},
{
"name": "JavaScript",
"bytes": "103456"
},
{
"name": "Makefile",
"bytes": "4554"
},
{
"name": "Perl",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "14200477"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
""" Test the simple random number generator. """
import os
import shutil
import time
import unittest
import rnglib
TEST_DIR = 'tmp'
class TestSimpleRNG(unittest.TestCase):
    """ Test the simple random number generator. """

    def setUp(self):
        # Seed the generator from the wall clock so each run differs.
        now = time.time()
        self.rng = rnglib.SimpleRNG(now)
        os.makedirs(TEST_DIR, exist_ok=True, mode=0o755)

    def tearDown(self):
        # NOTE(review): TEST_DIR is intentionally left in place between
        # tests/runs (setUp uses exist_ok=True); confirm leftover fixtures
        # are acceptable before adding cleanup here.
        pass

    # utility functions #############################################
    def _build_data(self, count):
        """ Return the requested number of quasi-random bytes. """
        self.assertTrue(count > 0)
        # bytearray(count) is zero-filled; verify that before handing it out.
        data = bytearray(count)
        for i in range(count):
            self.assertTrue(data[i] == 0)
        self.assertEqual(count, len(data))
        return data

    # # NOT CURRENTLY USED
    # def set_a_bit(self, vector, value):
    #     """ treat a 32 byte vector as a bit vector of 256 bits """
    #     byte = int(value / 8)
    #     bit = value % 8
    #     vector[byte] |= 1 << bit
    #     return vector

    # # NOT CURRENTLY USED
    # def non_zero_bits(self, vector):
    #     pass

    # actual unit tests #############################################
    def test_constants(self):
        """" Verify constants have expected values. """
        max_int16 = rnglib.MAX_INT16
        max_int32 = rnglib.MAX_INT32
        max_int64 = rnglib.MAX_INT64
        # Each wider constant is the square of the narrower one.
        self.assertEqual(65536, max_int16)
        self.assertEqual(max_int16 * max_int16, max_int32)
        self.assertEqual(max_int32 * max_int32, max_int64)

    def test_simplest_constructor(self):
        """ Verify that the class's random number generator is not None. """
        self.assertFalse(self.rng is None)

    def test_seed(self):
        """ Check the bahavior of seed values. """
        seed = self.rng.next_int16()
        # if the seed is the same, the numbers generated should be the same
        rng1 = rnglib.SimpleRNG(seed)
        rng2 = rnglib.SimpleRNG(seed)
        for _ in range(16):
            aaa = rng1.next_int16()
            bbb = rng2.next_int16()
            self.assertEqual(aaa, bbb)

        # if the seeds differ, with a very high probability the numbers
        # generated should differ
        seed1 = (seed << 16) | seed
        seed2 = ~seed1
        rng1 = rnglib.SimpleRNG(seed1)
        rng2 = rnglib.SimpleRNG(seed2)
        aaa = rng1.next_int16()
        bbb = rng2.next_int16()
        self.assertTrue(aaa != bbb)     # fails, rarely

    def test_next_boolean(self):
        """ Check the behavior of the next_boolean() function. """
        value = self.rng.next_boolean()
        self.assertTrue((value is True) or (value is False))
        self.assertTrue(isinstance(value, bool))

    def test_next_byte(self):
        """ Check the behavior of the next_byte() function. """
        value = self.rng.next_byte()
        self.assertTrue((value >= 0) and (value < 256))
        # would like to test that the entire range is filled
        # where the cost of the test is reasonable

    def test_next_bytes(self):
        """ Check the behavior of the next_bytes() function. """
        # length is in [16, 272): 16 plus one random byte.
        length = 16 + self.rng.next_byte()
        self.assertTrue((length >= 16) and (length < 272))
        data = self._build_data(length)     # builds a byte array
        self.rng.next_bytes(data)
        self.assertEqual(length, len(data))

    def test_next_file_name(self):
        """ Check the behavior of the next_file() function. """
        for count in range(8):
            max_len = 16 + count
            name = self.rng.next_file_name(max_len)
            # Generated names are non-empty and strictly under max_len.
            self.assertTrue(max_len > len(name))
            self.assertTrue(len(name) > 0)

    def test_next_data_file(self):
        """ Check the behavior of the next_data_file() function. """
        for _ in range(9):
            file_len = 16 + self.rng.next_byte()
            # next_data_file returns (actual_byte_count, path).
            (count, path_to_file) = self.rng.next_data_file(
                TEST_DIR, file_len + 1, file_len)
            self.assertTrue(os.path.exists(path_to_file))
            self.assertEqual(os.path.getsize(path_to_file), count)

    def test_some_bytes(self):
        """ Check the behavior of the some_bytes() function. """
        now = time.time()
        rng = rnglib.SimpleRNG(now)
        for _ in range(8):
            count = 1 + rng.next_int16(16)
            b_val = rng.some_bytes(count)
            self.assertEqual(len(b_val), count)
            self.assertTrue(isinstance(b_val, bytearray))

    def do_next_data_dir_test(self, width, depth):
        """ Check the behavior of the next_data_dir() function. """
        dir_name = self.rng.next_file_name(8)
        dir_path = "%s/%s" % (TEST_DIR, dir_name)
        # Clear any leftover file or directory at the target path first.
        if os.path.exists(dir_path):
            if os.path.isfile(dir_path):
                os.unlink(dir_path)
            else:
                shutil.rmtree(dir_path)
        self.rng.next_data_dir(dir_path, width, depth, 32)

    def test_next_data_dir(self):
        """ Check the behavior of the next_data_dir() function. """
        # Exercise the width/depth corners: 1x1, 1x4, 4x1, 4x4.
        self.do_next_data_dir_test(1, 1)
        self.do_next_data_dir_test(1, 4)
        self.do_next_data_dir_test(4, 1)
        self.do_next_data_dir_test(4, 4)
# Allow running this test module directly: python tests/test_simple_rng.py
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "36863c6f6583980d355e84f1566749f1",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 76,
"avg_line_length": 32.15337423312884,
"alnum_prop": 0.5682121732493799,
"repo_name": "jddixon/rnglib",
"id": "d8802994cb813fe9d130495c1bac6bec1966298a",
"size": "5284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_simple_rng.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "29045"
},
{
"name": "Shell",
"bytes": "1597"
}
],
"symlink_target": ""
} |
"""This module provides backwards compatibility for RelatedObject."""
# flake8: noqa
try:
    # Django <= 1.7
    from django.db.models.related import RelatedObject
except ImportError:
    # Narrowed from a bare `except:`, which would also have swallowed
    # SystemExit/KeyboardInterrupt and any unrelated error raised while
    # importing Django.
    # Django >= 1.8 removed RelatedObject in favor of ForeignObjectRel.
    # See: https://code.djangoproject.com/ticket/21414
    from django.db.models.fields.related import (
        ForeignObjectRel as RelatedObject
    )
| {
"content_hash": "4e24feaa6fb49d00dcfb4c25ff202eeb",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 28.916666666666668,
"alnum_prop": 0.6974063400576369,
"repo_name": "AltSchool/dynamic-rest",
"id": "9e9bcb28cd00f2545ba60d93b95bf7148ef8d804",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamic_rest/related.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "906"
},
{
"name": "Dockerfile",
"bytes": "1366"
},
{
"name": "HTML",
"bytes": "7045"
},
{
"name": "Jinja",
"bytes": "1331"
},
{
"name": "Makefile",
"bytes": "3784"
},
{
"name": "Procfile",
"bytes": "112"
},
{
"name": "Python",
"bytes": "354930"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.