text stringlengths 4 1.02M | meta dict |
|---|---|
from .core import db
class Project(db.Model):
    """A project owning a collection of sprints."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Required display name.
    name = db.Column(db.String, nullable=False)
    # One-to-many: sprints belonging to this project; each Sprint also
    # gains a `project` backref.
    sprints = db.relationship('Sprint', backref='project')
class Sprint(db.Model):
    """A sprint within a project, carrying points, a duration and burns."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Sprint number (presumably sequential within its project -- confirm).
    number = db.Column(db.Integer, nullable=False)
    # Story points for the sprint (presumably the committed total -- confirm).
    points = db.Column(db.Float, nullable=False)
    # Number of days (presumably the sprint length -- confirm).
    days = db.Column(db.Integer, nullable=False)
    # Owning project.
    project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
    # One-to-many: burn records for this sprint; each Burn also gains a
    # `sprint` backref.
    burns = db.relationship('Burn', backref='sprint')
class Burn(db.Model):
    """A per-day points record for a sprint (a burndown data point)."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Day index the record applies to.
    day = db.Column(db.Integer, nullable=False)
    # Points recorded for that day.
    points = db.Column(db.Float, nullable=False)
    # Owning sprint.
    sprint_id = db.Column(db.Integer, db.ForeignKey('sprint.id'))
| {
"content_hash": "4c2f518060d284a4a1861bf56a862a51",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 34.17391304347826,
"alnum_prop": 0.688295165394402,
"repo_name": "victorneo/enkindle",
"id": "4b2265f353121f7fbaa22a895a0a82b873e88998",
"size": "786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enkindle/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53718"
},
{
"name": "JavaScript",
"bytes": "19456"
},
{
"name": "Python",
"bytes": "4110"
}
],
"symlink_target": ""
} |
"""External libraries"""
| {
"content_hash": "f54c41ef9a460e071213a9ce1e280ae8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.68,
"repo_name": "toros-astro/corral",
"id": "b8ab5a3ff9e4f405dcb31bfe0c69a8f964981633",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corral/libs/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mako",
"bytes": "988"
},
{
"name": "Python",
"bytes": "279543"
},
{
"name": "TeX",
"bytes": "228160"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from u2flib_host import u2f, exc, __version__
from u2flib_host.constants import APDU_USE_NOT_SATISFIED
from u2flib_host.utils import u2str
from u2flib_host.yubicommon.compat import text_type
import time
import json
import argparse
import sys
def authenticate(devices, params, facet, check_only):
    """
    Interactively authenticates an AuthenticateRequest using an attached U2F
    device.

    Args:
        devices: List of candidate U2F devices. Devices that fail to open
            or raise a DeviceError are dropped and closed along the way.
        params: The (JSON-decoded) AuthenticateRequest parameters.
        facet: The facet (origin) for the challenge.
        check_only: If True, only verify that a matching device is present;
            exits the process with status 0 on success instead of signing.

    Returns:
        The AuthenticateResponse produced by the first device that signs.

    Exits the process with status 1 when no usable device remains.
    """
    for device in devices[:]:
        try:
            device.open()
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; an unopenable device is simply dropped.
        except Exception:
            devices.remove(device)
    try:
        prompted = False
        while devices:
            removed = []
            for device in devices:
                try:
                    return u2f.authenticate(device, params, facet, check_only)
                except exc.APDUError as e:
                    if e.code == APDU_USE_NOT_SATISFIED:
                        # Device matched the key handle but wants user
                        # presence (a touch).
                        if check_only:
                            sys.stderr.write('\nCorrect U2F device present!\n')
                            sys.exit(0)
                        if not prompted:
                            sys.stderr.write('\nTouch the flashing U2F device '
                                             'to authenticate...\n')
                            prompted = True
                    else:
                        removed.append(device)
                except exc.DeviceError:
                    removed.append(device)
            devices = [d for d in devices if d not in removed]
            for d in removed:
                d.close()
            time.sleep(0.25)
    finally:
        # Always release every device that is still open.
        for device in devices:
            device.close()
    sys.stderr.write('\nThe required U2F device is not present!\n')
    sys.exit(1)
def parse_args():
    """Build the command line parser for this tool and parse sys.argv.

    Returns:
        The argparse.Namespace with facet, check_only, infile, outfile
        and soft attributes.
    """
    parser = argparse.ArgumentParser(
        # Typo fix: was "Authenticaties".
        description="Authenticates an AuthenticateRequest.\n"
        "Takes a JSON formatted AuthenticateRequest object on stdin, and "
        "returns the resulting AuthenticateResponse on stdout.",
        add_help=True
    )
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('facet', help='the facet for the challenge')
    parser.add_argument('-c', '--check-only', action="store_true", help='Check if '
                        'the key handle is correct only, don\'t sign.')
    parser.add_argument('-i', '--infile', help='specify a file to read '
                        'AuthenticateRequest from, instead of stdin')
    parser.add_argument('-o', '--outfile', help='specify a file to write '
                        'the AuthenticateResponse to, instead of stdout')
    parser.add_argument('-s', '--soft', help='Specify a soft U2F token file to use')
    return parser.parse_args()
def main():
    """Entry point: read an AuthenticateRequest, sign it, emit the response."""
    args = parse_args()
    facet = text_type(args.facet)

    # Read the request JSON either from the given file or from stdin.
    if args.infile:
        with open(args.infile, 'r') as request_file:
            raw_request = request_file.read()
    else:
        if sys.stdin.isatty():
            sys.stderr.write('Enter AuthenticateRequest JSON data...\n')
        raw_request = sys.stdin.read()
    params = json.loads(raw_request)

    # Use a soft (file-backed) token when requested, real devices otherwise.
    if args.soft:
        from u2flib_host.soft import SoftU2FDevice
        devices = [SoftU2FDevice(args.soft)]
    else:
        devices = u2f.list_devices()

    result = authenticate(devices, params, facet, args.check_only)

    # Write the response JSON either to the given file or to stdout.
    if args.outfile:
        with open(args.outfile, 'w') as response_file:
            json.dump(result, response_file)
        sys.stderr.write('Output written to %s\n' % args.outfile)
    else:
        sys.stderr.write('\n---Result---\n')
        print(json.dumps(result))


if __name__ == '__main__':
    main()
| {
"content_hash": "dbfbd4d58a39816e372d64358481fbcc",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 84,
"avg_line_length": 34.29245283018868,
"alnum_prop": 0.5587345254470426,
"repo_name": "Yubico/python-u2flib-host",
"id": "19fbd3dc3b19b168ec4c970b8c94354613f79d74",
"size": "5007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "u2flib_host/authenticate.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "70663"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from .models import Course, CourseModule
# Variables for use in unit tests
# Dummy values for the Course model fields created in setUp().
LTI_CONTEXT_ID = 'some_context_id'
LTI_TOOL_CONSUMER_INSTANCE_GUID = 'some_guid'
API_COURSE_ID = 1
# Dummy values for the CourseModule model fields created in setUp().
LTI_RESOURCE_LINK_ID = 'some_resource_link_id'
LTI_RESOURCE_LINK_TITLE = 'some_resource_link_title'
API_COLLECTION_ID = 1
# Test class for the "Course" model
class CourseModelTest(TestCase):
    def setUp(self):
        """Create the Course fixture used by each test."""
        Course.objects.create(
            lti_context_id=LTI_CONTEXT_ID,
            lti_tool_consumer_instance_guid=LTI_TOOL_CONSUMER_INSTANCE_GUID,
            api_course_id=API_COURSE_ID,
        )

    def test_creation_of_a_course(self):
        """Verify the Course fixture round-trips through the ORM."""
        stored_course = Course.objects.get(id=1)
        self.assertEqual(stored_course.lti_context_id, LTI_CONTEXT_ID)
        self.assertEqual(
            stored_course.lti_tool_consumer_instance_guid,
            LTI_TOOL_CONSUMER_INSTANCE_GUID,
        )
        self.assertEqual(stored_course.api_course_id, API_COURSE_ID)
# Test class for the "CourseModule" model
class CourseModuleModelTest(TestCase):
    def setUp(self):
        """
        Create a CourseModule object (and the Course it belongs to).
        """
        sample_course_obj = Course.objects.create(
            lti_context_id=LTI_CONTEXT_ID,
            lti_tool_consumer_instance_guid=LTI_TOOL_CONSUMER_INSTANCE_GUID,
            api_course_id=API_COURSE_ID
        )
        CourseModule.objects.create(
            course=sample_course_obj,
            lti_resource_link_id=LTI_RESOURCE_LINK_ID,
            lti_resource_link_title=LTI_RESOURCE_LINK_TITLE,
            # Bug fix: was API_COURSE_ID. The assertion below checks
            # API_COLLECTION_ID; the mistake went unnoticed only because
            # both constants happen to equal 1.
            api_collection_id=API_COLLECTION_ID
        )

    def test_creation_of_a_course_module(self):
        """
        Test the successful creation of a CourseModule object.
        """
        course_module = CourseModule.objects.get(id=1)
        self.assertEqual(course_module.lti_resource_link_id, LTI_RESOURCE_LINK_ID)
        self.assertEqual(course_module.lti_resource_link_title, LTI_RESOURCE_LINK_TITLE)
        self.assertEqual(course_module.api_collection_id, API_COLLECTION_ID)
# TODO: Add more unit tests
| {
"content_hash": "9c5543e457245066824c247b64b3cc97",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 97,
"avg_line_length": 32.411764705882355,
"alnum_prop": 0.6429219600725953,
"repo_name": "Harvard-ATG/media_management_lti",
"id": "eb8b0a951e7738b8a30280313b4f05db53127434",
"size": "2204",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "media_manager/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "169121"
},
{
"name": "Dockerfile",
"bytes": "352"
},
{
"name": "HTML",
"bytes": "67662"
},
{
"name": "JavaScript",
"bytes": "2450115"
},
{
"name": "Python",
"bytes": "49220"
},
{
"name": "Shell",
"bytes": "15473"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Logvinenko Maksim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
import threading
from photon.enums import ConnectionState, DebugLevel, StatusCode
from photon.protocol import deserialize_op_response, deserialize_event_data
from photon.utils import now_in_millis
class BasePeer:
    """
    Abstract base class for Photon protocol peers.

    Tracks connection state and round-trip-time statistics and maintains a
    thread-safe queue of listener callbacks. Concrete transports implement
    the abstract connect/send/dispatch methods.
    """

    def __init__(self, peer_listener):
        # Listener receiving debug_return / on_status_changed /
        # on_operation_response / on_event callbacks.
        self.peer_listener = peer_listener
        self.debug_level = DebugLevel.Error
        self.traffic_stats_enabled = False
        self._state = ConnectionState.Disconnected
        # Handshake packet buffer; header bytes filled in by init_once().
        self._INIT_BYTES = bytearray([0] * 41)
        # Pending listener callbacks, guarded by _action_queue_lock.
        self._action_queue = []
        self._action_queue_lock = threading.Lock()
        self.m_applicationIsInitialized = False
        self.m_connectionTime = 0
        self.m_roundTripTime = 0
        self.m_lastRoundTripTime = 0
        self.m_roundTripTimeVariance = 0
        self.m_lowestRoundTripTime = 0
        self.m_highestRoundTripTimeVariance = 0
        self.m_warningSize = 100
        self.m_time_ping_interval = 1000
        self.m_channelCount = 2

    def get_local_ms_timestamp(self):
        """Return milliseconds elapsed relative to m_connectionTime."""
        return now_in_millis() - self.m_connectionTime

    def enqueue_action_for_dispatch(self, action):
        """Queue an arbitrary callable to run on the dispatch thread."""
        with self._action_queue_lock:
            self._action_queue.append(action)

    def enqueue_debug_return(self, debug_level, message):
        """Queue a debug_return callback for later dispatch."""
        with self._action_queue_lock:
            self._action_queue.append(
                lambda:
                self.peer_listener.debug_return(debug_level, message)
            )

    def enqueue_status_callback(self, status):
        """Queue an on_status_changed callback for later dispatch."""
        with self._action_queue_lock:
            self._action_queue.append(
                lambda:
                self.peer_listener.on_status_changed(status)
            )

    @abc.abstractmethod
    def enqueue_operation(self, params, op_code, reliable, channel_id, encrypt, message_type=2):
        """Queue an operation for sending; implemented by transports."""
        pass

    def init_once(self):
        """Fill in the fixed header bytes of the handshake packet."""
        self._INIT_BYTES[0] = 256 - 13  # 0xF3 magic byte
        self._INIT_BYTES[1] = 0
        self._INIT_BYTES[2] = 1
        self._INIT_BYTES[3] = 6
        self._INIT_BYTES[4] = 1
        self._INIT_BYTES[5] = 3
        self._INIT_BYTES[6] = 0
        self._INIT_BYTES[7] = 2
        self._INIT_BYTES[8] = 7

    def init_peer(self):
        """Reset connection state ahead of a (re)connect."""
        self.m_connectionTime = 0
        self._state = ConnectionState.Disconnected
        self.m_applicationIsInitialized = False

    def init_callback(self):
        """Handle the server's init response: mark connected and notify."""
        if self._state == ConnectionState.Connecting:
            self._state = ConnectionState.Connected
        self.m_applicationIsInitialized = True
        self.peer_listener.on_status_changed(StatusCode.Connect)

    def deserialize_message_and_callback(self, payload):
        """Decode one incoming message and invoke the matching callback.

        Returns False on malformed/undecodable input, None otherwise.
        """
        if len(payload) < 2:
            if self.debug_level >= DebugLevel.Error:
                self.peer_listener.debug_return(DebugLevel.Error, "Incoming data too short! {}".format(len(payload)))
            return False
        # NOTE(review): `and` rejects the packet only when BOTH magic-byte
        # checks fail -- confirm against the wire protocol ('or' may have
        # been intended).
        if payload[0] != 256 - 13 and payload[1] != 256 - 3:
            if self.debug_level >= DebugLevel.Error:
                self.peer_listener.debug_return(DebugLevel.Error, "No regular operation message: {}".format(payload[0]))
            return False
        # Low 7 bits: message type; high bit: encryption flag.
        msg_type = payload[1] & 0x7F
        is_encrypted = (payload[1] & 0x80) > 0
        if msg_type != 1:
            try:
                if is_encrypted:
                    # Typo fix in the error text: was "supper".
                    raise Exception("We don't support encrypted connect yet")
                else:
                    # Strip the two header bytes before deserializing.
                    payload = payload[2:]
            except Exception as e:
                if self.debug_level >= DebugLevel.Error:
                    self.peer_listener.debug_return(DebugLevel.Error, e)
                return False
        if msg_type == 3:
            # Operation response.
            self.peer_listener.on_operation_response(deserialize_op_response(payload))
        elif msg_type == 4:
            # Server-pushed event.
            self.peer_listener.on_event(deserialize_event_data(payload))
        elif msg_type == 1:
            # Init response: completes the handshake.
            self.init_callback()
        elif msg_type == 7:
            print("Receive shared key")
        else:
            if self.debug_level >= DebugLevel.Error:
                self.enqueue_debug_return(DebugLevel.Error, "unexpected msgType {}".format(msg_type))

    @abc.abstractmethod
    def connect(self, host, port, app_id=None):
        pass

    @abc.abstractmethod
    def disconnect(self):
        pass

    @abc.abstractmethod
    def stop_connection(self):
        pass

    @abc.abstractmethod
    def send_outgoing_commands(self):
        pass

    @abc.abstractmethod
    def dispatch_incoming_commands(self):
        pass

    def update_round_trip_time_and_variance(self, last_round_trip_time):
        """Update the smoothed RTT and its variance with a new sample.

        Uses exponential smoothing (1/8 on the mean, 1/4 on the variance)
        and tracks the lowest RTT / highest variance seen.
        """
        if last_round_trip_time < 0:
            return
        self.m_roundTripTimeVariance -= self.m_roundTripTimeVariance / 4
        if last_round_trip_time >= self.m_roundTripTime:
            self.m_roundTripTime += (last_round_trip_time - self.m_roundTripTime) / 8
            self.m_roundTripTimeVariance += (last_round_trip_time - self.m_roundTripTime) / 4
        else:
            self.m_roundTripTime += (last_round_trip_time - self.m_roundTripTime) / 8
            self.m_roundTripTimeVariance -= (last_round_trip_time - self.m_roundTripTime) / 4
        if self.m_roundTripTime < self.m_lowestRoundTripTime:
            self.m_lowestRoundTripTime = self.m_roundTripTime
        if self.m_roundTripTimeVariance > self.m_highestRoundTripTimeVariance:
            self.m_highestRoundTripTimeVariance = self.m_roundTripTimeVariance
"content_hash": "cdef01dbf95f7b01bf47425a6e995400",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 120,
"avg_line_length": 33.988571428571426,
"alnum_prop": 0.6267652992602556,
"repo_name": "logarithm/photon-python",
"id": "79a8d119dafa6cc58ba2c8c6c24d4ca6b8c8ca23",
"size": "5948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photon/basepeer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47965"
}
],
"symlink_target": ""
} |
"""
query.py
Utility functions to query vcf explorer from the command line.
Needs refactoring -> delly hardcoded code.
"""
import re
import pymongo
from . import connection, db
def create_sample_query(query_command):
    """
    Build two MongoDB filter dicts from a SAMPLE command string.

    Supported operators:
        SAMPLE=[s1,s2]   variant must be present in the given samples only
        SAMPLE?=[s1,s2]  variant may only be present in the given samples
        SAMPLE!=[s1,s2]  variant is filtered when present in the given samples

    Returns a (query, query_2) tuple; either dict may be empty.
    """
    must_query = {}
    presence_query = {}

    match = re.match(r"^SAMPLE(=|\!=|\?=)\[?(.+)\]?", query_command)
    if match:
        operator = match.group(1)
        samples = match.group(2).split(',')
        # The greedy group swallows the trailing ']'; strip it off.
        samples[-1] = samples[-1].replace("]", "")

        if operator == "=":
            # Variant may not be present in any other sample in the
            # database ...
            must_query['samples'] = {
                '$elemMatch': {'sample': {'$nin': samples}}
            }
            # ... and must be present in the given sample(s), otherwise it
            # gets the NotPresent filter.
            presence_query['samples.sample'] = {'$in': samples}
        elif operator == "?=":
            # Variant may only be present in the given sample(s).
            must_query['samples'] = {
                '$elemMatch': {'sample': {'$nin': samples}}
            }
        elif operator == "!=":
            # Filter the variant only when present in the given sample(s).
            must_query['samples.sample'] = {'$in': samples}

    return must_query, presence_query
| {
"content_hash": "3c06766042a94cea2c0db2577cdb7da3",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 108,
"avg_line_length": 32.2,
"alnum_prop": 0.5729813664596274,
"repo_name": "CuppenResearch/vcf-explorer",
"id": "7669d9f0321c11ce135b685272cff4043740c082",
"size": "1288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/query.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1296"
},
{
"name": "HTML",
"bytes": "3278"
},
{
"name": "JavaScript",
"bytes": "3626"
},
{
"name": "Python",
"bytes": "45303"
},
{
"name": "TypeScript",
"bytes": "12932"
}
],
"symlink_target": ""
} |
"""Functions to create chemical series data sets."""
from __future__ import absolute_import
import numpy
from fermilib.utils._molecular_data import (MolecularData,
periodic_hash_table,
periodic_polarization)
# Define error objects which inherit from Exception.
class MolecularLatticeError(Exception):
    """Raised when a lattice specification is invalid."""
    pass
def make_atomic_ring(n_atoms, spacing, basis,
                     atom_type='H', charge=0, filename=''):
    """Create a ring of n_atoms identical atoms.

    Basic geometry gives the ring radius for spacing L between atoms as
    L / (2 * cos(pi / 2 - theta / 2)) where theta = 2 pi / n_atoms.

    Args:
        n_atoms: Integer, the number of atoms in the ring.
        spacing: The spacing between atoms in the ring in Angstroms.
        basis: The basis in which to perform the calculation.
        atom_type: String, the atomic symbol of the element in the ring.
            Defaults to 'H' for Hydrogen.
        charge: An integer giving the total molecular charge. Defaults to 0.
        filename: An optional string to give a filename for the molecule.

    Returns:
        molecule: An instance of the MolecularData class.
    """
    # Place the atoms evenly around a circle in the z = 0 plane.
    angle_step = 2. * numpy.pi / float(n_atoms)
    radius = spacing / (2. * numpy.cos(numpy.pi / 2. - angle_step / 2.))
    geometry = [(atom_type,
                 (radius * numpy.cos(k * angle_step),
                  radius * numpy.sin(k * angle_step),
                  0.))
                for k in range(n_atoms)]

    # Doublet when the electron count is odd, singlet otherwise.
    n_electrons = n_atoms * periodic_hash_table[atom_type] - charge
    multiplicity = 2 if n_electrons % 2 else 1

    description = 'ring_{}'.format(spacing)
    return MolecularData(geometry,
                         basis,
                         multiplicity,
                         charge,
                         description,
                         filename)
def make_atomic_lattice(nx_atoms, ny_atoms, nz_atoms, spacing, basis,
                        atom_type='H', charge=0, filename=''):
    """Create a rectangular lattice of identical atoms.

    Args:
        nx_atoms: Integer, the length of lattice (in number of atoms).
        ny_atoms: Integer, the width of lattice (in number of atoms).
        nz_atoms: Integer, the depth of lattice (in number of atoms).
        spacing: The spacing between atoms in the lattice in Angstroms.
        basis: The basis in which to perform the calculation.
        atom_type: String, the atomic symbol of the element in the lattice.
            Defaults to 'H' for Hydrogen.
        charge: An integer giving the total molecular charge. Defaults to 0.
        filename: An optional string to give a filename for the molecule.

    Returns:
        molecule: An instance of the MolecularData class.

    Raises:
        MolecularLatticeError: If lattice specification is invalid.
    """
    # Atoms on a regular grid with the given spacing.
    geometry = [(atom_type, (spacing * ix, spacing * iy, spacing * iz))
                for ix in range(nx_atoms)
                for iy in range(ny_atoms)
                for iz in range(nz_atoms)]

    # Doublet when the electron count is odd, singlet otherwise.
    n_atoms = nx_atoms * ny_atoms * nz_atoms
    n_electrons = n_atoms * periodic_hash_table[atom_type] - charge
    multiplicity = 2 if n_electrons % 2 else 1

    # Name the molecule by dimensionality: the number of axes holding
    # more than one atom.
    dimensions = bool(nx_atoms > 1) + bool(ny_atoms > 1) + bool(nz_atoms > 1)
    if dimensions == 1:
        description = 'linear_{}'.format(spacing)
    elif dimensions == 2:
        description = 'planar_{}'.format(spacing)
    elif dimensions == 3:
        description = 'cubic_{}'.format(spacing)
    else:
        raise MolecularLatticeError('Invalid lattice dimensions.')

    return MolecularData(geometry,
                         basis,
                         multiplicity,
                         charge,
                         description,
                         filename)
def make_atom(atom_type, basis, filename=''):
    """Prepare a molecular data instance for a single element.

    Args:
        atom_type: String giving the atomic symbol.
        basis: The basis in which to perform the calculation.
        filename: An optional string to give a filename for the molecule.

    Returns:
        atom: An instance of the MolecularData class.
    """
    # Single atom at the origin; multiplicity is 2S + 1 with S taken from
    # the element's ground-state polarization table.
    atomic_number = periodic_hash_table[atom_type]
    spin = periodic_polarization[atomic_number] / 2.
    return MolecularData([(atom_type, (0., 0., 0.))],
                         basis,
                         int(2 * spin + 1),
                         filename=filename)
| {
"content_hash": "8f220f2a8468f90fae36b424f51552ae",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 77,
"avg_line_length": 35.852112676056336,
"alnum_prop": 0.5833824395992929,
"repo_name": "ProjectQ-Framework/FermiLib",
"id": "4cc6c3d3b0cb46dd028b0768abca22f039af87cd",
"size": "5712",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/fermilib/utils/_chemical_series.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "623992"
}
],
"symlink_target": ""
} |
"""
:class:`Teleport` geocoder.
"""
from geopy.compat import urlencode
from geopy.exc import GeocoderServiceError
from geopy.geocoders.base import (
Geocoder,
DEFAULT_TIMEOUT,
DEFAULT_FORMAT_STRING,
)
from geopy.location import Location
from geopy.util import logger
# Python 2/3 compatibility: quote moved in Python 3.
try:
    from urllib.parse import quote  # Python 3
# Bug fix: was a bare `except:`, which also swallowed SystemExit and
# KeyboardInterrupt. Only a missing module should trigger the fallback.
except ImportError:
    from urllib import quote  # Python 2

__all__ = ("Teleport", )

# Default embed path requesting country and admin1 data for each city in
# forward geocode responses.
DEFAULT_FORWARD_EMBEDDINGS = (
    "city:search-results/city:item/{city:country,city:admin1_division}"
)

# Default embed path for reverse geocode responses.
DEFAULT_REVERSE_EMBEDDINGS = (
    "location:nearest-cities/location:nearest-city" +
    "/{city:country,city:admin1_division}"
)

# Python 2/3 compatibility: basestring only exists on Python 2.
try:
    basestring
# Bug fix: was a bare `except:`; an undefined name raises NameError.
except NameError:
    basestring = str  # @ReservedAssignment
class Teleport(Geocoder):
    """
    Teleport geocoder and reverse geocoder
    See API documentation at (https://developers.teleport.org/api/) for
    details.
    Example::
        from geopy.geocoders import Teleport
        from geopy.geocoders.teleport import DEFAULT_FORWARD_EMBEDDINGS
        teleport = Teleport(forward_embeddings=DEFAULT_FORWARD_EMBEDDINGS +
                            ",city:search-results/city:item/" +
                            "city:urban_area/ua:scores")
        locations = teleport.geocode("sfo", exactly_one=False)
        for location in locations:
            print (location.address,
                   location.point,
                   Teleport.get_embedded(location.raw,
                                         "city:item/city:urban_area/ua:scores",
                                         {})
                   .get('teleport_city_score'))
        location = teleport.reverse("37.774531,-122.418297")
        print (location.address,
               location.point,
               location.raw.get('distance_km'))
    """

    def __init__(self, format_string=DEFAULT_FORMAT_STRING, scheme='https',  # pylint: disable=R0913
                 timeout=DEFAULT_TIMEOUT, proxies=None, user_agent=None,
                 forward_embeddings=DEFAULT_FORWARD_EMBEDDINGS,
                 reverse_embeddings=DEFAULT_REVERSE_EMBEDDINGS):
        """
        :param string format_string: The format string where the input string
            to geocode is interpolated. For example, if you only need to
            geocode locations in Cleveland, Ohio, you could do
            format_string="%s, Cleveland OH"
        :param string scheme: The scheme/protocol to use to communicate with
            the API. Can be either 'https' (default) or 'http'.
        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception.
        :param dict proxies: If specified, routes this geocoder's requests
            through the specified proxy. E.g., {"https": "192.0.2.0"}. For
            more information, see documentation on
            :class:`urllib2.ProxyHandler`.
        :param string user_agent: The user agent string to send to the API in
            the User-Agent HTTP header.
        :param string forward_embeddings: The data to embed into responses for
            forward geocode requests (see the Teleport API documentation on
            how to compose the embed paths:
            https://developers.teleport.org/api/)
        :param string reverse_embeddings: The data to embed into responses for
            reverse geocode requests (see the Teleport API documentation on
            how to compose the embed paths:
            https://developers.teleport.org/api/)
        """
        super(Teleport, self).__init__(
            format_string=format_string,
            scheme=scheme,
            timeout=timeout,
            proxies=proxies,
            user_agent=user_agent
        )
        self.forward_embeddings = forward_embeddings
        self.reverse_embeddings = reverse_embeddings
        # Forward endpoint; query params are appended in geocode().
        self.api = "%s://api.teleport.org/api/cities/" % self.scheme
        # Reverse endpoint template; the %s slot receives "lat,lon" in
        # reverse().
        self.api_reverse = (
            "%s://api.teleport.org/api/locations/%%s/" % self.scheme
        )

    def geocode(self, query, exactly_one=True, timeout=None):
        """
        Geocode a location query.
        :param string query: The city or query you wish to geocode.
        :param bool exactly_one: Return one result or a list of results, if
            available.
        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.
        .. versionadded:: 0.97
        """
        params = {
            'search': self.format_string % (query,),
            'embed': self.forward_embeddings,
        }
        url = "?".join((self.api, urlencode(params)))
        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        return self._parse_json(
            self._call_geocoder(url, timeout=timeout),
            exactly_one,
        )

    def reverse(self, query, exactly_one=True, timeout=None):
        """
        Given a point, find an address.
        .. versionadded:: 1.2.0
        :param string query: The coordinates for which you wish to obtain the
            closest human-readable addresses.
        :type query: :class:`geopy.point.Point`, list or tuple of (latitude,
            longitude), or string as "%(latitude)s, %(longitude)s"
        :param boolean exactly_one: Return one result or a list of results, if
            available.
        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception.
        """
        # Keep the comma between lat and lon unescaped in the URL path.
        coordinates = quote(self._coerce_point_to_string(query), safe=',')
        params = {
            'embed': self.reverse_embeddings,
        }
        url = self.api_reverse % (coordinates,)
        url = "?".join((url, urlencode(params)))
        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        return self._parse_json_reverse(
            self._call_geocoder(url, timeout=timeout),
            exactly_one,
        )

    @staticmethod
    def get_embedded(parent, path, default=None):
        """
        Get an embedded sub-object of an object using the specified name or
        path.
        :param dict parent: The parent object to find embedded objects in
        :param string path: The embedded object path, e.g.
            "city:item/city:country"
        :param object default: The default value to return if the embedded
            object cannot be found
        """
        # Accept either a "/"-separated string or a pre-split list of
        # path segments (used by the recursive calls below).
        if isinstance(path, basestring):
            elements = path.split("/")
        else:
            elements = path
        name = elements[0]
        rest = elements[1:]
        # Walk one level down the '_embedded' relations, then recurse for
        # the remaining path segments.
        embedded = parent.get('_embedded', {})
        relation = embedded.get(name, None)
        if relation is not None:
            if rest:
                return Teleport.get_embedded(relation, rest, default)
            else:
                return relation
        else:
            return default

    @staticmethod
    def _parse_city(city, raw_response, name=None):
        """
        Parse a city.
        """
        latlon = city.get('location', {}).get('latlon', {})
        latitude = latlon.get('latitude', None)
        longitude = latlon.get('longitude', None)
        if name is None:
            # No display name supplied: build "city, admin1, country" from
            # the embedded relations, skipping parts that are missing.
            admin1 = Teleport.get_embedded(city, "city:admin1_division", {})
            country = Teleport.get_embedded(city, "city:country", {})
            parts = [city['name']]
            admin1_name = admin1.get('name')
            if admin1_name:
                parts.append(admin1_name)
            country_name = country.get('name')
            if country_name:
                parts.append(country_name)
            name = ", ".join(parts)
        return Location(name, (latitude, longitude), raw_response)

    def _parse_json(self, doc, exactly_one):
        """
        Parse JSON response body.
        """
        # A 'message' key in the response signals a service-side error.
        message = doc.get('message', None)
        if message is not None:
            raise GeocoderServiceError(message)
        search_results = Teleport.get_embedded(doc, 'city:search-results', [])
        if not len(search_results):
            return None

        def parse_result(search_result):
            """
            Parse a single result.
            """
            city = Teleport.get_embedded(search_result, 'city:item', {})
            # Prefer the API's matched full name over a rebuilt one.
            name = search_result.get('matching_full_name')
            return self._parse_city(city, search_result, name=name)
        if exactly_one:
            return parse_result(search_results[0])
        else:
            return [parse_result(search_result)
                    for search_result in search_results]

    def _parse_json_reverse(self, doc, exactly_one):
        """
        Parse JSON response body.
        """
        # A 'message' key in the response signals a service-side error.
        message = doc.get('message', None)
        if message is not None:
            raise GeocoderServiceError(message)
        search_results = Teleport.get_embedded(doc,
                                               'location:nearest-cities', [])
        if not len(search_results):
            return None

        def parse_result(search_result):
            """
            Parse a single result.
            """
            city = Teleport.get_embedded(search_result,
                                         'location:nearest-city', {})
            return self._parse_city(city, search_result)
        if exactly_one:
            return parse_result(search_results[0])
        else:
            return [parse_result(search_result)
                    for search_result in search_results]
| {
"content_hash": "4ea80bd8e8cf4812b9bc2a0b817d534f",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 100,
"avg_line_length": 34.66077738515901,
"alnum_prop": 0.5793658884697727,
"repo_name": "magnushiie/geopy",
"id": "525017f96a5d8ed1dbcb1e4a5ef7a1ef86c70a55",
"size": "9809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geopy/geocoders/teleport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "754"
},
{
"name": "Python",
"bytes": "295953"
},
{
"name": "Shell",
"bytes": "2116"
}
],
"symlink_target": ""
} |
from enumfields import Enum
class Operation(Enum):
    """The four CRUD operations, with string values matching their names."""
    CREATE = 'CREATE'
    READ = 'READ'
    UPDATE = 'UPDATE'
    DELETE = 'DELETE'
| {
"content_hash": "4ee18fb01bda3ab4bcfcd5153e867cd1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 27,
"avg_line_length": 17.125,
"alnum_prop": 0.6277372262773723,
"repo_name": "seebass/drf-tools",
"id": "6da0f80ea23f4784706b863c90709e528b579069",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drf_tools/auth/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85710"
}
],
"symlink_target": ""
} |
# The module path of the server class
server = 'examples.hello_server.HelloServer'

# The module path of the worker class
worker = 'examples.hello_worker.HelloWorker'

# The base name of the server that will be combined with host and pid
# and used when requesting tasks from AWS.
name = 'HelloExample'

# Set the ARN for the activity that this server will work on.
activity_arn = 'arn:aws:states:us-east-1:00000000000000:activity:hello'

# [OPTIONAL] The number of worker processes.
# If None, it will be set to the number of cores.
# Default is None.
processes = None

# [OPTIONAL] Number of seconds between heartbeats.
# None or 0 means there is no heartbeat.
# Default is no heartbeat.
heartbeat = 120

# [OPTIONAL] Maximum number of tasks for a worker to run before the worker
# process is automatically killed and a new one created.
# If None, workers will not be killed.
# Default is None.
maxtasksperchild = None

# [OPTIONAL] The multiprocessing start method for worker processes.
# See https://docs.python.org/3.7/library/multiprocessing.html for more info
# The default is 'spawn' which starts a fresh python interpreter process.
# It is rather slow compared to using fork or forkserver, but we typically
# create workers and leave them running so the impact should be minimal.
# Possible values are:
#   spawn - Recommended (Unix and Windows)
#   fork - Not recommended due to thread-safety issues
#   forkserver - On Unix platforms which support passing fds over Unix pipes
#   '' - Uses the python defaults. Not recommended.
start_method = 'spawn'

# [OPTIONAL] If set to a non-zero integer, an HTTP healthcheck handler listens on
# the port number.
# Healthcheck requests are GET requests to 'http://localhost:<healthcheck>/'
# and return JSON: {"status": "ok"}
# Default is 8080
healthcheck = 8080

# [OPTIONAL] The server_config is an arbitrary dictionary that is available
# in the server instance as self.config and passed to server init()
# Use it for server-specific configuration.
server_config = {
    'foo': 'bar'
}

# [OPTIONAL] The worker_config is an arbitrary dictionary that is available
# in the worker instance as self.config
# Use it for worker-specific configuration.
worker_config = {
    'foo': 'bar'
}
| {
"content_hash": "9d4e9acde9c691cda14d4ffc691e1899",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 81,
"avg_line_length": 36.53333333333333,
"alnum_prop": 0.7541058394160584,
"repo_name": "irothschild/stefuna",
"id": "f9ab34b60c3a2e2263cabde72380b9f8316b0638",
"size": "2282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hello_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30564"
}
],
"symlink_target": ""
} |
import wx
from wx.lib.floatcanvas import FloatCanvas as FC
from modder import MOD_REGISTRY
from modder.gui.graph.models import Block, ConnectorLine, CanvasMixin
class ModManagerFrame(wx.Frame, CanvasMixin):
    """Frame showing registered mods and their event graph on a FloatCanvas."""

    def __init__(self):
        wx.Frame.__init__(
            self, None, -1, "FloatCanvas Graph Test", wx.DefaultPosition, (700, 700)
        )
        CanvasMixin.__init__(self)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.SetupUI()
        self.Center()

    def SetupLayout(self):
        """Stack the canvas above the mod list in a vertical sizer."""
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.Canvas, 1, wx.EXPAND | wx.ALL, 10)
        main_sizer.Add(self.ModList, 1, wx.EXPAND | wx.ALL, 10)
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        self.Layout()

    def SetupUI(self):
        """Create the canvas and mod list and populate the event/mod graph."""
        self.Canvas = FC.FloatCanvas(
            self, -1, (500, 300),
            ProjectionFun=None,
            Debug=0,
            BackgroundColor='CanvasBackgoundColor'
        )
        self.ModList = wx.ListCtrl(
            self, wx.ID_ANY, style=wx.LC_REPORT | wx.LC_HRULES
        )
        self.ModList.InsertColumn(0, 'Name', format=wx.LIST_FORMAT_LEFT, width=200)
        self.ModList.InsertColumn(1, 'Events', format=wx.LIST_FORMAT_LEFT, width=200)
        # self.ModList.InsertColumn(2, 'File', format=wx.LIST_FORMAT_LEFT, width=100)
        blocks = []
        lines = []
        mods = {}
        for event_index, (eventname, modlist) in enumerate(MOD_REGISTRY.items()):
            # Bug fix: the original kept a filter() iterator here; calling
            # any() on it consumed the first match, so the subsequent
            # list(...)[0] returned the wrong block or raised IndexError.
            # Materialize the matches once instead.
            event_blocks = [o for o in blocks if o.Text == eventname]
            if not event_blocks:
                event_block = Block((0, event_index * 2), (6, 1), eventname)
                blocks.append(event_block)
            else:
                event_block = event_blocks[0]
            # Bug fix: the inner loop previously reused the name `index`,
            # clobbering the outer counter that positions event blocks.
            for mod_index, mod in enumerate(modlist):
                mod_name = mod.__doc__ or mod.__name__
                if not any(o.Text == mod_name for o in blocks):
                    mod_block = Block((7, mod_index * 2 - 1), (4, 1), mod_name)
                    lines.append(ConnectorLine(event_block, mod_block))
                    blocks.append(mod_block)
                if mod not in mods:
                    mods[mod] = [eventname]
                else:
                    mods[mod].append(eventname)
        for mod, events in mods.items():
            self.ModList.Append([mod.__doc__ or mod.__name__, ', '.join(events)])
        self.Canvas.AddObjects(lines)
        # Bug fix: blocks were previously added twice -- once via
        # AddObjects() and again via AddObject() inside the bind loop.
        self.Canvas.AddObjects(blocks)
        for block in blocks:
            block.Bind(FC.EVT_FC_ENTER_OBJECT, block.OnHover)
            block.Bind(FC.EVT_FC_LEAVE_OBJECT, block.OffHover)
            block.Bind(FC.EVT_FC_LEFT_DOWN, self.ObjectHit)
        self.SetupLayout()
        self.SetupCanvasMixin()

    def OnClose(self, evt):
        """Hide instead of destroying so the frame can be shown again."""
        self.Hide()
| {
"content_hash": "c47f15e63fc5e54f025cc851c87fa312",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 85,
"avg_line_length": 34.951219512195124,
"alnum_prop": 0.5617585484996511,
"repo_name": "JokerQyou/Modder2",
"id": "85a303d3f7ed94a3bb715b564760240666664052",
"size": "2882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modder/gui/frames.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24412"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from collections import defaultdict
import datetime
import decimal
import json
import re
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from .comparisons import get_comparison_func
class DynamoJsonEncoder(json.JSONEncoder):
    """JSON encoder that delegates to an object's own ``to_json()`` method."""
    def default(self, obj):
        # Objects exposing to_json() serialize to whatever it returns; any
        # other unhandled type falls through (implicit None, encoded as null).
        to_json = getattr(obj, 'to_json', None)
        if to_json is not None:
            return to_json()
def dynamo_json_dump(dynamo_object):
    """Serialize *dynamo_object* to a JSON string via DynamoJsonEncoder."""
    return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
class DynamoType(object):
    """A single typed DynamoDB value, e.g. ``{"N": "5"}`` or ``{"S": "x"}``.

    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
    """
    def __init__(self, type_as_dict):
        # The wire format is a one-entry dict: {type_code: raw_value}.
        entries = list(type_as_dict.items())
        self.type = entries[0][0]
        self.value = entries[0][1]
    def __hash__(self):
        return hash((self.type, self.value))
    def __eq__(self, rhs):
        return self.type == rhs.type and self.value == rhs.value
    def __lt__(self, rhs):
        return self.value < rhs.value
    def __le__(self, rhs):
        return self.value <= rhs.value
    def __gt__(self, rhs):
        return self.value > rhs.value
    def __ge__(self, rhs):
        return self.value >= rhs.value
    def __repr__(self):
        return "DynamoType: {0}".format(self.to_json())
    @property
    def cast_value(self):
        """The value coerced to a Python type: int/float for 'N', raw otherwise."""
        if self.type != 'N':
            return self.value
        try:
            return int(self.value)
        except ValueError:
            return float(self.value)
    def to_json(self):
        """Return the wire-format one-entry dict."""
        return {self.type: self.value}
    def compare(self, range_comparison, range_objs):
        """Apply the named comparison operator against *range_objs*."""
        comparator = get_comparison_func(range_comparison)
        operands = [obj.cast_value for obj in range_objs]
        return comparator(self.cast_value, *operands)
class Item(BaseModel):
    """A single DynamoDB item: its key values plus DynamoType attributes."""
    def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
        self.hash_key = hash_key
        self.hash_key_type = hash_key_type
        self.range_key = range_key
        self.range_key_type = range_key_type
        # Wrap every raw attribute dict ({"S": ...} etc.) in a DynamoType.
        self.attrs = {}
        for key, value in attrs.items():
            self.attrs[key] = DynamoType(value)
    def __repr__(self):
        return "Item: {0}".format(self.to_json())
    def to_json(self):
        """Return the item as ``{"Attributes": {name: {type: value}}}``."""
        attributes = {}
        for attribute_key, attribute in self.attrs.items():
            attributes[attribute_key] = {
                attribute.type: attribute.value
            }
        return {
            "Attributes": attributes
        }
    def describe_attrs(self, attributes):
        """Return ``{"Item": attrs}`` limited to *attributes* when given."""
        if attributes:
            included = {}
            for key, value in self.attrs.items():
                if key in attributes:
                    included[key] = value
        else:
            included = self.attrs
        return {
            "Item": included
        }
    def update(self, update_expression, expression_attribute_names, expression_attribute_values):
        """Apply a DynamoDB UpdateExpression (SET/REMOVE supported) to attrs."""
        # Update subexpressions are identifiable by the operator keyword, so split on that and
        # get rid of the empty leading string.
        parts = [p for p in re.split(r'\b(SET|REMOVE|ADD|DELETE)\b', update_expression) if p]
        # make sure that we correctly found only operator/value pairs
        assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression)
        for action, valstr in zip(parts[:-1:2], parts[1::2]):
            values = valstr.split(',')
            for value in values:
                # A Real value
                value = value.lstrip(":").rstrip(",").strip()
                # Substitute expression attribute name placeholders (#n -> name).
                for k, v in expression_attribute_names.items():
                    value = re.sub(r'{0}\b'.format(k), v, value)
                if action == "REMOVE":
                    self.attrs.pop(value, None)
                elif action == 'SET':
                    key, value = value.split("=")
                    key = key.strip()
                    value = value.strip()
                    if value in expression_attribute_values:
                        self.attrs[key] = DynamoType(expression_attribute_values[value])
                    else:
                        self.attrs[key] = DynamoType({"S": value})
                else:
                    raise NotImplementedError('{} update action not yet supported'.format(action))
    def update_with_attribute_updates(self, attribute_updates):
        """Apply legacy AttributeUpdates (PUT / ADD / DELETE actions)."""
        for attribute_name, update_action in attribute_updates.items():
            action = update_action['Action']
            if action == 'DELETE' and 'Value' not in update_action:
                if attribute_name in self.attrs:
                    del self.attrs[attribute_name]
                continue
            new_value = list(update_action['Value'].values())[0]
            if action == 'PUT':
                # TODO deal with other types
                if isinstance(new_value, list) or isinstance(new_value, set):
                    self.attrs[attribute_name] = DynamoType({"SS": new_value})
                elif isinstance(new_value, dict):
                    self.attrs[attribute_name] = DynamoType({"M": new_value})
                # Bug fix: on Python 3 dict.keys() never compares equal to a
                # list, so the 'N' and 'NULL' branches below were unreachable.
                # Compare as sets, matching the ADD branch.
                elif set(update_action['Value'].keys()) == set(['N']):
                    self.attrs[attribute_name] = DynamoType({"N": new_value})
                elif set(update_action['Value'].keys()) == set(['NULL']):
                    if attribute_name in self.attrs:
                        del self.attrs[attribute_name]
                else:
                    self.attrs[attribute_name] = DynamoType({"S": new_value})
            elif action == 'ADD':
                if set(update_action['Value'].keys()) == set(['N']):
                    existing = self.attrs.get(
                        attribute_name, DynamoType({"N": '0'}))
                    # Numeric ADD accumulates via Decimal for exactness.
                    self.attrs[attribute_name] = DynamoType({"N": str(
                        decimal.Decimal(existing.value) +
                        decimal.Decimal(new_value)
                    )})
                else:
                    # TODO: implement other data types
                    raise NotImplementedError(
                        'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))
class Table(BaseModel):
    """In-memory model of a DynamoDB table: schema, throughput, indexes and
    items stored as ``items[hash_key][range_key]`` (or ``items[hash_key]``
    when the table has no range key)."""
    def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None):
        self.name = table_name
        self.attr = attr
        self.schema = schema
        self.range_key_attr = None
        self.hash_key_attr = None
        self.range_key_type = None
        self.hash_key_type = None
        for elem in schema:
            if elem["KeyType"] == "HASH":
                self.hash_key_attr = elem["AttributeName"]
                # NOTE(review): this stores the KeyType ("HASH"/"RANGE"), not the
                # attribute's data type -- confirm whether that is intended.
                self.hash_key_type = elem["KeyType"]
            else:
                self.range_key_attr = elem["AttributeName"]
                self.range_key_type = elem["KeyType"]
        if throughput is None:
            self.throughput = {
                'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10}
        else:
            self.throughput = throughput
        self.throughput["NumberOfDecreasesToday"] = 0
        self.indexes = indexes
        self.global_indexes = global_indexes if global_indexes else []
        self.created_at = datetime.datetime.utcnow()
        self.items = defaultdict(dict)
        self.table_arn = self._generate_arn(table_name)
        self.tags = []
    def _generate_arn(self, name):
        # Fixed region/account: this is a mock backend.
        return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
    def describe(self, base_key='TableDescription'):
        """Return a DescribeTable-shaped dict under *base_key*."""
        results = {
            base_key: {
                'AttributeDefinitions': self.attr,
                'ProvisionedThroughput': self.throughput,
                'TableSizeBytes': 0,
                'TableName': self.name,
                'TableStatus': 'ACTIVE',
                'TableArn': self.table_arn,
                'KeySchema': self.schema,
                'ItemCount': len(self),
                'CreationDateTime': unix_time(self.created_at),
                'GlobalSecondaryIndexes': [index for index in self.global_indexes],
                'LocalSecondaryIndexes': [index for index in self.indexes],
            }
        }
        return results
    def __len__(self):
        # Count items, accounting for the nested dict layout of range-key tables.
        count = 0
        for key, value in self.items.items():
            if self.has_range_key:
                count += len(value)
            else:
                count += 1
        return count
    @property
    def hash_key_names(self):
        """Table hash key attribute plus the hash key of every GSI."""
        keys = [self.hash_key_attr]
        for index in self.global_indexes:
            hash_key = None
            for key in index['KeySchema']:
                if key['KeyType'] == 'HASH':
                    hash_key = key['AttributeName']
            keys.append(hash_key)
        return keys
    @property
    def range_key_names(self):
        """Table range key attribute plus the range key of every GSI."""
        keys = [self.range_key_attr]
        for index in self.global_indexes:
            range_key = None
            for key in index['KeySchema']:
                if key['KeyType'] == 'RANGE':
                    # NOTE(review): list.append() returns None, so range_key is
                    # always None here and the name is appended directly; the
                    # keys.append(range_key) below then adds a stray None.
                    # Looks like a bug -- confirm against callers.
                    range_key = keys.append(key['AttributeName'])
            keys.append(range_key)
        return keys
    def put_item(self, item_attrs, expected=None, overwrite=False):
        """Store an item; unless *overwrite*, enforce the *expected* conditions."""
        hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
        if self.has_range_key:
            range_value = DynamoType(item_attrs.get(self.range_key_attr))
        else:
            range_value = None
        item = Item(hash_value, self.hash_key_type, range_value,
                    self.range_key_type, item_attrs)
        if not overwrite:
            if expected is None:
                expected = {}
                lookup_range_value = range_value
            else:
                # When a range-key condition is supplied, look up by that
                # expected value rather than the incoming item's range key.
                expected_range_value = expected.get(
                    self.range_key_attr, {}).get("Value")
                if(expected_range_value is None):
                    lookup_range_value = range_value
                else:
                    lookup_range_value = DynamoType(expected_range_value)
            current = self.get_item(hash_value, lookup_range_value)
            if current is None:
                current_attr = {}
            elif hasattr(current, 'attrs'):
                current_attr = current.attrs
            else:
                current_attr = current
            # Evaluate each Expected entry against the current attributes.
            for key, val in expected.items():
                if 'Exists' in val and val['Exists'] is False:
                    if key in current_attr:
                        raise ValueError("The conditional request failed")
                elif key not in current_attr:
                    raise ValueError("The conditional request failed")
                elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value:
                    raise ValueError("The conditional request failed")
                elif 'ComparisonOperator' in val:
                    comparison_func = get_comparison_func(
                        val['ComparisonOperator'])
                    dynamo_types = [DynamoType(ele) for ele in val[
                        "AttributeValueList"]]
                    for t in dynamo_types:
                        if not comparison_func(current_attr[key].value, t.value):
                            raise ValueError('The conditional request failed')
        if range_value:
            self.items[hash_value][range_value] = item
        else:
            self.items[hash_value] = item
        return item
    def __nonzero__(self):
        # An empty table must still be truthy (Python 2 protocol).
        return True
    def __bool__(self):
        # Python 3 protocol; delegates to __nonzero__.
        return self.__nonzero__()
    @property
    def has_range_key(self):
        return self.range_key_attr is not None
    def get_item(self, hash_key, range_key=None):
        """Return the stored Item for the key pair, or None when absent."""
        if self.has_range_key and not range_key:
            raise ValueError(
                "Table has a range key, but no range key was passed into get_item")
        try:
            if range_key:
                return self.items[hash_key][range_key]
            if hash_key in self.items:
                return self.items[hash_key]
            raise KeyError
        except KeyError:
            return None
    def delete_item(self, hash_key, range_key):
        """Remove and return the Item, or None when it does not exist."""
        try:
            if range_key:
                return self.items[hash_key].pop(range_key)
            else:
                return self.items.pop(hash_key)
        except KeyError:
            return None
    def query(self, hash_key, range_comparison, range_objs, limit,
              exclusive_start_key, scan_index_forward, index_name=None, **filter_kwargs):
        """Run a Query, optionally against a named LSI/GSI.

        Returns (results, scanned_count, last_evaluated_key)."""
        results = []
        if index_name:
            # Resolve the index and its HASH key definition.
            all_indexes = (self.global_indexes or []) + (self.indexes or [])
            indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
            if index_name not in indexes_by_name:
                raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
                    index_name, self.name, ', '.join(indexes_by_name.keys())
                ))
            index = indexes_by_name[index_name]
            try:
                index_hash_key = [key for key in index[
                    'KeySchema'] if key['KeyType'] == 'HASH'][0]
            except IndexError:
                raise ValueError('Missing Hash Key. KeySchema: %s' %
                                 index['KeySchema'])
            possible_results = []
            for item in self.all_items():
                if not isinstance(item, Item):
                    continue
                item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
                if item_hash_key and item_hash_key == hash_key:
                    possible_results.append(item)
        else:
            possible_results = [item for item in list(self.all_items()) if isinstance(
                item, Item) and item.hash_key == hash_key]
        if index_name:
            try:
                index_range_key = [key for key in index[
                    'KeySchema'] if key['KeyType'] == 'RANGE'][0]
            except IndexError:
                index_range_key = None
        if range_comparison:
            if index_name and not index_range_key:
                raise ValueError(
                    'Range Key comparison but no range key found for index: %s' % index_name)
            elif index_name:
                for result in possible_results:
                    if result.attrs.get(index_range_key['AttributeName']).compare(range_comparison, range_objs):
                        results.append(result)
            else:
                for result in possible_results:
                    if result.range_key.compare(range_comparison, range_objs):
                        results.append(result)
        if filter_kwargs:
            # Non-key filter conditions (KeyConditions on non-key attrs).
            for result in possible_results:
                for field, value in filter_kwargs.items():
                    dynamo_types = [DynamoType(ele) for ele in value[
                        "AttributeValueList"]]
                    if result.attrs.get(field).compare(value['ComparisonOperator'], dynamo_types):
                        results.append(result)
        if not range_comparison and not filter_kwargs:
            # If we're not filtering on range key or on an index return all
            # values
            results = possible_results
        if index_name:
            if index_range_key:
                # Sort by the index's range attribute; items lacking it sort by None.
                results.sort(key=lambda item: item.attrs[index_range_key['AttributeName']].value
                             if item.attrs.get(index_range_key['AttributeName']) else None)
        else:
            results.sort(key=lambda item: item.range_key)
        if scan_index_forward is False:
            results.reverse()
        scanned_count = len(list(self.all_items()))
        results, last_evaluated_key = self._trim_results(results, limit,
                                                         exclusive_start_key)
        return results, scanned_count, last_evaluated_key
    def all_items(self):
        """Yield every stored Item, flattening range-key tables."""
        for hash_set in self.items.values():
            if self.range_key_attr:
                for item in hash_set.values():
                    yield item
            else:
                yield hash_set
    def scan(self, filters, limit, exclusive_start_key):
        """Run a Scan: every item must satisfy all *filters* conditions.

        Returns (results, scanned_count, last_evaluated_key)."""
        results = []
        scanned_count = 0
        for result in self.all_items():
            scanned_count += 1
            passes_all_conditions = True
            for attribute_name, (comparison_operator, comparison_objs) in filters.items():
                attribute = result.attrs.get(attribute_name)
                if attribute:
                    # Attribute found
                    if not attribute.compare(comparison_operator, comparison_objs):
                        passes_all_conditions = False
                        break
                elif comparison_operator == 'NULL':
                    # Comparison is NULL and we don't have the attribute
                    continue
                else:
                    # No attribute found and comparison is no NULL. This item
                    # fails
                    passes_all_conditions = False
                    break
            if passes_all_conditions:
                results.append(result)
        results, last_evaluated_key = self._trim_results(results, limit,
                                                         exclusive_start_key)
        return results, scanned_count, last_evaluated_key
    def _trim_results(self, results, limit, exclusive_start_key):
        """Apply ExclusiveStartKey paging and Limit; compute LastEvaluatedKey."""
        if exclusive_start_key is not None:
            hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
            range_key = exclusive_start_key.get(self.range_key_attr)
            if range_key is not None:
                range_key = DynamoType(range_key)
            # Resume after the item matching the start key.
            for i in range(len(results)):
                if results[i].hash_key == hash_key and results[i].range_key == range_key:
                    results = results[i + 1:]
                    break
        last_evaluated_key = None
        if limit and len(results) > limit:
            results = results[:limit]
            last_evaluated_key = {
                self.hash_key_attr: results[-1].hash_key
            }
            if results[-1].range_key is not None:
                last_evaluated_key[self.range_key_attr] = results[-1].range_key
        return results, last_evaluated_key
    def lookup(self, *args, **kwargs):
        """Positional-key convenience wrapper around get_item."""
        if not self.schema:
            self.describe()
        # NOTE(review): schema elements are dicts elsewhere in this module, so
        # self.schema[x].name (attribute access) looks suspect -- verify.
        for x, arg in enumerate(args):
            kwargs[self.schema[x].name] = arg
        ret = self.get_item(**kwargs)
        if not ret.keys():
            return None
        return ret
class DynamoDBBackend(BaseBackend):
    """Mock DynamoDB v2 backend: a name -> Table registry with the
    operations the responses layer dispatches to."""
    def __init__(self):
        self.tables = OrderedDict()
    def create_table(self, name, **params):
        """Create and register a Table; returns None if the name is taken."""
        if name in self.tables:
            return None
        table = Table(name, **params)
        self.tables[name] = table
        return table
    def delete_table(self, name):
        """Remove and return the Table, or None when absent."""
        return self.tables.pop(name, None)
    def tag_resource(self, table_arn, tags):
        """Append *tags* to the table matching *table_arn* (no-op if none match)."""
        for table in self.tables:
            if self.tables[table].table_arn == table_arn:
                self.tables[table].tags.extend(tags)
    def list_tags_of_resource(self, table_arn):
        # NOTE(review): raises AttributeError (None.tags) when no table
        # matches the ARN -- confirm callers rely on that.
        required_table = None
        for table in self.tables:
            if self.tables[table].table_arn == table_arn:
                required_table = self.tables[table]
        return required_table.tags
    def update_table_throughput(self, name, throughput):
        table = self.tables[name]
        table.throughput = throughput
        return table
    def update_table_global_indexes(self, name, global_index_updates):
        """Apply Create/Update/Delete GSI updates to a table."""
        table = self.tables[name]
        gsis_by_name = dict((i['IndexName'], i) for i in table.global_indexes)
        for gsi_update in global_index_updates:
            gsi_to_create = gsi_update.get('Create')
            gsi_to_update = gsi_update.get('Update')
            gsi_to_delete = gsi_update.get('Delete')
            if gsi_to_delete:
                index_name = gsi_to_delete['IndexName']
                if index_name not in gsis_by_name:
                    raise ValueError('Global Secondary Index does not exist, but tried to delete: %s' %
                                     gsi_to_delete['IndexName'])
                del gsis_by_name[index_name]
            if gsi_to_update:
                index_name = gsi_to_update['IndexName']
                if index_name not in gsis_by_name:
                    raise ValueError('Global Secondary Index does not exist, but tried to update: %s' %
                                     gsi_to_update['IndexName'])
                gsis_by_name[index_name].update(gsi_to_update)
            if gsi_to_create:
                if gsi_to_create['IndexName'] in gsis_by_name:
                    raise ValueError(
                        'Global Secondary Index already exists: %s' % gsi_to_create['IndexName'])
                gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create
        # NOTE(review): on Python 3 this assigns a dict view, not a list --
        # confirm consumers tolerate that.
        table.global_indexes = gsis_by_name.values()
        return table
    def put_item(self, table_name, item_attrs, expected=None, overwrite=False):
        table = self.tables.get(table_name)
        if not table:
            return None
        return table.put_item(item_attrs, expected, overwrite)
    def get_table_keys_name(self, table_name, keys):
        """
        Given a set of keys, extracts the key and range key
        """
        table = self.tables.get(table_name)
        if not table:
            return None, None
        else:
            if len(keys) == 1:
                for key in keys:
                    if key in table.hash_key_names:
                        return key, None
            # for potential_hash, potential_range in zip(table.hash_key_names, table.range_key_names):
            #     if set([potential_hash, potential_range]) == set(keys):
            #         return potential_hash, potential_range
            potential_hash, potential_range = None, None
            for key in set(keys):
                if key in table.hash_key_names:
                    potential_hash = key
                elif key in table.range_key_names:
                    potential_range = key
            return potential_hash, potential_range
    def get_keys_value(self, table, keys):
        """Extract (hash_key, range_key) DynamoTypes from a raw key dict."""
        if table.hash_key_attr not in keys or (table.has_range_key and table.range_key_attr not in keys):
            # NOTE(review): this message is misleading when the *hash* key is
            # the one missing -- consider a clearer message.
            raise ValueError(
                "Table has a range key, but no range key was passed into get_item")
        hash_key = DynamoType(keys[table.hash_key_attr])
        range_key = DynamoType(
            keys[table.range_key_attr]) if table.has_range_key else None
        return hash_key, range_key
    def get_table(self, table_name):
        return self.tables.get(table_name)
    def get_item(self, table_name, keys):
        table = self.get_table(table_name)
        if not table:
            raise ValueError("No table found")
        hash_key, range_key = self.get_keys_value(table, keys)
        return table.get_item(hash_key, range_key)
    def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts,
              limit, exclusive_start_key, scan_index_forward, index_name=None, **filter_kwargs):
        """Wrap raw key dicts in DynamoType and delegate to Table.query."""
        table = self.tables.get(table_name)
        if not table:
            return None, None
        hash_key = DynamoType(hash_key_dict)
        range_values = [DynamoType(range_value)
                        for range_value in range_value_dicts]
        return table.query(hash_key, range_comparison, range_values, limit,
                           exclusive_start_key, scan_index_forward, index_name, **filter_kwargs)
    def scan(self, table_name, filters, limit, exclusive_start_key):
        """Wrap filter values in DynamoType and delegate to Table.scan."""
        table = self.tables.get(table_name)
        if not table:
            return None, None, None
        scan_filters = {}
        for key, (comparison_operator, comparison_values) in filters.items():
            dynamo_types = [DynamoType(value) for value in comparison_values]
            scan_filters[key] = (comparison_operator, dynamo_types)
        return table.scan(scan_filters, limit, exclusive_start_key)
    def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names,
                    expression_attribute_values, expected=None):
        """UpdateItem: check Expected conditions, upsert the item, apply either
        an UpdateExpression or legacy AttributeUpdates, return the Item."""
        table = self.get_table(table_name)
        if all([table.hash_key_attr in key, table.range_key_attr in key]):
            # Covers cases where table has hash and range keys, ``key`` param
            # will be a dict
            hash_value = DynamoType(key[table.hash_key_attr])
            range_value = DynamoType(key[table.range_key_attr])
        elif table.hash_key_attr in key:
            # Covers tables that have a range key where ``key`` param is a dict
            hash_value = DynamoType(key[table.hash_key_attr])
            range_value = None
        else:
            # Covers other cases
            hash_value = DynamoType(key)
            range_value = None
        item = table.get_item(hash_value, range_value)
        if item is None:
            item_attr = {}
        elif hasattr(item, 'attrs'):
            item_attr = item.attrs
        else:
            item_attr = item
        if not expected:
            expected = {}
        # NOTE(review): the loop variable below shadows the ``key`` parameter;
        # harmless here (the parameter is no longer used) but worth renaming.
        for key, val in expected.items():
            if 'Exists' in val and val['Exists'] is False:
                if key in item_attr:
                    raise ValueError("The conditional request failed")
            elif key not in item_attr:
                raise ValueError("The conditional request failed")
            elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value:
                raise ValueError("The conditional request failed")
            elif 'ComparisonOperator' in val:
                comparison_func = get_comparison_func(
                    val['ComparisonOperator'])
                dynamo_types = [DynamoType(ele) for ele in val[
                    "AttributeValueList"]]
                for t in dynamo_types:
                    if not comparison_func(item_attr[key].value, t.value):
                        raise ValueError('The conditional request failed')
        # Update does not fail on new items, so create one
        if item is None:
            data = {
                table.hash_key_attr: {
                    hash_value.type: hash_value.value,
                },
            }
            if range_value:
                data.update({
                    table.range_key_attr: {
                        range_value.type: range_value.value,
                    }
                })
            table.put_item(data)
            item = table.get_item(hash_value, range_value)
        if update_expression:
            item.update(update_expression, expression_attribute_names,
                        expression_attribute_values)
        else:
            item.update_with_attribute_updates(attribute_updates)
        return item
    def delete_item(self, table_name, keys):
        table = self.get_table(table_name)
        if not table:
            return None
        hash_key, range_key = self.get_keys_value(table, keys)
        return table.delete_item(hash_key, range_key)
# Module-level singleton used by the responses layer / moto decorators.
dynamodb_backend2 = DynamoDBBackend()
| {
"content_hash": "64a2b98f5bd1ab842f9d88a02474114d",
"timestamp": "",
"source": "github",
"line_count": 719,
"max_line_length": 122,
"avg_line_length": 38.378303198887345,
"alnum_prop": 0.5430890773356527,
"repo_name": "kefo/moto",
"id": "7590ee1e1bc75cebe7946cc2f12a367d21f550a9",
"size": "27594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/dynamodb2/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "Python",
"bytes": "2996908"
},
{
"name": "Ruby",
"bytes": "188"
}
],
"symlink_target": ""
} |
import os, logging
# Django settings module for the 'website' project.
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ejxy^_(!sf_)ps@#2dr*q+x2jkuv0rre3dlm$orh%1*pvj1_jz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): logging.basicConfig() returns None, so LOG is always None;
# the call still configures the root logger as a side effect.
LOG = logging.basicConfig(filename='/tmp/myapp.log', level=logging.DEBUG)
# NOTE(review): "*" accepts any Host header -- tighten for production.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'customers',
    'discounts',
    'utils',
    'products',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
# Static files are collected into the Docker container path below.
STATIC_ROOT = '/home/docker/code/app/static'
STATIC_URL = '/static/'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# TEST toggles between the dockerized Postgres service and a local SQLite file.
TEST = False
if not TEST:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'postgres',
            'USER': 'postgres',
            'HOST': 'db',
            'PORT': 5432,
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# NOTE(review): duplicate of the STATIC_URL assignment above -- harmless but
# one of the two should be removed.
STATIC_URL = '/static/'
| {
"content_hash": "e08d54650f81c41f698423edb5e097c1",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 91,
"avg_line_length": 26.78048780487805,
"alnum_prop": 0.6530054644808743,
"repo_name": "jroeland/teapot",
"id": "57ec30d45b9d0b3c8e190902d50a61a93f5f31a9",
"size": "3294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/web/app/website/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59770"
},
{
"name": "Shell",
"bytes": "2058"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates StaticPage, a wagtail Page
    # subclass with an intro line, a rich-text body, and an optional main
    # image (SET_NULL so deleting the image keeps the page).
    initial = True
    dependencies = [
        ('wagtailcore', '0023_alter_page_revision_on_delete_behaviour'),
        ('wagtailimages', '0010_change_on_delete_behaviour'),
    ]
    operations = [
        migrations.CreateModel(
            name='StaticPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', models.CharField(blank=True, max_length=250)),
                ('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
                ('main_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| {
"content_hash": "e98df07a0c1a52e93ca7efc8ff9b055c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 191,
"avg_line_length": 36.483870967741936,
"alnum_prop": 0.6162687886825818,
"repo_name": "samuelleeuwenburg/Samplate",
"id": "2e340c670717f0b9c4fed27427c1a6179dee2a0c",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "staticpage/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36"
},
{
"name": "HTML",
"bytes": "5986"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "16990"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.views.generic.base import TemplateView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
import core.models as coremodels
from django.views.generic.edit import CreateView, UpdateView
from sitegate.decorators import redirect_signedin, sitegate_view
# Create your views here.
class LandingView(TemplateView):
    """Static landing page."""
    template_name = 'base/index.html'
class LocationListView(ListView):
    """Paginated list of all Locations (one per page)."""
    model = coremodels.Location
    template_name = 'location/list.html'
    paginate_by = 1
class SearchListView(LocationListView):
    """Location list filtered by the ``query`` GET parameter (title substring)."""
    def get_queryset(self):
        # Empty string matches every location (icontains '').
        query = self.request.GET.get('query', '')
        return coremodels.Location.objects.filter(title__icontains=query)
class LocationDetailView(DetailView):
    """Detail page for one Location, including the current user's review."""
    model = coremodels.Location
    template_name = 'location/detail.html'
    context_object_name = 'location'
    def get_context_data(self, **kwargs):
        """Add ``user_review`` (the authenticated user's review or None)."""
        context = super(LocationDetailView, self).get_context_data(**kwargs)
        location = coremodels.Location.objects.get(id=self.kwargs['pk'])
        if self.request.user.is_authenticated():
            # .first() replaces the original count()-then-index pair: one
            # query instead of two, and it returns None when no review exists.
            context['user_review'] = coremodels.Review.objects.filter(
                location=location, user=self.request.user).first()
        return context
class LocationCreateView(CreateView):
    """Create a Location via a generic model form exposing every field."""
    model = coremodels.Location
    template_name = 'base/form.html'
    fields = "__all__"
class LocationUpdateView(UpdateView):
    """Edit an existing Location via a generic model form."""
    model = coremodels.Location
    template_name = 'base/form.html'
    fields = "__all__"
class ReviewCreateView(CreateView):
    """Create a Review owned by the current user for the Location in the URL."""
    model = coremodels.Review
    template_name = 'base/form.html'
    fields = ['description', 'rating']
    def form_valid(self, form):
        # Attach ownership and target location before the default save runs.
        review = form.instance
        review.location = coremodels.Location.objects.get(id=self.kwargs['pk'])
        review.user = self.request.user
        return super(ReviewCreateView, self).form_valid(form)
    def get_success_url(self):
        # Back to the reviewed location's detail page.
        return self.object.location.get_absolute_url()
class ReviewUpdateView(UpdateView):
    """Edit the current user's existing review for a location."""
    model = coremodels.Review
    template_name = 'base/form.html'
    fields = ['description', 'rating']
    def get_object(self):
        # The review is identified by (location from URL, current user).
        lookup = {'location__id': self.kwargs['pk'], 'user': self.request.user}
        return coremodels.Review.objects.get(**lookup)
    def get_success_url(self):
        return self.object.location.get_absolute_url()
# from standard sitegate docs-signup functionality
@sitegate_view(widget_attrs={'class': 'form-control', 'placeholder': lambda f: f.label}, template='form_bootstrap3') # This also prevents logged in users from accessing our sign in/sign up page.
def entrance(request):
    """Render the combined sign-in / sign-up page provided by sitegate."""
    return render(request, 'base/entrance.html', {'title': 'Sign in & Sign up'})
| {
"content_hash": "f997fb57af16f810bad525f11a98745d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 194,
"avg_line_length": 37.142857142857146,
"alnum_prop": 0.7181818181818181,
"repo_name": "georgebcservices/coffeedapp",
"id": "5a8012f84aad798e7a665af1cdbc85d209b68fb5",
"size": "3101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "145773"
},
{
"name": "HTML",
"bytes": "21323"
},
{
"name": "JavaScript",
"bytes": "102620"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23829"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._lots_operations import build_list_by_billing_account_request, build_list_by_billing_profile_request, build_list_by_customer_request
T = TypeVar('T')
# Optional per-call callback: receives the raw pipeline response, the
# deserialized result and the response headers, and may transform the result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LotsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.consumption.aio.ConsumptionManagementClient`'s
        :attr:`lots` attribute.
    """
    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # Collaborators (client, config, serializer, deserializer) are injected
        # by the generated service client, either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list_by_billing_profile(
        self,
        billing_account_id: str,
        billing_profile_id: str,
        **kwargs: Any
    ) -> AsyncIterable[_models.Lots]:
        """Lists all Azure credits for a billing account or a billing profile. The API is only supported
        for Microsoft Customer Agreements (MCA) billing accounts.

        :param billing_account_id: BillingAccount ID.
        :type billing_account_id: str
        :param billing_profile_id: Azure Billing Profile ID.
        :type billing_profile_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Lots or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.consumption.models.Lots]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Copy user-supplied headers/params so mutation here does not leak back
        # to the caller's kwargs.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.Lots]

        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        def prepare_request(next_link=None):
            # Build the GET request: from the operation's route template for the
            # first page, or from the service-provided next_link for later pages.
            if not next_link:
                request = build_list_by_billing_profile_request(
                    billing_account_id=billing_account_id,
                    billing_profile_id=billing_profile_id,
                    api_version=api_version,
                    template_url=self.list_by_billing_profile.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                request = build_list_by_billing_profile_request(
                    billing_account_id=billing_account_id,
                    billing_profile_id=billing_profile_id,
                    api_version=api_version,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("Lots", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing ARM error details on non-200.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_billing_profile.metadata = {'url': "/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/providers/Microsoft.Consumption/lots"}  # type: ignore

    @distributed_trace
    def list_by_billing_account(
        self,
        billing_account_id: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable[_models.Lots]:
        """Lists all Microsoft Azure consumption commitments for a billing account. The API is only
        supported for Microsoft Customer Agreements (MCA) and Direct Enterprise Agreement (EA) billing
        accounts.

        :param billing_account_id: BillingAccount ID.
        :type billing_account_id: str
        :param filter: May be used to filter the lots by Status, Source etc. The filter supports 'eq',
         'lt', 'gt', 'le', 'ge', and 'and'. It does not currently support 'ne', 'or', or 'not'. Tag
         filter is a key value pair string where key and value is separated by a colon (:). Default
         value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Lots or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.consumption.models.Lots]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Copy user-supplied headers/params so mutation here does not leak back
        # to the caller's kwargs.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.Lots]

        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        def prepare_request(next_link=None):
            # Build the GET request: from the operation's route template for the
            # first page, or from the service-provided next_link for later pages.
            if not next_link:
                request = build_list_by_billing_account_request(
                    billing_account_id=billing_account_id,
                    api_version=api_version,
                    filter=filter,
                    template_url=self.list_by_billing_account.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                request = build_list_by_billing_account_request(
                    billing_account_id=billing_account_id,
                    api_version=api_version,
                    filter=filter,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("Lots", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing ARM error details on non-200.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_billing_account.metadata = {'url': "/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.Consumption/lots"}  # type: ignore

    @distributed_trace
    def list_by_customer(
        self,
        billing_account_id: str,
        customer_id: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable[_models.Lots]:
        """Lists all Azure credits for a customer. The API is only supported for Microsoft Partner
        Agreements (MPA) billing accounts.

        :param billing_account_id: BillingAccount ID.
        :type billing_account_id: str
        :param customer_id: Customer ID.
        :type customer_id: str
        :param filter: May be used to filter the lots by Status, Source etc. The filter supports 'eq',
         'lt', 'gt', 'le', 'ge', and 'and'. Tag filter is a key value pair string where key and value is
         separated by a colon (:). Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Lots or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.consumption.models.Lots]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Copy user-supplied headers/params so mutation here does not leak back
        # to the caller's kwargs.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-10-01"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.Lots]

        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        def prepare_request(next_link=None):
            # Build the GET request: from the operation's route template for the
            # first page, or from the service-provided next_link for later pages.
            if not next_link:
                request = build_list_by_customer_request(
                    billing_account_id=billing_account_id,
                    customer_id=customer_id,
                    api_version=api_version,
                    filter=filter,
                    template_url=self.list_by_customer.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                request = build_list_by_customer_request(
                    billing_account_id=billing_account_id,
                    customer_id=customer_id,
                    api_version=api_version,
                    filter=filter,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize("Lots", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing ARM error details on non-200.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_customer.metadata = {'url': "/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}/providers/Microsoft.Consumption/lots"}  # type: ignore
| {
"content_hash": "9b4877d3495e471bb7e44dec4d0ea1db",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 201,
"avg_line_length": 45.09003215434084,
"alnum_prop": 0.6014404906225487,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2aa39a2edb04854fe413f9b6d523691280236343",
"size": "14523",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/consumption/azure-mgmt-consumption/azure/mgmt/consumption/aio/operations/_lots_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.test import TestCase, Client
from django.core.urlresolvers import resolve
from django.contrib.auth.models import User
from allaccess.views import OAuthRedirect, OAuthCallback
from authentication.views import ForgotPasswordView, ResetPasswordView,\
ActivateAccountView
from authentication.emails import SendGrid
class UserLoginRouteTestCase(TestCase):
    """Verify that GET and POST requests to the login route succeed."""

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user(
            'johndoe', 'johndoe@gmail.com', '12345')

    def test_route_get_auth_login(self):
        """The login page renders with HTTP 200."""
        resp = self.client.get('/login/')
        self.assertEqual(resp.status_code, 200)

    def test_route_post_auth_login(self):
        """A valid credential POST redirects (HTTP 302)."""
        credentials = {'username': 'johndoe@gmail.com', 'password': '12345'}
        resp = self.client.post('/login/', credentials)
        self.assertEqual(resp.status_code, 302)
class UserLogoutRouteTestCase(TestCase):
    """
    Test that user can logout of session.
    """

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user('johndoe',
                                             'johndoe@gmail.com',
                                             '12345')

    def test_route_get_auth_logout(self):
        # FIX: the login URL is '/login/' (with trailing slash), matching the
        # rest of this suite. POSTing to '/login' never reached the view:
        # Django's APPEND_SLASH redirect does not re-submit POST data, so the
        # "logged in" precondition of this test was silently skipped.
        self.client.post(
            '/login/',
            dict(
                username='johndoe@gmail.com',
                password='12345'
            )
        )
        response = self.client.get('/logout/')
        # A successful logout redirects away (302) and renders no context.
        self.assertIsNone(response.context)
        self.assertEquals(response.status_code, 302)
class ForgotPasswordRouteTestCase(TestCase):
    """Exercise the password-recovery entry points."""

    def setUp(self):
        self.client = Client()

    def test_get_forgot_route_returns_200(self):
        """The recovery page renders."""
        self.assertEqual(self.client.get('/recovery/').status_code, 200)

    def test_post_forgot_route_returns_200(self):
        """Submitting an email address to the recovery page succeeds."""
        payload = {"email": "random@mail.com"}
        resp = self.client.post('/recovery/', payload)
        self.assertEqual(resp.status_code, 200)

    def test_forgot_route_resolves_to_correct_view(self):
        """The /recovery/ URL is handled by ForgotPasswordView."""
        resp = self.client.get('/recovery/')
        expected = ForgotPasswordView.as_view().__name__
        self.assertEqual(resp.resolver_match.func.__name__, expected)
class ResetPasswordRouteTestCase(TestCase):
    """Check that the reset-password URL maps to ResetPasswordView."""

    def setUp(self):
        self.client = Client()

    def test_reset_route_resolves_to_correct_view(self):
        url = '/recovery/ajkzfYba9847DgJ7wbkwAaSbkTjUdawGG998qo3HG8qae83'
        resp = self.client.get(url)
        self.assertEqual(
            resp.resolver_match.func.__name__,
            ResetPasswordView.as_view().__name__,
        )
class UserRegistrationRouteTest(TestCase):
    """Exercise the user-registration routes."""

    def setUp(self):
        """Prepare a test client and a valid sign-up form payload."""
        self.client_stub = Client()
        self.form_data = {
            'username': 'andela',
            'password1': 'andela',
            'password2': 'andela',
            'email': 'andela@andela.com',
        }

    def test_view_register_route(self):
        """The registration page renders."""
        resp = self.client_stub.get('/register/')
        self.assertEqual(resp.status_code, 200)

    def test_view_reg_route(self):
        """Valid registration data triggers a redirect."""
        resp = self.client_stub.post('/register/', self.form_data)
        self.assertEqual(resp.status_code, 302)

    def test_view_reg_success_route(self):
        """The confirmation page renders after registration."""
        resp = self.client_stub.get('/confirm/')
        self.assertEqual(resp.status_code, 200)

    def test_user_register_function_called(self):
        """The /register/ route binds to UserRegistrationView."""
        match = resolve('/register/')
        self.assertEqual(match.func.__name__, 'UserRegistrationView')
class FacebookSignupTestCase(TestCase):
    """Verify the Facebook OAuth entry and callback routes."""

    def setUp(self):
        self.client_stub = Client()

    def test_user_signup_via_facebook(self):
        """The OAuth login route is handled by allaccess's OAuthRedirect."""
        resp = self.client_stub.post('/accounts/login/facebook/')
        self.assertEqual(
            resp.resolver_match.func.__name__,
            OAuthRedirect.as_view().__name__,
        )

    def test_user_redirected_after_facebook_signup(self):
        """The OAuth callback route is handled by allaccess's OAuthCallback."""
        resp = self.client_stub.post('/accounts/callback/facebook/')
        self.assertEqual(
            resp.resolver_match.func.__name__,
            OAuthCallback.as_view().__name__,
        )
class ActivateAccountRoute(TestCase):
    """Exercise the account-activation flow."""

    def setUp(self):
        self.client_stub = Client()
        self.form_data = {
            'username': 'andela',
            'password1': 'andela',
            'password2': 'andela',
            'email': 'andela@andela.com',
        }

    def test_activation_link_calls_actual_view_class(self):
        """Activation URLs are handled by ActivateAccountView."""
        url = '/activation/ajkzfYba9847DgJ7wbkwAaSbkTjUdawGG998qo3HG8qae83'
        resp = self.client.get(url)
        self.assertEqual(
            resp.resolver_match.func.__name__,
            ActivateAccountView.as_view().__name__,
        )

    def test_activation_successful(self):
        """A freshly issued activation link renders the success template."""
        resp = self.client_stub.post('/register/', self.form_data)
        # The registration response exposes the activation URL in its context;
        # the hash is its final path segment.
        activation_hash_url = resp.context[0]['activation_hash_url']
        activation_hash = activation_hash_url.split('/')[-1]
        resp = self.client.get('/activation/%s' % activation_hash)
        self.assertEqual(resp.templates[0].name,
                         'authentication/activation_successful.html')
| {
"content_hash": "a95b32c53c45689851117aeda0bd7f56",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 73,
"avg_line_length": 30.932642487046632,
"alnum_prop": 0.5884422110552764,
"repo_name": "andela/troupon",
"id": "c00082a6a998fe05f31707388c57b77db08078c2",
"size": "5994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troupon/authentication/tests/test_routes.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import caffe
import yaml
from fast_rcnn.config import cfg
import numpy as np
import numpy.random as npr
from rpn.generate_anchors import generate_anchors
from utils.cython_bbox import bbox_overlaps
from fast_rcnn.bbox_transform import bbox_transform
# Toggle verbose diagnostics (anchor dumps, fg/bg statistics) in this layer.
DEBUG = False
class AnchorTargetLayer(caffe.Layer):
    """
    Assign anchors to ground-truth targets. Produces anchor classification
    labels and bounding-box regression targets.
    """

    def setup(self, bottom, top):
        """Read layer params, build the base anchors and pre-shape top blobs."""
        layer_params = yaml.load(self.param_str_)
        anchor_scales = layer_params.get('scales', (8, 16, 32))
        self._anchors = generate_anchors(scales=np.array(anchor_scales))
        self._num_anchors = self._anchors.shape[0]
        self._feat_stride = layer_params['feat_stride']

        if DEBUG:
            print 'anchors:'
            print self._anchors
            print 'anchor shapes:'
            print np.hstack((
                self._anchors[:, 2::4] - self._anchors[:, 0::4],
                self._anchors[:, 3::4] - self._anchors[:, 1::4],
            ))
            self._counts = cfg.EPS
            self._sums = np.zeros((1, 4))
            self._squared_sums = np.zeros((1, 4))
            self._fg_sum = 0
            self._bg_sum = 0
            self._count = 0

        # allow boxes to sit over the edge by a small amount
        self._allowed_border = layer_params.get('allowed_border', 0)

        height, width = bottom[0].data.shape[-2:]
        if DEBUG:
            print 'AnchorTargetLayer: height', height, 'width', width

        A = self._num_anchors
        # labels
        top[0].reshape(1, 1, A * height, width)
        # bbox_targets
        top[1].reshape(1, A * 4, height, width)
        # bbox_inside_weights
        top[2].reshape(1, A * 4, height, width)
        # bbox_outside_weights
        top[3].reshape(1, A * 4, height, width)

    def forward(self, bottom, top):
        """Label anchors (fg/bg/ignore) and compute regression targets."""
        # Algorithm:
        #
        # for each (H, W) location i
        #   generate 9 anchor boxes centered on cell i
        #   apply predicted bbox deltas at cell i to each of the 9 anchors
        # filter out-of-image anchors
        # measure GT overlap

        assert bottom[0].data.shape[0] == 1, \
            'Only single item batches are supported'

        # map of shape (..., H, W)
        height, width = bottom[0].data.shape[-2:]
        # GT boxes (x1, y1, x2, y2, label)
        gt_boxes = bottom[1].data
        # im_info
        im_info = bottom[2].data[0, :]

        if DEBUG:
            print ''
            print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
            print 'scale: {}'.format(im_info[2])
            print 'height, width: ({}, {})'.format(height, width)
            print 'rpn: gt_boxes.shape', gt_boxes.shape
            print 'rpn: gt_boxes', gt_boxes

        # 1. Generate proposals from bbox deltas and shifted anchors
        shift_x = np.arange(0, width) * self._feat_stride
        shift_y = np.arange(0, height) * self._feat_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()
        # add A anchors (1, A, 4) to
        # cell K shifts (K, 1, 4) to get
        # shift anchors (K, A, 4)
        # reshape to (K*A, 4) shifted anchors
        A = self._num_anchors
        K = shifts.shape[0]
        all_anchors = (self._anchors.reshape((1, A, 4)) +
                       shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
        all_anchors = all_anchors.reshape((K * A, 4))
        total_anchors = int(K * A)

        # only keep anchors inside the image
        inds_inside = np.where(
            (all_anchors[:, 0] >= -self._allowed_border) &
            (all_anchors[:, 1] >= -self._allowed_border) &
            (all_anchors[:, 2] < im_info[1] + self._allowed_border) &  # width
            (all_anchors[:, 3] < im_info[0] + self._allowed_border)    # height
        )[0]

        if DEBUG:
            print 'total_anchors', total_anchors
            print 'inds_inside', len(inds_inside)

        # keep only inside anchors
        anchors = all_anchors[inds_inside, :]
        if DEBUG:
            print 'anchors.shape', anchors.shape

        # label: 1 is positive, 0 is negative, -1 is dont care
        labels = np.empty((len(inds_inside), ), dtype=np.float32)
        labels.fill(-1)

        # overlaps between the anchors and the gt boxes
        # overlaps (ex, gt)
        overlaps = bbox_overlaps(
            np.ascontiguousarray(anchors, dtype=np.float),
            np.ascontiguousarray(gt_boxes, dtype=np.float))
        argmax_overlaps = overlaps.argmax(axis=1)
        max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
        gt_argmax_overlaps = overlaps.argmax(axis=0)
        gt_max_overlaps = overlaps[gt_argmax_overlaps,
                                   np.arange(overlaps.shape[1])]
        # ties: every anchor attaining a gt's max overlap is kept as candidate
        gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]

        if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels first so that positive labels can clobber them
            labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0

        # fg label: for each gt, anchor with highest overlap
        labels[gt_argmax_overlaps] = 1

        # fg label: above threshold IOU
        labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1

        if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
            # assign bg labels last so that negative labels can clobber positives
            labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0

        # subsample positive labels if we have too many
        num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
        fg_inds = np.where(labels == 1)[0]
        if len(fg_inds) > num_fg:
            disable_inds = npr.choice(
                fg_inds, size=(len(fg_inds) - num_fg), replace=False)
            labels[disable_inds] = -1

        # subsample negative labels if we have too many
        num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
        bg_inds = np.where(labels == 0)[0]
        if len(bg_inds) > num_bg:
            disable_inds = npr.choice(
                bg_inds, size=(len(bg_inds) - num_bg), replace=False)
            labels[disable_inds] = -1
            #print "was %s inds, disabling %s, now %s inds" % (
                #len(bg_inds), len(disable_inds), np.sum(labels == 0))

        # NOTE(review): this zeros allocation is immediately overwritten below.
        bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
        bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])

        bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
        bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)

        bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
        if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
            # uniform weighting of examples (given non-uniform sampling)
            num_examples = np.sum(labels >= 0)
            positive_weights = np.ones((1, 4)) * 1.0 / num_examples
            negative_weights = np.ones((1, 4)) * 1.0 / num_examples
        else:
            assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
                    (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
            positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
                                np.sum(labels == 1))
            negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
                                np.sum(labels == 0))
        bbox_outside_weights[labels == 1, :] = positive_weights
        bbox_outside_weights[labels == 0, :] = negative_weights

        if DEBUG:
            self._sums += bbox_targets[labels == 1, :].sum(axis=0)
            self._squared_sums += (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
            self._counts += np.sum(labels == 1)
            means = self._sums / self._counts
            stds = np.sqrt(self._squared_sums / self._counts - means ** 2)
            print 'means:'
            print means
            print 'stdevs:'
            print stds

        # map up to original set of anchors
        labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
        bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
        bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
        bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)

        if DEBUG:
            print 'rpn: max max_overlap', np.max(max_overlaps)
            print 'rpn: num_positive', np.sum(labels == 1)
            print 'rpn: num_negative', np.sum(labels == 0)
            self._fg_sum += np.sum(labels == 1)
            self._bg_sum += np.sum(labels == 0)
            self._count += 1
            print 'rpn: num_positive avg', self._fg_sum / self._count
            print 'rpn: num_negative avg', self._bg_sum / self._count

        # labels
        labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
        labels = labels.reshape((1, 1, A * height, width))
        top[0].reshape(*labels.shape)
        top[0].data[...] = labels

        # bbox_targets
        bbox_targets = bbox_targets \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        top[1].reshape(*bbox_targets.shape)
        top[1].data[...] = bbox_targets

        # bbox_inside_weights
        bbox_inside_weights = bbox_inside_weights \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        assert bbox_inside_weights.shape[2] == height
        assert bbox_inside_weights.shape[3] == width
        top[2].reshape(*bbox_inside_weights.shape)
        top[2].data[...] = bbox_inside_weights

        # bbox_outside_weights
        bbox_outside_weights = bbox_outside_weights \
            .reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
        assert bbox_outside_weights.shape[2] == height
        assert bbox_outside_weights.shape[3] == width
        top[3].reshape(*bbox_outside_weights.shape)
        top[3].data[...] = bbox_outside_weights

    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass

    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
def _unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
def _compute_targets(ex_rois, gt_rois):
    """Compute bounding-box regression targets for an image."""

    assert ex_rois.shape[0] == gt_rois.shape[0]
    assert ex_rois.shape[1] == 4
    # --- fork modification (translated from Japanese): the original
    # `assert gt_rois.shape[1] == 5` is disabled because gt_rois here has
    # 4 + num_classes columns; only the first 4 (coordinates) are used below.
    return bbox_transform(ex_rois, gt_rois[:, :4]).astype(np.float32, copy=False)
| {
"content_hash": "f69c88685ba20a9cceed6894af8c413b",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 95,
"avg_line_length": 40.54744525547445,
"alnum_prop": 0.5650765076507651,
"repo_name": "AtsushiHashimoto/fujino_mthesis",
"id": "b90aa5546ebc5d5111e2f2282d67caa2d607a2e9",
"size": "11428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/frcnn/anchor_target_layer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Shell",
"bytes": "13458"
}
],
"symlink_target": ""
} |
"""HTTP API connector implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import json
import logging
from future.moves.urllib import parse as urlparse
from future.utils import iterkeys
import requests
from werkzeug import routing
from google.protobuf import json_format
from google.protobuf import symbol_database
from grr_api_client import connector
from grr_api_client import errors
from grr_api_client import utils
from grr_response_proto.api import reflection_pb2
logger = logging.getLogger(__name__)
class Error(Exception):
  """Base error class for HTTP connector. All module errors derive from it."""
class HttpConnector(connector.Connector):
"""API connector implementation that works through HTTP API."""
JSON_PREFIX = ")]}\'\n"
DEFAULT_PAGE_SIZE = 50
DEFAULT_BINARY_CHUNK_SIZE = 66560
def __init__(self,
api_endpoint=None,
auth=None,
proxies=None,
verify=True,
cert=None,
trust_env=True,
page_size=None):
super(HttpConnector, self).__init__()
self.api_endpoint = api_endpoint
self.auth = auth
self.proxies = proxies
self.verify = verify
self.cert = cert
self.trust_env = trust_env
self._page_size = page_size or self.DEFAULT_PAGE_SIZE
self.csrf_token = None
self.api_methods = {}
def _GetCSRFToken(self):
logger.debug("Fetching CSRF token from %s...", self.api_endpoint)
with requests.Session() as session:
session.trust_env = self.trust_env
index_response = session.get(
self.api_endpoint,
auth=self.auth,
proxies=self.proxies,
verify=self.verify,
cert=self.cert)
self._CheckResponseStatus(index_response)
csrf_token = index_response.cookies.get("csrftoken")
if not csrf_token:
raise RuntimeError("Can't get CSRF token.")
logger.debug("Got CSRF token: %s", csrf_token)
return csrf_token
def _FetchRoutingMap(self):
headers = {
"x-csrftoken": self.csrf_token,
"x-requested-with": "XMLHttpRequest"
}
cookies = {"csrftoken": self.csrf_token}
url = "%s/%s" % (self.api_endpoint.strip("/"),
"api/v2/reflection/api-methods")
with requests.Session() as session:
session.trust_env = self.trust_env
response = session.get(
url,
headers=headers,
cookies=cookies,
auth=self.auth,
proxies=self.proxies,
verify=self.verify,
cert=self.cert)
self._CheckResponseStatus(response)
json_str = response.content[len(self.JSON_PREFIX):]
# Register descriptors in the database, so that all API-related
# protos are recognized when Any messages are unpacked.
utils.RegisterProtoDescriptors(symbol_database.Default())
proto = reflection_pb2.ApiListApiMethodsResult()
json_format.Parse(json_str, proto, ignore_unknown_fields=True)
routing_rules = []
self.api_methods = {}
for method in proto.items:
if not method.http_route.startswith("/api/v2/"):
method.http_route = method.http_route.replace("/api/", "/api/v2/", 1)
self.api_methods[method.name] = method
routing_rules.append(
routing.Rule(
method.http_route,
methods=method.http_methods,
endpoint=method.name))
self.handlers_map = routing.Map(routing_rules)
parsed_endpoint_url = urlparse.urlparse(self.api_endpoint)
self.urls = self.handlers_map.bind(
parsed_endpoint_url.netloc, url_scheme=parsed_endpoint_url.scheme)
def _InitializeIfNeeded(self):
if not self.csrf_token:
self.csrf_token = self._GetCSRFToken()
if not self.api_methods:
self._FetchRoutingMap()
def _CoerceValueToQueryStringType(self, field, value):
if isinstance(value, bool):
value = int(value)
elif field.enum_type:
value = field.enum_type.values_by_number[value].name.lower()
return value
def _GetMethodUrlAndPathParamsNames(self, handler_name, args):
path_params = {}
if args:
for field, value in args.ListFields():
if self.handlers_map.is_endpoint_expecting(handler_name, field.name):
path_params[field.name] = self._CoerceValueToQueryStringType(
field, value)
url = self.urls.build(handler_name, path_params, force_external=True)
method = None
for rule in self.handlers_map.iter_rules():
if rule.endpoint == handler_name:
method = [m for m in rule.methods if m != "HEAD"][0]
if not method:
raise RuntimeError("Can't find method for %s" % handler_name)
return method, url, list(iterkeys(path_params))
def _ArgsToQueryParams(self, args, exclude_names):
if not args:
return {}
# Using OrderedDict guarantess stable order of query parameters in the
# generated URLs.
result = collections.OrderedDict()
for field, value in sorted(args.ListFields(), key=lambda f: f[0].name):
if field.name not in exclude_names:
result[field.name] = self._CoerceValueToQueryStringType(field, value)
return result
def _ArgsToBody(self, args, exclude_names):
if not args:
return None
args_copy = utils.CopyProto(args)
for name in exclude_names:
args_copy.ClearField(name)
return json_format.MessageToJson(args_copy)
def _CheckResponseStatus(self, response):
if response.status_code == 200:
return
content = response.content
json_str = content[len(self.JSON_PREFIX):]
try:
# TODO(hanuszczak): `json` package should not be used.
parsed_json = json.loads(json_str)
message = parsed_json["message"] + "\n" + parsed_json.get("traceBack", "")
except (ValueError, KeyError):
message = content
if response.status_code == 403:
raise errors.AccessForbiddenError(message)
elif response.status_code == 404:
raise errors.ResourceNotFoundError(message)
elif response.status_code == 422:
raise errors.InvalidArgumentError(message)
elif response.status_code == 501:
raise errors.ApiNotImplementedError(message)
else:
raise errors.UnknownError(message)
def BuildRequest(self, method_name, args):
self._InitializeIfNeeded()
method, url, path_params_names = self._GetMethodUrlAndPathParamsNames(
method_name, args)
if method == "GET":
body = None
query_params = self._ArgsToQueryParams(args, path_params_names)
else:
body = self._ArgsToBody(args, path_params_names)
query_params = {}
headers = {
"x-csrftoken": self.csrf_token,
"x-requested-with": "XMLHttpRequest"
}
cookies = {"csrftoken": self.csrf_token}
return requests.Request(
method,
url,
data=body,
params=query_params,
headers=headers,
cookies=cookies,
auth=self.auth)
@property
def page_size(self):
return self._page_size
def SendRequest(self, handler_name, args):
    """Execute a non-streaming API call and parse its JSON response.

    Returns a result proto when the method declares a result type and
    None otherwise. Raises an `errors.*` exception for non-200 responses.
    """
    self._InitializeIfNeeded()
    method_descriptor = self.api_methods[handler_name]
    prepped_request = self.BuildRequest(method_descriptor.name, args).prepare()
    with requests.Session() as session:
        session.trust_env = self.trust_env
        send_options = session.merge_environment_settings(
            prepped_request.url, self.proxies or {}, None, self.verify,
            self.cert)
        response = session.send(prepped_request, **send_options)
    self._CheckResponseStatus(response)
    # Strip the anti-XSSI prefix before parsing the JSON payload.
    json_str = response.content[len(self.JSON_PREFIX):]
    if not method_descriptor.result_type_descriptor.name:
        # Method declares no result type; nothing to parse.
        return None
    default_value = method_descriptor.result_type_descriptor.default
    result = utils.TypeUrlToMessage(default_value.type_url)
    json_format.Parse(json_str, result, ignore_unknown_fields=True)
    return result
def SendStreamingRequest(self, handler_name, args):
    """Execute a streaming API call and return a binary chunk iterator.

    The `requests.Session` must outlive this call so the response body can
    be streamed lazily; it is closed by the returned iterator's `on_close`
    callback. Fix: previously the session (and any open response) leaked
    when `session.send` or `_CheckResponseStatus` raised — it is now closed
    on the failure path before re-raising.
    """
    self._InitializeIfNeeded()
    method_descriptor = self.api_methods[handler_name]
    request = self.BuildRequest(method_descriptor.name, args)
    prepped_request = request.prepare()
    session = requests.Session()
    try:
        session.trust_env = self.trust_env
        options = session.merge_environment_settings(prepped_request.url,
                                                     self.proxies or {}, None,
                                                     self.verify, self.cert)
        options["stream"] = True
        response = session.send(prepped_request, **options)
        self._CheckResponseStatus(response)
    except Exception:
        # Don't leak the session if setup fails; on success it stays open
        # and is released by Close() below.
        session.close()
        raise

    def GenerateChunks():
        # Stream the body lazily in fixed-size binary chunks.
        for chunk in response.iter_content(self.DEFAULT_BINARY_CHUNK_SIZE):
            yield chunk

    def Close():
        # Release both the HTTP response and the underlying session.
        response.close()
        session.close()

    return utils.BinaryChunkIterator(chunks=GenerateChunks(), on_close=Close)
| {
"content_hash": "f2b31fc280dee9b296bebd58d6b311fb",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 80,
"avg_line_length": 30.344594594594593,
"alnum_prop": 0.6505232687597418,
"repo_name": "dunkhong/grr",
"id": "976b6307e432c753376afe3a635c409bdeb5ed49",
"size": "9004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api_client/python/grr_api_client/connectors/http_connector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "36745"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "193751"
},
{
"name": "JavaScript",
"bytes": "12795"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7430923"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "49155"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "10560"
},
{
"name": "TypeScript",
"bytes": "56756"
}
],
"symlink_target": ""
} |
import azure.mgmt.resource.resources as resources
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.rdbms import postgresql
from msrestazure.azure_exceptions import CloudError
from common.methods import set_progress
from resourcehandlers.azure_arm.models import AzureARMHandler
RESOURCE_IDENTIFIER = "azure_database_name"
def get_tenant_id_for_azure(handler):
    """Return the Azure tenant id stored on a resource handler.

    CloudBolt versions newer than 9.4.5 renamed the column to
    ``azure_tenant_id``; older versions expose ``tenant_id``. The newer
    attribute takes precedence when present.
    """
    try:
        return handler.azure_tenant_id
    except AttributeError:
        return handler.tenant_id
def discover_resources(**kwargs):
    """Discover Azure PostgreSQL databases for every Azure ARM handler.

    Returns a list of dicts keyed by this blueprint's resource fields;
    ``azure_database_name`` matches RESOURCE_IDENTIFIER above.
    """
    discovered_azure_sql = []
    for handler in AzureARMHandler.objects.all():
        set_progress(
            "Connecting to Azure sql \
            DB for handler: {}".format(
                handler
            )
        )
        credentials = ServicePrincipalCredentials(
            client_id=handler.client_id, secret=handler.secret, tenant=get_tenant_id_for_azure(handler)
        )
        azure_client = postgresql.PostgreSQLManagementClient(
            credentials, handler.serviceaccount
        )
        azure_resources_client = resources.ResourceManagementClient(
            credentials, handler.serviceaccount
        )
        for resource_group in azure_resources_client.resource_groups.list():
            # NOTE(review): `servers.list()` is subscription-wide, so every
            # server is tried against every resource group; mismatched
            # (group, server) pairs presumably fail and are skipped by the
            # CloudError handler below — confirm.
            # NOTE(review): `_get_next()` is a private SDK API — verify it
            # survives azure-mgmt-rdbms upgrades.
            for server in azure_client.servers.list()._get_next().json()["value"]:
                try:
                    for db in azure_client.databases.list_by_server(
                        resource_group.name, server["name"]
                    ):
                        # Skip built-in/system databases.
                        if db.name in [
                            "information_schema",
                            "performance_schema",
                            "postgres",
                        ]:
                            continue
                        discovered_azure_sql.append(
                            {
                                "name": server["name"],
                                "azure_server_name": server["name"],
                                "azure_database_name": db.name,
                                "resource_group_name": resource_group.name,
                                "azure_rh_id": handler.id,
                            }
                        )
                except CloudError as e:
                    # Best-effort discovery: log and keep scanning.
                    set_progress("Azure CloudError: {}".format(e))
                    continue
    return discovered_azure_sql
| {
"content_hash": "d735e357163425f6e0dac950d05d9262",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 103,
"avg_line_length": 36.69117647058823,
"alnum_prop": 0.5346693386773547,
"repo_name": "CloudBoltSoftware/cloudbolt-forge",
"id": "3708a718c62cba309e70d694c028dea38aab2085",
"size": "2495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/azure_postgres/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1665"
},
{
"name": "HTML",
"bytes": "165828"
},
{
"name": "JavaScript",
"bytes": "1871"
},
{
"name": "PowerShell",
"bytes": "5779"
},
{
"name": "Python",
"bytes": "1742154"
},
{
"name": "Shell",
"bytes": "16836"
}
],
"symlink_target": ""
} |
"""Compute NeighborRank for nodes in a graph."""
from citenet import util
def neighborrank(graph, amount=100, neighborhood_depth=2):
    """Compute the NeighborRank of the top ``amount`` nodes in graph,
    using the specified neighborhood_depth.

    A node's NeighborRank is the size of the set obtained by starting
    from the node itself and pulling in predecessors of every member,
    repeated ``neighborhood_depth`` times.
    """
    # Most-cited nodes first: highest out-degree in the citation graph.
    top_cited = util.top_n_from_dict(graph.out_degree(), amount)

    def _neighborhood_size(root):
        members = {root}
        for _ in range(neighborhood_depth):
            # Snapshot the current members; additions during this round
            # are only expanded in the next round.
            for node in list(members):
                members.update(graph.predecessors(node))
        return len(members)

    return {root: _neighborhood_size(root) for root in top_cited}
| {
"content_hash": "a4bb0deb853413efc5dc46bfc507465a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 36.5,
"alnum_prop": 0.6449771689497716,
"repo_name": "Pringley/citenet",
"id": "f26b08220ccc852687563af163fbd20fe506c9bb",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "citenet/neighborrank.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20675"
}
],
"symlink_target": ""
} |
'''
@author: Fyzel@users.noreply.github.com
@copyright: 2017 Englesh.org. All rights reserved.
@license: https://github.com/Fyzel/weather-data-flaskapi/blob/master/LICENSE
@contact: Fyzel@users.noreply.github.com
@deffield updated: 2017-06-14
'''
import json
import logging
import random
import string
import sys
import unittest
from datetime import datetime, timezone

import pytz
import requests
def get_random_datetime():
    '''Return a random timezone-aware UTC datetime.

    The year is drawn from 1000 to the current year, and the day is capped
    at 28 so the result is valid in every month; time-of-day components
    are uniform. (The previous docstring claimed a 0001-01-01..9999-12-28
    range, which did not match the generated values.)
    '''
    year = random.randint(1000, datetime.now().year)
    month = random.randint(1, 12)
    day = random.randint(1, 28)
    hour = random.randint(0, 23)
    minute = random.randint(0, 59)
    second = random.randint(0, 59)
    return datetime(year=year,
                    month=month,
                    day=day,
                    hour=hour,
                    minute=minute,
                    second=second,
                    # stdlib equivalent of pytz.UTC for a plain UTC offset.
                    tzinfo=timezone.utc)
def get_random_string(length: int) -> str:
    '''Return a random string drawn from uppercase letters and digits.

    :arg length: Number of characters to generate.
    :rtype str
    '''
    alphabet = string.ascii_uppercase + string.digits
    characters = [random.choice(alphabet) for _ in range(length)]
    return ''.join(characters)
def get_random_record_data():
    '''Generate a random pressure-reading record.

    Numeric fields are rounded via string formatting to the precision the
    API expects; the location name defaults to Edmonton, AB, CA.
    '''
    reading = float('{:.4f}'.format(random.uniform(0, 100.0)))
    reading_units = get_random_string(16)
    error_range = float('{:.6f}'.format(random.uniform(0.0, 1.0)))
    observed_at = get_random_datetime()
    lat = float('{:.6f}'.format(random.uniform(-90.0, 90.0)))
    lon = float('{:.6f}'.format(random.uniform(-180.0, 180.0)))
    height = float('{:.4f}'.format(random.uniform(-90.0, 999.0)))
    height_units = get_random_string(16)
    return {
        'value': reading,
        'value_units': reading_units,
        'value_error_range': error_range,
        'latitude': lat,
        'longitude': lon,
        'elevation': height,
        'elevation_units': height_units,
        'timestamp': observed_at,
        'city': 'Edmonton',
        'province': 'AB',
        'country': 'CA'
    }
class TestCaseProtectedPressure(unittest.TestCase):
@property
def token(self):
    # JWT access token obtained by test_step_01_login.
    #
    # NOTE(review): the test steps assign via the *class*
    # (``TestCaseProtectedPressure.token = ...``), which replaces this
    # property descriptor on the class instead of invoking the setter
    # below — confirm whether these properties are still needed.
    return self._token

@token.setter
def token(self, value):
    self._token = value

@property
def previous_record(self):
    # JSON dict of the most recently created record, shared between steps.
    # Subject to the same class-attribute clobbering noted on `token`.
    return self._previous_record

@previous_record.setter
def previous_record(self, value):
    self._previous_record = value

@property
def last_id(self):
    # Database id returned by the POST step; used by later GET/PUT steps.
    # Subject to the same class-attribute clobbering noted on `token`.
    return self._last_id

@last_id.setter
def last_id(self, value: int):
    self._last_id = value
def setUp(self):
    '''
    Configure these to target the environment being tested. Sample values provided.
    '''
    # Deployment under test; adjust host/port and credentials as needed.
    self.base_url = 'http://localhost.localdomain:5000'
    self.context = 'weather'
    self.resource = 'protected/pressure'
    self.username = 'admin'
    self.password = 'secret'
    # Backing fields for the token/last_id properties; reset per test.
    self._token = None
    self._last_id = None
def tearDown(self):
    # No per-test cleanup is performed.
    pass
def test_step_00_login_fail(self):
    '''Authenticating with bad credentials must be rejected with 401.'''
    log = logging.getLogger('TestCase.test_step_00_login_fail')
    log.info('Start')
    auth_url = '{base_url}/auth'.format(
        base_url=self.base_url
    )
    # Reverse the real credentials to produce guaranteed-invalid ones.
    bad_username = self.username[::-1]
    bad_password = self.password[::-1]
    payload = '{{"username": "{username_value}","password": "{password_value}"}}'.format(
        username_value=bad_username,
        password_value=bad_password
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    log.debug('auth_url= {url}'.format(url=auth_url))
    log.debug('username= {username}'.format(username=bad_username))
    log.debug('password= {password}'.format(password=bad_password))
    log.debug('payload= {payload}'.format(payload=payload))
    headers = {
        'content-type': 'application/json',
        'cache-control': 'no-cache'
    }
    response = requests.request('POST', auth_url, data=payload, headers=headers)
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=401)
    )
    assert response.status_code == 401, 'Expected a HTTP status code 401'
    assert len(response.text) > 0, 'Expected data in the response'
    log.debug('response.text= {text}'.format(text=response.text))
    log.info('End')
def test_step_01_login(self):
    '''Test the login capability and store the JWT token for later steps.'''
    log = logging.getLogger('TestCase.test_step_01_login')
    log.info('Start')
    log.debug('base_url= %r', self.base_url)
    auth_url = '{base_url}/auth'.format(
        base_url=self.base_url
    )
    payload = '{{"username": "{username_value}","password": "{password_value}"}}'.format(
        username_value=self.username,
        password_value=self.password
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    log.debug('auth_url= {url}'.format(url=auth_url))
    # Fixed: these previously logged the *reversed* credentials (copied
    # from the login-failure test) even though the real ones are sent.
    # NOTE(review): logging a password, even in a test, is questionable.
    log.debug('username= {username}'.format(username=self.username))
    log.debug('password= {password}'.format(password=self.password))
    log.debug('payload= {payload}'.format(payload=payload))
    headers = {
        'content-type': 'application/json',
        'cache-control': 'no-cache'
    }
    response = requests.request('POST', auth_url, data=payload, headers=headers)
    assert response.status_code == 200, 'Expected a HTTP status code 200'
    assert len(response.text) > 0, 'Expected data in the response'
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=200)
    )
    log.debug('response.text= {text}'.format(text=response.text))
    json_data = json.loads(response.text)
    assert json_data['access_token'] is not None, 'Access token is not returned'
    # Stored on the class so subsequent test steps can authenticate.
    TestCaseProtectedPressure.token = json_data['access_token']
    log.debug('JWT token= {token}'.format(token=self.token))
    log.info('End')
def test_step_02_create_record_without_auth(self):
    '''Create a pressure record without JWT token; the API must reply 401.'''
    log = logging.getLogger('TestCase.test_step_02_create_record_without_auth')
    log.info('Start')
    app_url = '{base_url}/{context}/{resource}/'.format(
        base_url=self.base_url,
        context=self.context,
        resource=self.resource
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    # Fixed: this log line was labelled 'auth_url=' although it logs the
    # application URL; every sibling test uses 'app_url='.
    log.debug('app_url= {url}'.format(url=app_url))
    record_data = get_random_record_data()
    payload = '{{\n' \
              '"value": {value},\n' \
              '"value_units": "{value_units}",\n' \
              '"value_error_range": {value_error_range},\n' \
              '"latitude": {latitude},\n' \
              '"latitude_public": {latitude_public},\n' \
              '"longitude": {longitude},\n' \
              '"longitude_public": {longitude_public},\n' \
              '"city": "{city}",\n' \
              '"province": "{province}",\n' \
              '"country": "{country}",\n' \
              '"elevation": {elevation},\n' \
              '"elevation_units": "{elevation_units}",\n' \
              '"timestamp": "{timestamp}"\n' \
              '}}'.format(value=record_data['value'],
                          value_units=record_data['value_units'],
                          value_error_range=record_data['value_error_range'],
                          timestamp=str(record_data['timestamp'].strftime('%Y-%m-%dT%H:%M:%S')),
                          latitude=record_data['latitude'],
                          latitude_public=float(int(record_data['latitude'] * 1000)) / 1000,
                          longitude=record_data['longitude'],
                          longitude_public=float(int(record_data['longitude'] * 1000)) / 1000,
                          city=record_data['city'],
                          province=record_data['province'],
                          country=record_data['country'],
                          elevation=record_data['elevation'],
                          elevation_units=record_data['elevation_units'])
    log.debug('payload= {payload}'.format(payload=payload))
    headers = {
        'content-type': 'application/json',
        'cache-control': 'no-cache'
    }
    response = requests.request('POST', app_url, data=payload, headers=headers)
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=401)
    )
    assert response.status_code == 401, 'Expected a HTTP status code 401'
    assert len(response.text) > 0, 'Expected data in the response'
    log.info('End')
def test_step_03_create_record_with_auth(self):
    '''Create a pressure record with JWT token and verify the echoed fields.

    Stores the created record and its id on the class for later steps.
    '''
    log = logging.getLogger('TestCase.test_step_03_create_record_with_auth')
    log.info('Start')
    app_url = '{base_url}/{context}/{resource}/'.format(
        base_url=self.base_url,
        context=self.context,
        resource=self.resource
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    log.debug('app_url= {url}'.format(url=app_url))
    record_data = get_random_record_data()
    payload = '{{\n' \
              '"value": {value},\n' \
              '"value_units": "{value_units}",\n' \
              '"value_error_range": {value_error_range},\n' \
              '"latitude": {latitude},\n' \
              '"latitude_public": {latitude_public},\n' \
              '"longitude": {longitude},\n' \
              '"longitude_public": {longitude_public},\n' \
              '"city": "{city}",\n' \
              '"province": "{province}",\n' \
              '"country": "{country}",\n' \
              '"elevation": {elevation},\n' \
              '"elevation_units": "{elevation_units}",\n' \
              '"timestamp": "{timestamp}"\n' \
              '}}'.format(value=record_data['value'],
                          value_units=record_data['value_units'],
                          value_error_range=record_data['value_error_range'],
                          timestamp=str(record_data['timestamp'].strftime('%Y-%m-%dT%H:%M:%S')),
                          latitude=record_data['latitude'],
                          latitude_public=float(int(record_data['latitude'] * 1000)) / 1000,
                          longitude=record_data['longitude'],
                          longitude_public=float(int(record_data['longitude'] * 1000)) / 1000,
                          city=record_data['city'],
                          province=record_data['province'],
                          country=record_data['country'],
                          elevation=record_data['elevation'],
                          elevation_units=record_data['elevation_units'])
    log.debug('payload= {payload}'.format(payload=payload))
    headers = {
        'content-type': 'application/json',
        'authorization': 'JWT {token}'.format(token=TestCaseProtectedPressure.token),
        'cache-control': 'no-cache'
    }
    response = requests.request('POST', app_url, data=payload, headers=headers)
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=201)
    )
    assert response.status_code == 201, 'Expected a HTTP status code 201'
    assert len(response.text) > 0, 'Expected data in the response'
    json_data = json.loads(response.text)
    assert int(json_data['id']) > 0, 'Returned id is greater than 0'
    # Fixed: assertion messages were previously written *after* the call
    # (``self.assertEqual(a, b), 'msg'``), creating a discarded tuple;
    # they are now passed as the ``msg`` argument so they show on failure.
    self.assertEqual(
        json_data['value'],
        record_data['value'],
        'Returned value is the same')
    self.assertEqual(
        json_data['value_units'],
        record_data['value_units'],
        'Returned value_units is the same')
    self.assertEqual(
        json_data['value_error_range'],
        record_data['value_error_range'],
        'Returned value_error_range is the same')
    self.assertEqual(
        json_data['timestamp'],
        str(record_data['timestamp'].strftime('%Y-%m-%dT%H:%M:%S')),
        'Returned timestamp is the same')
    self.assertEqual(
        json_data['elevation'],
        record_data['elevation'],
        'Returned elevation is the same')
    self.assertEqual(
        json_data['elevation_units'],
        record_data['elevation_units'],
        'Returned elevation_units is the same')
    self.assertEqual(
        json_data['latitude'],
        record_data['latitude'],
        'Returned latitude is the same')
    self.assertEqual(
        json_data['latitude_public'],
        float(int(record_data['latitude'] * 1000)) / 1000,
        'Returned latitude_public is the same')
    self.assertEqual(
        json_data['longitude'],
        record_data['longitude'],
        'Returned longitude is the same')
    self.assertEqual(
        json_data['longitude_public'],
        float(int(record_data['longitude'] * 1000)) / 1000,
        'Returned longitude_public is the same')
    self.assertEqual(
        json_data['city'],
        record_data['city'],
        'Returned city is the same')
    self.assertEqual(
        json_data['province'],
        record_data['province'],
        'Returned province is the same')
    self.assertEqual(
        json_data['country'],
        record_data['country'],
        'Returned country is the same')
    # Share state with the subsequent GET/PUT steps.
    TestCaseProtectedPressure.last_id = json_data['id']
    TestCaseProtectedPressure.previous_record = json_data
    log.info('End')
def _create_record_expect_bad_request(self, test_name, **overrides):
    '''POST a record containing out-of-range values and expect HTTP 400.

    `overrides` replaces fields of a randomly generated record (e.g.
    ``latitude=-91``) to make the payload invalid. Deduplicates the four
    previously copy-pasted boundary tests below.
    '''
    log = logging.getLogger('TestCase.{name}'.format(name=test_name))
    log.info('Start')
    app_url = '{base_url}/{context}/{resource}/'.format(
        base_url=self.base_url,
        context=self.context,
        resource=self.resource
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    log.debug('app_url= {url}'.format(url=app_url))
    record_data = get_random_record_data()
    record_data.update(overrides)
    payload = '{{\n' \
              '"value": {value},\n' \
              '"value_units": "{value_units}",\n' \
              '"value_error_range": {value_error_range},\n' \
              '"latitude": {latitude},\n' \
              '"latitude_public": {latitude_public},\n' \
              '"longitude": {longitude},\n' \
              '"longitude_public": {longitude_public},\n' \
              '"city": "{city}",\n' \
              '"province": "{province}",\n' \
              '"country": "{country}",\n' \
              '"elevation": {elevation},\n' \
              '"elevation_units": "{elevation_units}",\n' \
              '"timestamp": "{timestamp}"\n' \
              '}}'.format(value=record_data['value'],
                          value_units=record_data['value_units'],
                          value_error_range=record_data['value_error_range'],
                          timestamp=str(record_data['timestamp'].strftime('%Y-%m-%dT%H:%M:%S')),
                          latitude=record_data['latitude'],
                          latitude_public=float(int(record_data['latitude'] * 1000)) / 1000,
                          longitude=record_data['longitude'],
                          longitude_public=float(int(record_data['longitude'] * 1000)) / 1000,
                          city=record_data['city'],
                          province=record_data['province'],
                          country=record_data['country'],
                          elevation=record_data['elevation'],
                          elevation_units=record_data['elevation_units'])
    log.debug('payload= {payload}'.format(payload=payload))
    headers = {
        'content-type': 'application/json',
        'authorization': 'JWT {token}'.format(token=TestCaseProtectedPressure.token),
        'cache-control': 'no-cache'
    }
    response = requests.request('POST', app_url, data=payload, headers=headers)
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=400)
    )
    assert response.status_code == 400, 'Expected a HTTP status code 400'
    assert len(response.text) > 0, 'Expected data in the response'
    log.info('End')

def test_step_03_1_create_record_with_auth_out_of_range_latitude(self):
    '''Reject creation when latitude is below -90.'''
    self._create_record_expect_bad_request(
        'test_step_03_1_create_record_with_auth_out_of_range_latitude',
        latitude=-91)

def test_step_03_2_create_record_with_auth_out_of_range_latitude(self):
    '''Reject creation when latitude is above 90.'''
    self._create_record_expect_bad_request(
        'test_step_03_2_create_record_with_auth_out_of_range_latitude',
        latitude=91)

def test_step_03_3_create_record_with_auth_out_of_range_longitude(self):
    '''Reject creation when longitude is below -180.'''
    self._create_record_expect_bad_request(
        'test_step_03_3_create_record_with_auth_out_of_range_longitude',
        longitude=-181)

def test_step_03_4_create_record_with_auth_out_of_range_longitude(self):
    '''Reject creation when longitude is above 180.

    Fixed: the previous docstring said "out of range latitude" although
    this test exercises the longitude boundary.
    '''
    self._create_record_expect_bad_request(
        'test_step_03_4_create_record_with_auth_out_of_range_longitude',
        longitude=181)
def test_step_04_get_record_without_auth(self):
    '''GET of a protected record without a JWT token must yield 401.'''
    log = logging.getLogger('TestCase.test_step_04_get_record_without_auth')
    log.info('Start')
    app_url = '{base_url}/{context}/{resource}/{last_id}'.format(
        base_url=self.base_url,
        context=self.context,
        resource=self.resource,
        last_id=TestCaseProtectedPressure.last_id
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    log.debug('app_url= {url}'.format(url=app_url))
    # No authorization header on purpose.
    response = requests.request(
        'GET', app_url, headers={'cache-control': 'no-cache'})
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=401)
    )
    assert response.status_code == 401, 'Expected a HTTP status code 401'
    log.info('End')
def test_step_05_get_record_with_auth(self):
    '''GET the previously created record with a JWT token and verify it.'''
    log = logging.getLogger('TestCase.test_step_05_get_record_with_auth')
    log.info('Start')
    app_url = '{base_url}/{context}/{resource}/{last_id}'.format(
        base_url=self.base_url,
        context=self.context,
        resource=self.resource,
        last_id=TestCaseProtectedPressure.last_id
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    log.debug('app_url= {url}'.format(url=app_url))
    headers = {
        'authorization': 'JWT {token}'.format(token=TestCaseProtectedPressure.token),
        'cache-control': 'no-cache'
    }
    response = requests.request('GET', app_url, headers=headers)
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=200)
    )
    assert response.status_code == 200, 'Expected a HTTP status code 200'
    json_data = json.loads(response.text)
    # Fixed: assertion messages were previously written *after* the call
    # (``self.assertEqual(a, b), 'msg'``), creating a discarded tuple;
    # they are now passed as the ``msg`` argument so they show on failure.
    self.assertEqual(
        int(json_data['id']),
        int(TestCaseProtectedPressure.last_id),
        'Returned id is the same')
    self.assertEqual(
        json_data['value'],
        TestCaseProtectedPressure.previous_record['value'],
        'Returned value is the same')
    self.assertEqual(
        json_data['value_units'],
        TestCaseProtectedPressure.previous_record['value_units'],
        'Returned value_units is the same')
    self.assertEqual(
        json_data['value_error_range'],
        TestCaseProtectedPressure.previous_record['value_error_range'],
        'Returned value_error_range is the same')
    self.assertEqual(
        json_data['timestamp'],
        TestCaseProtectedPressure.previous_record['timestamp'],
        'Returned timestamp is the same')
    self.assertEqual(
        json_data['elevation'],
        TestCaseProtectedPressure.previous_record['elevation'],
        'Returned elevation is the same')
    self.assertEqual(
        json_data['elevation_units'],
        TestCaseProtectedPressure.previous_record['elevation_units'],
        'Returned elevation_units is the same')
    self.assertEqual(
        json_data['latitude'],
        TestCaseProtectedPressure.previous_record['latitude'],
        'Returned latitude is the same')
    self.assertEqual(
        json_data['latitude_public'],
        TestCaseProtectedPressure.previous_record['latitude_public'],
        'Returned latitude_public is the same')
    self.assertEqual(
        json_data['longitude'],
        TestCaseProtectedPressure.previous_record['longitude'],
        'Returned longitude is the same')
    self.assertEqual(
        json_data['longitude_public'],
        TestCaseProtectedPressure.previous_record['longitude_public'],
        'Returned longitude_public is the same')
    self.assertEqual(
        json_data['city'],
        TestCaseProtectedPressure.previous_record['city'],
        'Returned city is the same')
    self.assertEqual(
        json_data['province'],
        TestCaseProtectedPressure.previous_record['province'],
        'Returned province is the same')
    self.assertEqual(
        json_data['country'],
        TestCaseProtectedPressure.previous_record['country'],
        'Returned country is the same')
    log.info('End')
def test_step_06_update_record_without_auth(self):
    '''Update a pressure record without JWT token.

    A PUT to the protected resource without credentials must yield 401.
    '''
    log = logging.getLogger('TestCase.test_step_06_update_record_without_auth')
    log.info('Start')
    app_url = '{base_url}/{context}/{resource}/{last_id}'.format(
        base_url=self.base_url,
        context=self.context,
        resource=self.resource,
        last_id=TestCaseProtectedPressure.last_id
    )
    log.debug('base_url= {url}'.format(url=self.base_url))
    log.debug('app_url= {url}'.format(url=app_url))
    # Fresh random payload; content is irrelevant since the request must
    # be rejected before validation.
    record_data = get_random_record_data()
    payload = '{{\n' \
              '"value": {value},\n' \
              '"value_units": "{value_units}",\n' \
              '"value_error_range": {value_error_range},\n' \
              '"latitude": {latitude},\n' \
              '"latitude_public": {latitude_public},\n' \
              '"longitude": {longitude},\n' \
              '"longitude_public": {longitude_public},\n' \
              '"city": "{city}",\n' \
              '"province": "{province}",\n' \
              '"country": "{country}",\n' \
              '"elevation": {elevation},\n' \
              '"elevation_units": "{elevation_units}",\n' \
              '"timestamp": "{timestamp}"\n' \
              '}}'.format(value=record_data['value'],
                          value_units=record_data['value_units'],
                          value_error_range=record_data['value_error_range'],
                          timestamp=str(record_data['timestamp'].strftime('%Y-%m-%dT%H:%M:%S')),
                          latitude=record_data['latitude'],
                          latitude_public=float(int(record_data['latitude'] * 1000)) / 1000,
                          longitude=record_data['longitude'],
                          longitude_public=float(int(record_data['longitude'] * 1000)) / 1000,
                          city=record_data['city'],
                          province=record_data['province'],
                          country=record_data['country'],
                          elevation=record_data['elevation'],
                          elevation_units=record_data['elevation_units'])
    log.debug('payload= {payload}'.format(payload=payload))
    # No authorization header on purpose.
    headers = {
        'content-type': 'application/json',
        'cache-control': 'no-cache'
    }
    response = requests.request('PUT', app_url, data=payload, headers=headers)
    log.debug('Got {response_code} - expected {expected_code}'.format(
        response_code=response.status_code,
        expected_code=401)
    )
    assert response.status_code == 401, 'Expected a HTTP status code 401'
    log.info('End')
def test_step_07_update_record_with_auth(self):
'''Update a pressure record with JWT token.'''
log = logging.getLogger('TestCase.test_step_07_update_record_with_auth')
log.info('Start')
app_url = '{base_url}/{context}/{resource}/{last_id}'.format(
base_url=self.base_url,
context=self.context,
resource=self.resource,
last_id=TestCaseProtectedPressure.last_id
)
log.debug('base_url= {url}'.format(url=self.base_url))
log.debug('app_url= {url}'.format(url=app_url))
record_data = get_random_record_data()
payload = '{{\n' \
'"value": {value},\n' \
'"value_units": "{value_units}",\n' \
'"value_error_range": {value_error_range},\n' \
'"latitude": {latitude},\n' \
'"latitude_public": {latitude_public},\n' \
'"longitude": {longitude},\n' \
'"longitude_public": {longitude_public},\n' \
'"city": "{city}",\n' \
'"province": "{province}",\n' \
'"country": "{country}",\n' \
'"elevation": {elevation},\n' \
'"elevation_units": "{elevation_units}",\n' \
'"timestamp": "{timestamp}"\n' \
'}}'.format(value=record_data['value'],
value_units=record_data['value_units'],
value_error_range=record_data['value_error_range'],
timestamp=str(record_data['timestamp'].strftime('%Y-%m-%dT%H:%M:%S')),
latitude=record_data['latitude'],
latitude_public=float(int(record_data['latitude'] * 1000)) / 1000,
longitude=record_data['longitude'],
longitude_public=float(int(record_data['longitude'] * 1000)) / 1000,
city=record_data['city'],
province=record_data['province'],
country=record_data['country'],
elevation=record_data['elevation'],
elevation_units=record_data['elevation_units'])
log.debug('payload= {payload}'.format(payload=payload))
headers = {
'content-type': 'application/json',
'authorization': 'JWT {token}'.format(token=TestCaseProtectedPressure.token),
'cache-control': 'no-cache'
}
response = requests.request('PUT', app_url, data=payload, headers=headers)
log.debug('Got {response_code} - expected {expected_code}'.format(
response_code=response.status_code,
expected_code=204)
)
assert response.status_code == 204, 'Expected a HTTP status code 204'
self.assertEqual(len(response.text), 0), 'Expected data in the response'
log.info('End')
def test_step_08_get_all_records_without_auth(self):
'''Get all pressure records without JWT token.'''
log = logging.getLogger('TestCase.test_step_08_get_all_records_without_auth')
log.info('Start')
app_url = '{base_url}/{context}/{resource}/'.format(
base_url=self.base_url,
context=self.context,
resource=self.resource
)
log.debug('base_url= {url}'.format(url=self.base_url))
log.debug('app_url= {url}'.format(url=app_url))
headers = {
'cache-control': 'no-cache'
}
querystring = {
"start": "0001-01-01",
"end": "9999-12-31",
"city": "Edmonton",
"province": "AB",
"country": "CA"
}
response = requests.request('GET', app_url, headers=headers, data='', params=querystring)
log.debug('Got {response_code} - expected {expected_code}'.format(
response_code=response.status_code,
expected_code=401)
)
assert response.status_code == 401, 'Expected a HTTP status code 401'
json_data = json.loads(response.text)
for item in json_data:
log.debug(str(item))
log.info('End')
def test_step_09_get_all_records_with_auth(self):
'''Get all pressure records with JWT token.'''
log = logging.getLogger('TestCase.test_step_09_get_all_records_with_auth')
log.info('Start')
app_url = '{base_url}/{context}/{resource}/'.format(
base_url=self.base_url,
context=self.context,
resource=self.resource
)
log.debug('base_url= {url}'.format(url=self.base_url))
log.debug('app_url= {url}'.format(url=app_url))
headers = {
'authorization': 'JWT {token}'.format(token=TestCaseProtectedPressure.token),
'cache-control': 'no-cache'
}
querystring = {
"start": "0001-01-01",
"end": "9999-12-31",
"city": "Edmonton",
"province": "AB",
"country": "CA"
}
response = requests.request('GET', app_url, headers=headers, data='', params=querystring)
log.debug('Got {response_code} - expected {expected_code}'.format(
response_code=response.status_code,
expected_code=200)
)
assert response.status_code == 200, 'Expected a HTTP status code 200'
json_data = json.loads(response.text)
for item in json_data:
log.debug(str(item))
log.info('End')
def test_step_10_delete_record_without_auth(self):
'''Delete a pressure record without JWT token.'''
log = logging.getLogger('TestCase.test_step_10_delete_record_without_auth')
log.info('Start')
app_url = '{base_url}/{context}/{resource}/{last_id}'.format(
base_url=self.base_url,
context=self.context,
resource=self.resource,
last_id=TestCaseProtectedPressure.last_id
)
log.debug('base_url= {url}'.format(url=self.base_url))
log.debug('app_url= {url}'.format(url=app_url))
payload = ''
log.debug('payload= {payload}'.format(payload=payload))
headers = {
'cache-control': 'no-cache'
}
response = requests.request('DELETE', app_url, data=payload, headers=headers)
log.debug('Got {response_code} - expected {expected_code}'.format(
response_code=response.status_code,
expected_code=401)
)
assert response.status_code == 401, 'Expected a HTTP status code 401'
log.info('End')
def test_step_11_delete_record_with_auth(self):
'''Delete a pressure record with JWT token.'''
log = logging.getLogger('TestCase.test_step_11_delete_record_with_auth')
log.info('Start')
app_url = '{base_url}/{context}/{resource}/{last_id}'.format(
base_url=self.base_url,
context=self.context,
resource=self.resource,
last_id=TestCaseProtectedPressure.last_id
)
log.debug('base_url= {url}'.format(url=self.base_url))
log.debug('app_url= {url}'.format(url=app_url))
payload = ''
log.debug('payload= {payload}'.format(payload=payload))
headers = {
'authorization': 'JWT {token}'.format(token=TestCaseProtectedPressure.token),
'cache-control': 'no-cache'
}
response = requests.request('DELETE', app_url, data=payload, headers=headers)
log.debug('Got {response_code} - expected {expected_code}'.format(
response_code=response.status_code,
expected_code=204)
)
assert response.status_code == 204, 'Expected a HTTP status code 204'
self.assertEqual(len(response.text), 0), 'Expected data in the response'
log.info('End')
def test_step_12_get_deleted_record_with_auth(self):
'''Get a deleted pressure record with JWT token.'''
log = logging.getLogger('TestCase.test_step_12_get_deleted_record_with_auth')
log.info('Start')
app_url = '{base_url}/{context}/{resource}/{last_id}'.format(
base_url=self.base_url,
context=self.context,
resource=self.resource,
last_id=TestCaseProtectedPressure.last_id
)
log.debug('base_url= {url}'.format(url=self.base_url))
log.debug('app_url= {url}'.format(url=app_url))
headers = {
'authorization': 'JWT {token}'.format(token=TestCaseProtectedPressure.token),
'cache-control': 'no-cache'
}
response = requests.request('GET', app_url, headers=headers)
log.debug('Got {response_code} - expected {expected_code}'.format(
response_code=response.status_code,
expected_code=404)
)
assert response.status_code == 404, 'Expected a HTTP status code 404'
log.info('End')
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stderr)
    # Enable debug-level output on every test case's dedicated logger.
    _debug_test_names = (
        'test_step_00_login_fail',
        'test_step_01_login',
        'test_step_02_create_record_without_auth',
        'test_step_03_create_record_with_auth',
        'test_step_03_1_create_record_with_auth_out_of_range_latitude',
        'test_step_03_2_create_record_with_auth_out_of_range_latitude',
        'test_step_03_3_create_record_with_auth_out_of_range_longitude',
        'test_step_03_4_create_record_with_auth_out_of_range_longitude',
        'test_step_04_get_record_without_auth',
        'test_step_05_get_record_with_auth',
        'test_step_06_update_record_without_auth',
        'test_step_07_update_record_with_auth',
        'test_step_08_get_all_records_without_auth',
        'test_step_09_get_all_records_with_auth',
        'test_step_10_delete_record_without_auth',
        'test_step_11_delete_record_with_auth',
        'test_step_12_get_deleted_record_with_auth',
    )
    for _test_name in _debug_test_names:
        logging.getLogger('TestCase.{0}'.format(_test_name)).setLevel(logging.DEBUG)
    unittest.main()
| {
"content_hash": "19e08c69911ff5e1d24da20592ee02c1",
"timestamp": "",
"source": "github",
"line_count": 1110,
"max_line_length": 119,
"avg_line_length": 40.31171171171171,
"alnum_prop": 0.5480042908863362,
"repo_name": "Fyzel/weather-data-flaskapi",
"id": "dcbe1cf0ff90a097995569aeb7da4ff8e7eb4bb1",
"size": "44746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/tests/protected_pressure_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "232258"
}
],
"symlink_target": ""
} |
from .utils import InputExample, InputFeatures, DataProcessor
from .glue import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features
from .squad import squad_convert_examples_to_features, SquadFeatures, SquadExample, SquadV1Processor, SquadV2Processor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | {
"content_hash": "bd07a9b27c8ab58c3f371c5e2e339df2",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 118,
"avg_line_length": 91.75,
"alnum_prop": 0.8337874659400545,
"repo_name": "HLTCHKUST/Xpersona",
"id": "0f1b24893ae961a7c286c882c1dd50a84b76a35c",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multilingual/transformers/data/processors/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "5230"
},
{
"name": "Python",
"bytes": "2391176"
},
{
"name": "Shell",
"bytes": "27554"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the ``initializer`` foreign key to MusicianMusicianConversation.

    The field points at ``profiles.Musician`` — presumably the musician who
    started the conversation (inferred from the name; confirm against the
    messaging views).
    """

    dependencies = [
        # profiles must be migrated first so the Musician table exists
        # before the FK is created.
        ('profiles', '0008_auto_20150801_1847'),
        ('messaging', '0002_auto_20150801_1852'),
    ]

    operations = [
        migrations.AddField(
            model_name='musicianmusicianconversation',
            name='initializer',
            # NOTE(review): default='' is an odd one-shot default for a FK
            # column (a pk would be expected); with preserve_default=False it
            # is used only to backfill existing rows during this migration.
            field=models.ForeignKey(related_name='initializer', default='', blank=True, to='profiles.Musician'),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "447743875479259e61e5377285f026c0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 112,
"avg_line_length": 27.6,
"alnum_prop": 0.6231884057971014,
"repo_name": "lancekrogers/music-network",
"id": "b31a9687402ccc34e0c49b6dbff9875cf46cce25",
"size": "576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleff/messaging/migrations/0003_musicianmusicianconversation_initializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50628"
},
{
"name": "HTML",
"bytes": "48099"
},
{
"name": "JavaScript",
"bytes": "668599"
},
{
"name": "Python",
"bytes": "141988"
}
],
"symlink_target": ""
} |
import datetime as dt
import numpy as np
from numpy.linalg import eigh
def parse_range(daterange):
    """Convert a shorthand range string into a ``datetime.timedelta``.

    Supported suffixes: ``d`` = days, ``w`` = weeks, ``m`` = months
    (approximated as 4 weeks), ``y`` = years (approximated as 48 weeks).
    Suffixes are tested in that order, e.g. ``'3d'`` -> 3 days.

    Args:
        daterange: string such as ``'3d'``, ``'2w'``, ``'6m'`` or ``'1y'``.

    Returns:
        datetime.timedelta for the requested span.

    Raises:
        ValueError: if ``daterange`` contains no recognized unit suffix.
    """
    if 'd' in daterange:
        return dt.timedelta(days=int(daterange.replace('d', '')))
    elif 'w' in daterange:
        return dt.timedelta(weeks=int(daterange.replace('w', '')))
    elif 'm' in daterange:
        return dt.timedelta(weeks=4 * int(daterange.replace('m', '')))
    elif 'y' in daterange:
        return dt.timedelta(weeks=48 * int(daterange.replace('y', '')))
    else:
        # Bug fix: a bare `raise` with no active exception raised
        # RuntimeError("No active exception to re-raise"); raise an
        # informative ValueError instead.
        raise ValueError('Unrecognized date range: {0!r}'.format(daterange))
def parse_dates(end_date, daterange):
    """Return ISO-8601 ``(start, end)`` strings for a window ending at *end_date*.

    Args:
        end_date: ``'YYYY-MM-DD'`` string marking the end of the window.
        daterange: shorthand span understood by ``parse_range`` (e.g. ``'2w'``).

    Returns:
        Tuple of ISO-format datetime strings ``(start, end)``.
    """
    window_end = dt.datetime.strptime(end_date, '%Y-%m-%d')
    window_start = window_end - parse_range(daterange)
    return window_start.isoformat(), window_end.isoformat()
def modify_record(record, append=None, update=None):
    """Mutate *record* (a dict) in place and return it.

    Args:
        record: the dict to modify.
        append: optional mapping merged into *record* via ``dict.update``.
        update: optional ``(key, value)`` pair assigned into *record*.

    Returns:
        The same *record* object, after mutation.
    """
    # Idiom fix: compare against None with `is not`, not `!=`.
    if append is not None:
        record.update(append)
    if update is not None:
        record[update[0]] = update[1]
    return record
| {
"content_hash": "018700da05bccc8ed70c7036f08b5206",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 69,
"avg_line_length": 33.03703703703704,
"alnum_prop": 0.6446188340807175,
"repo_name": "ldkge/visfx",
"id": "8be65e46489edecae85590b95e2a25e495a9da2c",
"size": "892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spark/recommendations/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2030"
},
{
"name": "HTML",
"bytes": "19420"
},
{
"name": "JavaScript",
"bytes": "234546"
},
{
"name": "Python",
"bytes": "35611"
},
{
"name": "Shell",
"bytes": "1207"
},
{
"name": "TeX",
"bytes": "83089"
}
],
"symlink_target": ""
} |
"""
Variation on the "promise" design pattern.
Promises make it easier to handle asynchronous operations correctly.
"""
import time
import logging
LOGGER = logging.getLogger(__name__)
class BrokenPromise(Exception):
    """
    Raised when a promise could not be satisfied within its time or
    retry limits.
    """

    def __init__(self, promise):
        """
        Store the failed promise so it can be reported.

        Args:
            promise (Promise): The promise that was not satisfied.
        """
        super().__init__()
        self._promise = promise

    def __str__(self):
        return "Promise not satisfied: {0}".format(self._promise)
class Promise:
    """
    Repeatedly evaluate an asynchronous condition, blocking until it holds
    or the configured timeout / retry budget is exhausted.
    """

    def __init__(self, check_func, description, try_limit=None, try_interval=0.5, timeout=30):
        """
        Configure the `Promise`.

        `check_func()` is polled until one of the following happens:

        * it reports success,
        * it has been called `try_limit` times, or
        * more than `timeout` seconds have elapsed.

        If the retry or time budget runs out first, the promise is "broken"
        and an exception is raised. Note that the default `timeout` still
        applies when only `try_limit` is given (this guards against an
        inadvertent infinite loop); pass a larger `timeout` explicitly if
        you want the try limit to be the binding constraint.

        `description` is included in the exception text to ease debugging.

        Example:

        .. code:: python

            # Dummy check function that always reports success
            check_func = lambda: (True, "Hello world!")

            # Check up to 5 times whether the operation has completed
            result = Promise(check_func, "Operation has completed", try_limit=5).fulfill()

        Args:
            check_func (callable): Takes no arguments and returns an
                `(is_satisfied, result)` tuple — a boolean success flag and
                the value the fulfilled `Promise` should produce.
            description (str): Description of the `Promise`, used in log
                and error messages.

        Keyword Args:
            try_limit (int or None): Maximum number of polls; `None` means
                no limit.
            try_interval (float): Seconds to sleep between polls.
            timeout (float): Maximum number of seconds to keep polling
                before the `Promise` is considered broken.

        Returns:
            Promise
        """
        self._check_func = check_func
        self._description = description
        self._try_limit = try_limit
        self._try_interval = try_interval
        self._timeout = timeout
        self._num_tries = 0

    def fulfill(self):
        """
        Evaluate the promise and return its result value.

        Returns:
            The result of the `Promise` (second element returned by the
            check function).

        Raises:
            BrokenPromise: the `Promise` was not satisfied within the time
                or attempt limits.
        """
        fulfilled, value = self._check_fulfilled()
        if not fulfilled:
            raise BrokenPromise(self)
        return value

    def __str__(self):
        return str(self._description)

    def _check_fulfilled(self):
        """
        Poll the check function until fulfilled or out of time/tries.

        Returns:
            `(is_fulfilled, result)` tuple.
        """
        satisfied = False
        outcome = None
        started = time.time()

        # Keep polling while both the time and retry budgets allow it.
        while self._has_time_left(started) and self._has_more_tries():
            self._num_tries += 1
            satisfied, outcome = self._check_func()

            if satisfied:
                break

            # Pause before the next poll.
            time.sleep(self._try_interval)

        return satisfied, outcome

    def _has_time_left(self, start_time):
        """
        Return True while the elapsed time is still under the timeout.
        """
        return time.time() - start_time < self._timeout

    def _has_more_tries(self):
        """
        Return True if another attempt is allowed.

        Unlimited when `_try_limit` is `None`.
        """
        return self._try_limit is None or self._num_tries < self._try_limit
class EmptyPromise(Promise):
    """
    A promise whose fulfillment produces no result value.
    """

    def __init__(self, check_func, description, **kwargs):
        """
        Configure the promise.

        Unlike a plain `Promise`, here `check_func()` returns only a boolean
        success flag — no result tuple. That is why the promise is "empty":
        fulfilling it yields nothing.

        Example usage:

        .. code:: python

            # Block until `is_done` returns `True` or the timeout expires.
            EmptyPromise(lambda: is_done('test'), "Test operation is done").fulfill()

        Args:
            check_func (callable): Takes no arguments and returns a boolean
                indicating whether the promise is fulfilled.
            description (str): Description of the promise, used in log messages.

        Returns:
            EmptyPromise
        """
        def _check_with_empty_result():
            # Adapt the boolean check to the (is_fulfilled, result) protocol.
            return check_func(), None

        super().__init__(_check_with_empty_result, description, **kwargs)
| {
"content_hash": "be2adb92522a8ba6a0eac47b1a013cf6",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 117,
"avg_line_length": 33.31666666666667,
"alnum_prop": 0.6074704018676005,
"repo_name": "edx/bok-choy",
"id": "7ef66711f0bd064c8892888a3893f77ea5940136",
"size": "5997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bok_choy/promise.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11992"
},
{
"name": "JavaScript",
"bytes": "1203"
},
{
"name": "Makefile",
"bytes": "2497"
},
{
"name": "Python",
"bytes": "181702"
}
],
"symlink_target": ""
} |
from ..cw_model import CWModel
class ExpenseEntry(CWModel):
    """ConnectWise expense-entry model.

    Each attribute mirrors a field of the ExpenseEntry resource in the
    ConnectWise API; the inline comments record the API type. The ``*`` /
    ``**`` markers follow the project's convention — presumably required and
    conditionally-required fields; confirm against the ConnectWise API docs.
    """

    def __init__(self, json_dict=None):
        """Initialize every known field to None, then let CWModel populate
        the instance from *json_dict* (a decoded API response) if given.
        """
        self.id = None  # (Integer)
        self.company = None  # **(CompanyReference)
        self.chargeToId = None  # (Integer)
        self.chargeToType = None  # **(Enum)
        self.type = None  # *(ExpenseTypeReference)
        self.member = None  # (MemberReference)
        self.paymentMethod = None  # (PaymentMethodReference)
        self.classification = None  # (ClassificationReference)
        self.amount = None  # *(Number)
        self.billableOption = None  # *(Enum)
        self.date = None  # *(String)
        self.locationId = None  # (Integer)
        self.businessUnitId = None  # (Integer)
        self.notes = None  # (String)
        self.agreement = None  # (AgreementReference)
        self.invoiceAmount = None  # (Number)
        self.taxes = None  # (ExpenseTax[])
        self.invoice = None  # (InvoiceReference)
        self._info = None  # (Metadata)

        # initialize object with json dict
        super().__init__(json_dict)
| {
"content_hash": "66782f4c71ac660931892db89878f86f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 39.357142857142854,
"alnum_prop": 0.5762250453720508,
"repo_name": "joshuamsmith/ConnectPyse",
"id": "8058352e8e1025b9c39b593adc41b94758599897",
"size": "1102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expense/expense_entry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158372"
}
],
"symlink_target": ""
} |
from .threading_decorators import Threaded_Function, as_thread, check_stop

# Hide the implementation module from the package namespace so only the
# decorator API is exposed. (`del` is a statement, not a function.)
del threading_decorators

try:
    import IPython
    if IPython.get_ipython() is not None:  # We are in an IPython session
        try:
            from .matplotlib_qt_signals import Main_Loop_Caller
            gui_safe = Main_Loop_Caller
        except Exception:
            # Qt/matplotlib support is optional best-effort; leave gui_safe
            # undefined when the import fails. (Narrowed from a bare
            # `except:` so SystemExit/KeyboardInterrupt still propagate.)
            pass
        else:
            del matplotlib_qt_signals
except Exception:
    # IPython is optional: outside an IPython session there is nothing
    # GUI-related to set up. (Narrowed from a bare `except:` as above.)
    pass
else:
    del IPython
| {
"content_hash": "c48b70b970d514064cba04b1d15e63d2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6261261261261262,
"repo_name": "Vrekrer/threading_decorators",
"id": "4539ebe3499dfda815675689ad0e85f7c9c127a4",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2585"
}
],
"symlink_target": ""
} |
import pymongo
import bottle
import sys
@bottle.get("/hw1/<n>")
def get_hw1(n):
    """Return the value of the n-th smallest document in m101.funnynumbers.

    The URL parameter *n* is a zero-based offset into the collection sorted
    ascending by 'value'; the value is returned as a newline-terminated
    integer string.
    """
    connection = pymongo.MongoClient("mongodb://localhost")
    n = int(n)
    db = connection.m101
    collection = db.funnynumbers
    try:
        # Sort ascending by value, skip n documents and fetch a single one.
        cursor = collection.find({}, limit=1, skip=n).sort('value', direction=1)
        for item in cursor:
            return str(int(item['value'])) + "\n"
    except Exception:
        # Narrowed from a bare `except:`; the parenthesized print call works
        # under both Python 2 and Python 3. (Removed the unused `magic`
        # local as well.)
        print("Error trying to read collection: %s" % (sys.exc_info()[0],))
# Enable bottle's debug mode (full tracebacks in responses) and start the
# built-in development server on localhost:8080.
bottle.debug(True)
bottle.run(host='localhost', port=8080)
| {
"content_hash": "e8335899daf07f882427662dc4ef8833",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 19.62962962962963,
"alnum_prop": 0.6264150943396226,
"repo_name": "MaximAbramchuck/mongodb-university-homeworks",
"id": "027b821a860de4cd9ab88f6374eed32c8dd9d140",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter_1/homework1.3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1092"
},
{
"name": "Python",
"bytes": "2963"
}
],
"symlink_target": ""
} |
"""
Django settings for django_content_toolkit project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '(5)2%n3-b0#5e5z0g@^)98pa$=tc&$6kgngrv3@u*17xnut_fe'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True

# Empty list: acceptable only while DEBUG is True.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'example',  # local app; also provides the WSGI entry point below
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'django_content_toolkit.urls'

WSGI_APPLICATION = 'example.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# SQLite file in the project root — fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'

# Project-level template directory, searched in addition to app templates.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates'),
)
"content_hash": "f4ac515c462ece5754e3e558a31db3a5",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 24.454545454545453,
"alnum_prop": 0.7221189591078067,
"repo_name": "esistgut/django-content-toolkit",
"id": "08902eadd2e556f2d5564994562516d89f82a404",
"size": "2152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "46"
},
{
"name": "HTML",
"bytes": "494"
},
{
"name": "JavaScript",
"bytes": "2103"
},
{
"name": "Python",
"bytes": "42244"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the `type` column to `portal_othername`."""
    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'OtherName.type'
        # keep_default=False: 'other' only backfills existing rows; it is not
        # retained as a database-level column default.
        db.add_column('portal_othername', 'type', self.gf('django.db.models.fields.CharField')(default='other', max_length=10), keep_default=False)
    def backwards(self, orm):
        """Reverse the migration (drops the column added by forwards())."""
        # Deleting field 'OtherName.type'
        db.delete_column('portal_othername', 'type')
    # Frozen ORM snapshot auto-generated by South at the time this migration
    # was created. Do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'portal.authority': {
            'Meta': {'object_name': 'Authority', '_ormbases': ['portal.Resource']},
            'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'lod': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
            'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}),
            'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'type_of_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'portal.collection': {
            'Meta': {'object_name': 'Collection', '_ormbases': ['portal.Resource']},
            'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'accruals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'acquisition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'alternate_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'appraisal': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'archival_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'arrangement': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Authority']", 'null': 'True', 'blank': 'True'}),
            'edition': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'extent_and_medium': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'institution_responsible_identifier': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'location_of_copies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'location_of_originals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'lod': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'physical_characteristics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'related_units_of_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}),
            'reproduction_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
            'revision_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'scope_and_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}),
            'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'portal.contact': {
            'Meta': {'object_name': 'Contact'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'contact_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'country_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'repository': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Repository']"}),
            'street_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'telephone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'portal.fuzzydate': {
            'Meta': {'object_name': 'FuzzyDate'},
            'circa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'date_set'", 'to': "orm['portal.Collection']"}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'precision': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'start_date': ('django.db.models.fields.DateField', [], {}),
            'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'portal.othername': {
            'Meta': {'object_name': 'OtherName'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'portal.place': {
            'Meta': {'object_name': 'Place'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"})
        },
        'portal.property': {
            'Meta': {'object_name': 'Property'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'portal.relation': {
            'Meta': {'object_name': 'Relation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}),
            'subject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['portal.Resource']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'portal.repository': {
            'Meta': {'object_name': 'Repository', '_ormbases': ['portal.Resource']},
            'access_conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'buildings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'collecting_policies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'dates_of_existence': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'disabled_access': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'finding_aids': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'functions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'general_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'geocultural_context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'holdings': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'internal_structures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'legal_status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'lod': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'logo': ('portal.thumbs.ImageWithThumbsField', [], {'name': "'logo'", 'sizes': '((100, 100), (300, 300))', 'max_length': '100', 'blank': 'True', 'null': 'True'}),
            'maintenance_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'mandates': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'opening_times': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'places': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'reproduction_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'research_services': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['portal.Resource']", 'unique': 'True', 'primary_key': 'True'}),
            'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'db_index': 'True'}),
            'sources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'type_of_entity': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'portal.resource': {
            'Meta': {'object_name': 'Resource'},
            'created_on': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'portal.resourceimage': {
            'Meta': {'object_name': 'ResourceImage'},
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('portal.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'name': "'image'", 'sizes': '((100, 100), (300, 300))'}),
            'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['portal.Resource']"})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }
    complete_apps = ['portal']
| {
"content_hash": "ad079eaf7453d31701381a09558349c8",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 174,
"avg_line_length": 83.24615384615385,
"alnum_prop": 0.5414895583071521,
"repo_name": "mikesname/ehri-collections",
"id": "0c280ffa677d01b78182fdc78bea17d914ede564",
"size": "16251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ehriportal/portal/migrations/0006_auto__add_field_othername_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "3957"
},
{
"name": "JavaScript",
"bytes": "26562"
},
{
"name": "Python",
"bytes": "460117"
},
{
"name": "Shell",
"bytes": "204"
}
],
"symlink_target": ""
} |
import datetime
import os
import re
import unittest
import uuid
from decimal import Decimal
import pytest
from django.http import QueryDict
from django.test import TestCase, override_settings
from django.utils import six, timezone
import rest_framework
from rest_framework import serializers
from rest_framework.fields import is_simple_callable
try:
    # BUG FIX: the stdlib module is `typing` (Python 3.5+), not `typings`.
    # The old `import typings` always raised ImportError, so the
    # `skipUnless(typings, ...)` guard below permanently skipped the
    # annotation test. Bind the module under the existing `typings` name so
    # the guard keeps working unchanged.
    import typing as typings
except ImportError:
    typings = False
# Tests for helper functions.
# ---------------------------
class TestIsSimpleCallable:
    """Tests for fields.is_simple_callable: it should be true only for
    callables that can be invoked with zero arguments."""
    def test_method(self):
        # Bound methods consume `self`, so they are zero-arg callable when all
        # remaining parameters are optional; unbound functions are not.
        class Foo:
            @classmethod
            def classmethod(cls):
                pass
            def valid(self):
                pass
            def valid_kwargs(self, param='value'):
                pass
            def valid_vargs_kwargs(self, *args, **kwargs):
                pass
            def invalid(self, param):
                pass
        assert is_simple_callable(Foo.classmethod)
        # unbound methods
        assert not is_simple_callable(Foo.valid)
        assert not is_simple_callable(Foo.valid_kwargs)
        assert not is_simple_callable(Foo.valid_vargs_kwargs)
        assert not is_simple_callable(Foo.invalid)
        # bound methods
        assert is_simple_callable(Foo().valid)
        assert is_simple_callable(Foo().valid_kwargs)
        assert is_simple_callable(Foo().valid_vargs_kwargs)
        assert not is_simple_callable(Foo().invalid)
    def test_function(self):
        # Plain functions: simple iff every parameter is optional/variadic.
        def simple():
            pass
        def valid(param='value', param2='value'):
            pass
        def valid_vargs_kwargs(*args, **kwargs):
            pass
        def invalid(param, param2='value'):
            pass
        assert is_simple_callable(simple)
        assert is_simple_callable(valid)
        assert is_simple_callable(valid_vargs_kwargs)
        assert not is_simple_callable(invalid)
    def test_4602_regression(self):
        # Regression test (issue 4602): Django's auto-generated
        # get_FOO_display helper must be treated as a simple callable.
        from django.db import models
        class ChoiceModel(models.Model):
            choice_field = models.CharField(
                max_length=1, default='a',
                choices=(('a', 'A'), ('b', 'B')),
            )
            class Meta:
                app_label = 'tests'
        assert is_simple_callable(ChoiceModel().get_choice_field_display)
    @unittest.skipUnless(typings, 'requires python 3.5')
    def test_type_annotation(self):
        # The annotation will otherwise raise a syntax error in python < 3.5
        # exec into locals() so the annotated def only parses on 3.5+; the
        # defined function is then fetched back out of the frame locals dict.
        exec("def valid(param: str='value'): pass", locals())
        valid = locals()['valid']
        assert is_simple_callable(valid)
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
class TestEmpty:
    """
    Tests for `required`, `allow_null`, `allow_blank`, `default`.
    """
    def test_required(self):
        """
        By default a field must be included in the input.
        """
        field = serializers.IntegerField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation()
        assert exc_info.value.detail == ['This field is required.']
    def test_not_required(self):
        """
        If `required=False` then a field may be omitted from the input.
        """
        field = serializers.IntegerField(required=False)
        with pytest.raises(serializers.SkipField):
            field.run_validation()
    def test_disallow_null(self):
        """
        By default `None` is not a valid input.
        """
        field = serializers.IntegerField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation(None)
        assert exc_info.value.detail == ['This field may not be null.']
    def test_allow_null(self):
        """
        If `allow_null=True` then `None` is a valid input.
        """
        field = serializers.IntegerField(allow_null=True)
        output = field.run_validation(None)
        assert output is None
    def test_disallow_blank(self):
        """
        By default '' is not a valid input.
        """
        field = serializers.CharField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation('')
        assert exc_info.value.detail == ['This field may not be blank.']
    def test_allow_blank(self):
        """
        If `allow_blank=True` then '' is a valid input.
        """
        field = serializers.CharField(allow_blank=True)
        output = field.run_validation('')
        assert output == ''
    def test_default(self):
        """
        If `default` is set, then omitted values get the default input.
        """
        field = serializers.IntegerField(default=123)
        output = field.run_validation()
        # BUG FIX: was `output is 123` -- identity comparison with an int
        # literal only passes via CPython's small-int cache and is a
        # SyntaxWarning on newer Pythons. Compare by equality instead.
        assert output == 123
class TestSource:
    """Behaviour of the `source=` argument to serializer fields."""
    def test_source(self):
        # The validated key is the source name, not the field name.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='other')
        ser = ExampleSerializer(data={'example_field': 'abc'})
        assert ser.is_valid()
        assert ser.validated_data == {'other': 'abc'}
    def test_redundant_source(self):
        # A source identical to the field name is rejected at binding time.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_field')
        with pytest.raises(AssertionError) as exc_info:
            ExampleSerializer().fields
        expected = (
            "It is redundant to specify `source='example_field'` on field "
            "'CharField' in serializer 'ExampleSerializer', because it is the "
            "same as the field name. Remove the `source` keyword argument."
        )
        assert str(exc_info.value) == expected
    def test_callable_source(self):
        # A source naming a zero-arg method is called during serialization.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_callable')
        class ExampleInstance(object):
            def example_callable(self):
                return 'example callable value'
        ser = ExampleSerializer(ExampleInstance())
        assert ser.data['example_field'] == 'example callable value'
    def test_callable_source_raises(self):
        # An exception inside the sourced callable surfaces as a ValueError.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_callable', read_only=True)
        class ExampleInstance(object):
            def example_callable(self):
                raise AttributeError('method call failed')
        with pytest.raises(ValueError) as exc_info:
            ser = ExampleSerializer(ExampleInstance())
            ser.data.items()
        assert 'method call failed' in str(exc_info.value)
class TestReadOnly:
    """Read-only fields are serialized but excluded from validation."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            read_only = serializers.ReadOnlyField()
            writable = serializers.IntegerField()
        self.Serializer = TestSerializer
    def test_validate_read_only(self):
        # Only the writable field survives into validated_data.
        serializer = self.Serializer(data={'read_only': 123, 'writable': 456})
        assert serializer.is_valid()
        assert serializer.validated_data == {'writable': 456}
    def test_serialize_read_only(self):
        # Both fields appear in the serialized output.
        serializer = self.Serializer({'read_only': 123, 'writable': 456})
        assert serializer.data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
    """Write-only fields are validated but excluded from serialization."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            write_only = serializers.IntegerField(write_only=True)
            readable = serializers.IntegerField()
        self.Serializer = TestSerializer
    def test_validate_write_only(self):
        # Both fields take part in validation.
        serializer = self.Serializer(data={'write_only': 123, 'readable': 456})
        assert serializer.is_valid()
        assert serializer.validated_data == {'write_only': 123, 'readable': 456}
    def test_serialize_write_only(self):
        # Only the readable field appears in the serialized output.
        serializer = self.Serializer({'write_only': 123, 'readable': 456})
        assert serializer.data == {'readable': 456}
class TestInitial:
    """`initial=` supplies the value rendered for an unbound serializer."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            initial_field = serializers.IntegerField(initial=123)
            blank_field = serializers.IntegerField()
        self.serializer = TestSerializer()
    def test_initial(self):
        # Fields without an explicit initial render as None.
        expected = {'initial_field': 123, 'blank_field': None}
        assert self.serializer.data == expected
class TestInitialWithCallable:
    """`initial=` may also be a zero-argument callable."""
    def setup(self):
        def initial_value():
            return 123
        class TestSerializer(serializers.Serializer):
            initial_field = serializers.IntegerField(initial=initial_value)
        self.serializer = TestSerializer()
    def test_initial_should_accept_callable(self):
        # The callable is invoked to produce the rendered initial value.
        assert self.serializer.data == {'initial_field': 123}
class TestLabel:
    """The `label=` argument is stored verbatim on the bound field."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            labeled = serializers.IntegerField(label='My label')
        self.serializer = TestSerializer()
    def test_label(self):
        assert self.serializer.fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
    """`Field.fail()` with an unknown error key raises an AssertionError."""
    def setup(self):
        class ExampleField(serializers.Field):
            def to_native(self, data):
                self.fail('incorrect')
        self.field = ExampleField()
    def test_invalid_error_key(self):
        # A missing `error_messages` entry is a programming error, so the
        # framework surfaces it as an assertion, not a ValidationError.
        with pytest.raises(AssertionError) as exc_info:
            self.field.to_native(123)
        assert str(exc_info.value) == (
            'ValidationError raised by `ExampleField`, but error key '
            '`incorrect` does not exist in the `error_messages` dictionary.'
        )
class TestBooleanHTMLInput:
    """An absent HTML checkbox is interpreted as False by BooleanField."""
    def test_empty_html_checkbox(self):
        # Unchecked checkboxes send no value at all.
        class TestSerializer(serializers.Serializer):
            archived = serializers.BooleanField()
        ser = TestSerializer(data=QueryDict(''))
        assert ser.is_valid()
        assert ser.validated_data == {'archived': False}
    def test_empty_html_checkbox_not_required(self):
        # Same outcome even with required=False.
        class TestSerializer(serializers.Serializer):
            archived = serializers.BooleanField(required=False)
        ser = TestSerializer(data=QueryDict(''))
        assert ser.is_valid()
        assert ser.validated_data == {'archived': False}
class TestHTMLInput:
    """How empty/omitted HTML form input interacts with `default`,
    `allow_blank`, `allow_null` and `required`."""
    def test_empty_html_charfield_with_default(self):
        # Omitted key falls back to the field default.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(default='happy')
        ser = TestSerializer(data=QueryDict(''))
        assert ser.is_valid()
        assert ser.validated_data == {'message': 'happy'}
    def test_empty_html_charfield_without_default(self):
        # Present-but-empty value validates to '' when allow_blank=True.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_blank=True)
        ser = TestSerializer(data=QueryDict('message='))
        assert ser.is_valid()
        assert ser.validated_data == {'message': ''}
    def test_empty_html_charfield_without_default_not_required(self):
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_blank=True, required=False)
        ser = TestSerializer(data=QueryDict('message='))
        assert ser.is_valid()
        assert ser.validated_data == {'message': ''}
    def test_empty_html_integerfield(self):
        # Empty numeric input falls back to the default.
        class TestSerializer(serializers.Serializer):
            message = serializers.IntegerField(default=123)
        ser = TestSerializer(data=QueryDict('message='))
        assert ser.is_valid()
        assert ser.validated_data == {'message': 123}
    def test_empty_html_uuidfield_with_default(self):
        class TestSerializer(serializers.Serializer):
            message = serializers.UUIDField(default=uuid.uuid4)
        ser = TestSerializer(data=QueryDict('message='))
        assert ser.is_valid()
        # The generated UUID is random, so only check the key is present.
        assert list(ser.validated_data.keys()) == ['message']
    def test_empty_html_uuidfield_with_optional(self):
        class TestSerializer(serializers.Serializer):
            message = serializers.UUIDField(required=False)
        ser = TestSerializer(data=QueryDict('message='))
        assert ser.is_valid()
        assert list(ser.validated_data.keys()) == []
    def test_empty_html_charfield_allow_null(self):
        # Empty input maps to None when allow_null=True.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_null=True)
        ser = TestSerializer(data=QueryDict('message='))
        assert ser.is_valid()
        assert ser.validated_data == {'message': None}
    def test_empty_html_datefield_allow_null(self):
        class TestSerializer(serializers.Serializer):
            expiry = serializers.DateField(allow_null=True)
        ser = TestSerializer(data=QueryDict('expiry='))
        assert ser.is_valid()
        assert ser.validated_data == {'expiry': None}
    def test_empty_html_charfield_allow_null_allow_blank(self):
        # With both flags set, empty input validates to '' rather than None.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_null=True, allow_blank=True)
        ser = TestSerializer(data=QueryDict('message='))
        assert ser.is_valid()
        assert ser.validated_data == {'message': ''}
    def test_empty_html_charfield_required_false(self):
        # Omitted and not required: the key is simply absent.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(required=False)
        ser = TestSerializer(data=QueryDict(''))
        assert ser.is_valid()
        assert ser.validated_data == {}
    def test_querydict_list_input(self):
        # Repeated QueryDict keys become a list for ListField.
        class TestSerializer(serializers.Serializer):
            scores = serializers.ListField(child=serializers.IntegerField())
        ser = TestSerializer(data=QueryDict('scores=1&scores=3'))
        assert ser.is_valid()
        assert ser.validated_data == {'scores': [1, 3]}
    def test_querydict_list_input_only_one_input(self):
        class TestSerializer(serializers.Serializer):
            scores = serializers.ListField(child=serializers.IntegerField())
        ser = TestSerializer(data=QueryDict('scores=1&'))
        assert ser.is_valid()
        assert ser.validated_data == {'scores': [1]}
class TestCreateOnlyDefault:
    """`CreateOnlyDefault` injects its default on create but not on update."""
    def setup(self):
        default = serializers.CreateOnlyDefault('2001-01-01')
        class TestSerializer(serializers.Serializer):
            published = serializers.HiddenField(default=default)
            text = serializers.CharField()
        self.Serializer = TestSerializer
    def test_create_only_default_is_provided(self):
        # No instance given -> create -> default is injected.
        ser = self.Serializer(data={'text': 'example'})
        assert ser.is_valid()
        assert ser.validated_data == {
            'text': 'example', 'published': '2001-01-01'
        }
    def test_create_only_default_is_not_provided_on_update(self):
        # Passing an instance makes this an update, so the default is omitted.
        instance = {
            'text': 'example', 'published': '2001-01-01'
        }
        ser = self.Serializer(instance, data={'text': 'example'})
        assert ser.is_valid()
        assert ser.validated_data == {
            'text': 'example',
        }
    def test_create_only_default_callable_sets_context(self):
        """
        CreateOnlyDefault instances with a callable default should set_context
        on the callable if possible
        """
        class TestCallableDefault:
            def set_context(self, serializer_field):
                self.field = serializer_field
            def __call__(self):
                return "success" if hasattr(self, 'field') else "failure"
        class TestSerializer(serializers.Serializer):
            context_set = serializers.CharField(default=serializers.CreateOnlyDefault(TestCallableDefault()))
        ser = TestSerializer(data={})
        assert ser.is_valid()
        assert ser.validated_data['context_set'] == 'success'
# Tests for field input and output values.
# ----------------------------------------
def get_items(mapping_or_list_of_two_tuples):
    """Normalize a test-vector container to (value, expected) pairs.

    Accepts either a mapping of {value: expected} or an already-paired
    list of two-tuples, and returns an iterable of two-tuples.
    """
    is_mapping = isinstance(mapping_or_list_of_two_tuples, dict)
    if is_mapping:
        return mapping_or_list_of_two_tuples.items()
    return mapping_or_list_of_two_tuples
class FieldValues:
    """
    Mixin for table-driven field tests. Subclasses provide `field` plus the
    `valid_inputs`, `invalid_inputs` and `outputs` vectors.
    """
    def test_valid_inputs(self):
        # Every valid input must validate to exactly the expected datum.
        for value, expected in get_items(self.valid_inputs):
            assert self.field.run_validation(value) == expected
    def test_invalid_inputs(self):
        # Every invalid input must raise with exactly the expected messages.
        for value, expected_failure in get_items(self.invalid_inputs):
            with pytest.raises(serializers.ValidationError) as exc_info:
                self.field.run_validation(value)
            assert exc_info.value.detail == expected_failure
    def test_outputs(self):
        # Representation of each output value must match exactly.
        for value, expected in get_items(self.outputs):
            assert self.field.to_representation(value) == expected
# Boolean types...
class TestBooleanField(FieldValues):
    """
    Valid and invalid values for `BooleanField`.
    """
    # Accepted true/false spellings on input (strings, ints, bools).
    valid_inputs = {
        'true': True,
        'false': False,
        '1': True,
        '0': False,
        1: True,
        0: False,
        True: True,
        False: False,
    }
    invalid_inputs = {
        'foo': ['"foo" is not a valid boolean.'],
        None: ['This field may not be null.']
    }
    # On output, an unrecognised truthy value such as 'other' maps to True.
    outputs = {
        'true': True,
        'false': False,
        '1': True,
        '0': False,
        1: True,
        0: False,
        True: True,
        False: False,
        'other': True
    }
    field = serializers.BooleanField()
    def test_disallow_unhashable_collection_types(self):
        # Lists/dicts are unhashable, so they cannot appear as keys in the
        # vector dicts above; exercise them separately here.
        inputs = (
            [],
            {},
        )
        field = serializers.BooleanField()
        for input_value in inputs:
            with pytest.raises(serializers.ValidationError) as exc_info:
                field.run_validation(input_value)
            expected = ['"{0}" is not a valid boolean.'.format(input_value)]
            assert exc_info.value.detail == expected
class TestNullBooleanField(FieldValues):
    """
    Valid and invalid values for `NullBooleanField`.
    """
    # Like BooleanField, but 'null'/None round-trip to None.
    valid_inputs = {
        'true': True,
        'false': False,
        'null': None,
        True: True,
        False: False,
        None: None
    }
    invalid_inputs = {
        'foo': ['"foo" is not a valid boolean.'],
    }
    outputs = {
        'true': True,
        'false': False,
        'null': None,
        True: True,
        False: False,
        None: None,
        'other': True
    }
    field = serializers.NullBooleanField()
# String types...
class TestCharField(FieldValues):
    """
    Valid and invalid values for `CharField`.
    """
    # Non-string scalars are coerced to their string form.
    valid_inputs = {
        1: '1',
        'abc': 'abc'
    }
    invalid_inputs = {
        (): ['Not a valid string.'],
        True: ['Not a valid string.'],
        '': ['This field may not be blank.']
    }
    outputs = {
        1: '1',
        'abc': 'abc'
    }
    field = serializers.CharField()
    def test_trim_whitespace_default(self):
        # Leading/trailing whitespace is stripped by default.
        field = serializers.CharField()
        assert field.to_internal_value(' abc ') == 'abc'
    def test_trim_whitespace_disabled(self):
        field = serializers.CharField(trim_whitespace=False)
        assert field.to_internal_value(' abc ') == ' abc '
    def test_disallow_blank_with_trim_whitespace(self):
        # Whitespace-only input trims down to '' and is then rejected.
        field = serializers.CharField(allow_blank=False, trim_whitespace=True)
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation(' ')
        assert exc_info.value.detail == ['This field may not be blank.']
class TestEmailField(FieldValues):
"""
Valid and invalid values for `EmailField`.
"""
valid_inputs = {
'example@example.com': 'example@example.com',
' example@example.com ': 'example@example.com',
}
invalid_inputs = {
'examplecom': ['Enter a valid email address.']
}
outputs = {}
field = serializers.EmailField()
class TestRegexField(FieldValues):
"""
Valid and invalid values for `RegexField`.
"""
valid_inputs = {
'a9': 'a9',
}
invalid_inputs = {
'A9': ["This value does not match the required pattern."]
}
outputs = {}
field = serializers.RegexField(regex='[a-z][0-9]')
class TestiCompiledRegexField(FieldValues):
    """
    Valid and invalid values for `RegexField` given a pre-compiled pattern.

    NOTE(review): the class name looks like a typo for
    `TestCompiledRegexField` — left unchanged to avoid renaming.
    """
    valid_inputs = {
        'a9': 'a9',
    }
    invalid_inputs = {
        'A9': ["This value does not match the required pattern."]
    }
    outputs = {}
    field = serializers.RegexField(regex=re.compile('[a-z][0-9]'))
class TestSlugField(FieldValues):
"""
Valid and invalid values for `SlugField`.
"""
valid_inputs = {
'slug-99': 'slug-99',
}
invalid_inputs = {
'slug 99': ['Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.']
}
outputs = {}
field = serializers.SlugField()
class TestURLField(FieldValues):
"""
Valid and invalid values for `URLField`.
"""
valid_inputs = {
'http://example.com': 'http://example.com',
}
invalid_inputs = {
'example.com': ['Enter a valid URL.']
}
outputs = {}
field = serializers.URLField()
class TestUUIDField(FieldValues):
"""
Valid and invalid values for `UUIDField`.
"""
valid_inputs = {
'825d7aeb-05a9-45b5-a5b7-05df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
'825d7aeb05a945b5a5b705df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
'urn:uuid:213b7d9b-244f-410d-828c-dabce7a2615d': uuid.UUID('213b7d9b-244f-410d-828c-dabce7a2615d'),
284758210125106368185219588917561929842: uuid.UUID('d63a6fb6-88d5-40c7-a91c-9edf73283072')
}
invalid_inputs = {
'825d7aeb-05a9-45b5-a5b7': ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.'],
(1, 2, 3): ['"(1, 2, 3)" is not a valid UUID.']
}
outputs = {
uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'): '825d7aeb-05a9-45b5-a5b7-05df87923cda'
}
field = serializers.UUIDField()
def _test_format(self, uuid_format, formatted_uuid_0):
field = serializers.UUIDField(format=uuid_format)
assert field.to_representation(uuid.UUID(int=0)) == formatted_uuid_0
assert field.to_internal_value(formatted_uuid_0) == uuid.UUID(int=0)
def test_formats(self):
self._test_format('int', 0)
self._test_format('hex_verbose', '00000000-0000-0000-0000-000000000000')
self._test_format('urn', 'urn:uuid:00000000-0000-0000-0000-000000000000')
self._test_format('hex', '0' * 32)
class TestIPAddressField(FieldValues):
"""
Valid and invalid values for `IPAddressField`
"""
valid_inputs = {
'127.0.0.1': '127.0.0.1',
'192.168.33.255': '192.168.33.255',
'2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
'2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
'2001:cdba::3257:9652': '2001:cdba::3257:9652'
}
invalid_inputs = {
'127001': ['Enter a valid IPv4 or IPv6 address.'],
'127.122.111.2231': ['Enter a valid IPv4 or IPv6 address.'],
'2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
'2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
1000: ['Enter a valid IPv4 or IPv6 address.'],
}
outputs = {}
field = serializers.IPAddressField()
class TestIPv4AddressField(FieldValues):
"""
Valid and invalid values for `IPAddressField`
"""
valid_inputs = {
'127.0.0.1': '127.0.0.1',
'192.168.33.255': '192.168.33.255',
}
invalid_inputs = {
'127001': ['Enter a valid IPv4 address.'],
'127.122.111.2231': ['Enter a valid IPv4 address.'],
}
outputs = {}
field = serializers.IPAddressField(protocol='IPv4')
class TestIPv6AddressField(FieldValues):
"""
Valid and invalid values for `IPAddressField`
"""
valid_inputs = {
'2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
'2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
'2001:cdba::3257:9652': '2001:cdba::3257:9652'
}
invalid_inputs = {
'2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
'2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
}
outputs = {}
field = serializers.IPAddressField(protocol='IPv6')
class TestFilePathField(FieldValues):
"""
Valid and invalid values for `FilePathField`
"""
valid_inputs = {
__file__: __file__,
}
invalid_inputs = {
'wrong_path': ['"wrong_path" is not a valid path choice.']
}
outputs = {
}
field = serializers.FilePathField(
path=os.path.abspath(os.path.dirname(__file__))
)
# Number types...
class TestIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField`.
"""
valid_inputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0,
'1.0': 1
}
invalid_inputs = {
0.5: ['A valid integer is required.'],
'abc': ['A valid integer is required.'],
'0.5': ['A valid integer is required.']
}
outputs = {
'1': 1,
'0': 0,
1: 1,
0: 0,
1.0: 1,
0.0: 0
}
field = serializers.IntegerField()
class TestMinMaxIntegerField(FieldValues):
"""
Valid and invalid values for `IntegerField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
}
invalid_inputs = {
0: ['Ensure this value is greater than or equal to 1.'],
4: ['Ensure this value is less than or equal to 3.'],
'0': ['Ensure this value is greater than or equal to 1.'],
'4': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.IntegerField(min_value=1, max_value=3)
class TestFloatField(FieldValues):
"""
Valid and invalid values for `FloatField`.
"""
valid_inputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
invalid_inputs = {
'abc': ["A valid number is required."]
}
outputs = {
'1': 1.0,
'0': 0.0,
1: 1.0,
0: 0.0,
1.0: 1.0,
0.0: 0.0,
}
field = serializers.FloatField()
class TestMinMaxFloatField(FieldValues):
"""
Valid and invalid values for `FloatField` with min and max limits.
"""
valid_inputs = {
'1': 1,
'3': 3,
1: 1,
3: 3,
1.0: 1.0,
3.0: 3.0,
}
invalid_inputs = {
0.9: ['Ensure this value is greater than or equal to 1.'],
3.1: ['Ensure this value is less than or equal to 3.'],
'0.0': ['Ensure this value is greater than or equal to 1.'],
'3.1': ['Ensure this value is less than or equal to 3.'],
}
outputs = {}
field = serializers.FloatField(min_value=1, max_value=3)
class TestDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField`.
"""
valid_inputs = {
'12.3': Decimal('12.3'),
'0.1': Decimal('0.1'),
10: Decimal('10'),
0: Decimal('0'),
12.3: Decimal('12.3'),
0.1: Decimal('0.1'),
'2E+1': Decimal('20'),
}
invalid_inputs = (
('abc', ["A valid number is required."]),
(Decimal('Nan'), ["A valid number is required."]),
(Decimal('Inf'), ["A valid number is required."]),
('12.345', ["Ensure that there are no more than 3 digits in total."]),
(200000000000.0, ["Ensure that there are no more than 3 digits in total."]),
('0.01', ["Ensure that there are no more than 1 decimal places."]),
(123, ["Ensure that there are no more than 2 digits before the decimal point."]),
('2E+2', ["Ensure that there are no more than 2 digits before the decimal point."])
)
outputs = {
'1': '1.0',
'0': '0.0',
'1.09': '1.1',
'0.04': '0.0',
1: '1.0',
0: '0.0',
Decimal('1.0'): '1.0',
Decimal('0.0'): '0.0',
Decimal('1.09'): '1.1',
Decimal('0.04'): '0.0'
}
field = serializers.DecimalField(max_digits=3, decimal_places=1)
class TestMinMaxDecimalField(FieldValues):
"""
Valid and invalid values for `DecimalField` with min and max limits.
"""
valid_inputs = {
'10.0': Decimal('10.0'),
'20.0': Decimal('20.0'),
}
invalid_inputs = {
'9.9': ['Ensure this value is greater than or equal to 10.'],
'20.1': ['Ensure this value is less than or equal to 20.'],
}
outputs = {}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
min_value=10, max_value=20
)
class TestNoMaxDigitsDecimalField(FieldValues):
field = serializers.DecimalField(
max_value=100, min_value=0,
decimal_places=2, max_digits=None
)
valid_inputs = {
'10': Decimal('10.00')
}
invalid_inputs = {}
outputs = {}
class TestNoStringCoercionDecimalField(FieldValues):
"""
Output values for `DecimalField` with `coerce_to_string=False`.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
1.09: Decimal('1.1'),
0.04: Decimal('0.0'),
'1.09': Decimal('1.1'),
'0.04': Decimal('0.0'),
Decimal('1.09'): Decimal('1.1'),
Decimal('0.04'): Decimal('0.0'),
}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
coerce_to_string=False
)
class TestLocalizedDecimalField(TestCase):
@override_settings(USE_L10N=True, LANGUAGE_CODE='pl')
def test_to_internal_value(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
assert field.to_internal_value('1,1') == Decimal('1.1')
@override_settings(USE_L10N=True, LANGUAGE_CODE='pl')
def test_to_representation(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, localize=True)
assert field.to_representation(Decimal('1.1')) == '1,1'
def test_localize_forces_coerce_to_string(self):
field = serializers.DecimalField(max_digits=2, decimal_places=1, coerce_to_string=False, localize=True)
assert isinstance(field.to_representation(Decimal('1.1')), six.string_types)
class TestQuantizedValueForDecimal(TestCase):
def test_int_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value(12).as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
assert value == expected_digit_tuple
def test_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
assert value == expected_digit_tuple
def test_part_precision_string_quantized_value_for_decimal(self):
field = serializers.DecimalField(max_digits=4, decimal_places=2)
value = field.to_internal_value('12.0').as_tuple()
expected_digit_tuple = (0, (1, 2, 0, 0), -2)
assert value == expected_digit_tuple
class TestNoDecimalPlaces(FieldValues):
valid_inputs = {
'0.12345': Decimal('0.12345'),
}
invalid_inputs = {
'0.1234567': ['Ensure that there are no more than 6 digits in total.']
}
outputs = {
'1.2345': '1.2345',
'0': '0',
'1.1': '1.1',
}
field = serializers.DecimalField(max_digits=6, decimal_places=None)
# Date & time serializers...
class TestDateField(FieldValues):
"""
Valid and invalid values for `DateField`.
"""
valid_inputs = {
'2001-01-01': datetime.date(2001, 1, 1),
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1),
}
invalid_inputs = {
'abc': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
'2001-99-99': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
datetime.datetime(2001, 1, 1, 12, 00): ['Expected a date but got a datetime.'],
}
outputs = {
datetime.date(2001, 1, 1): '2001-01-01',
'2001-01-01': '2001-01-01',
six.text_type('2016-01-10'): '2016-01-10',
None: None,
'': None,
}
field = serializers.DateField()
class TestCustomInputFormatDateField(FieldValues):
"""
Valid and invalid values for `DateField` with a custom input format.
"""
valid_inputs = {
'1 Jan 2001': datetime.date(2001, 1, 1),
}
invalid_inputs = {
'2001-01-01': ['Date has wrong format. Use one of these formats instead: DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateField(input_formats=['%d %b %Y'])
class TestCustomOutputFormatDateField(FieldValues):
"""
Values for `DateField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): '01 Jan 2001'
}
field = serializers.DateField(format='%d %b %Y')
class TestNoOutputFormatDateField(FieldValues):
"""
Values for `DateField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.date(2001, 1, 1): datetime.date(2001, 1, 1)
}
field = serializers.DateField(format=None)
class TestDateTimeField(FieldValues):
    """
    Valid and invalid values for `DateTimeField`.

    Naive datetime inputs are made aware using the field's
    `default_timezone` (UTC here).
    """
    # Fix: the key '2001-01-01T13:00Z' was previously listed twice; the
    # second literal silently overwrote the first, so only one copy is kept.
    valid_inputs = {
        '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        '2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        # Django 1.4 does not support timezone string parsing.
        '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
    }
    invalid_inputs = {
        'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        '2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'],
    }
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00',
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): '2001-01-01T13:00:00Z',
        '2001-01-01T00:00:00': '2001-01-01T00:00:00',
        six.text_type('2016-01-10T00:00:00'): '2016-01-10T00:00:00',
        None: None,
        '': None,
    }
    field = serializers.DateTimeField(default_timezone=timezone.UTC())
class TestCustomInputFormatDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField` with a custom input format.
"""
valid_inputs = {
'1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=timezone.UTC()),
}
invalid_inputs = {
'2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.']
}
outputs = {}
field = serializers.DateTimeField(default_timezone=timezone.UTC(), input_formats=['%I:%M%p, %d %b %Y'])
class TestCustomOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001',
}
field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y')
class TestNoOutputFormatDateTimeField(FieldValues):
"""
Values for `DateTimeField` with no output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00),
}
field = serializers.DateTimeField(format=None)
class TestNaiveDateTimeField(FieldValues):
"""
Valid and invalid values for `DateTimeField` with naive datetimes.
"""
valid_inputs = {
datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00),
'2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00),
}
invalid_inputs = {}
outputs = {}
field = serializers.DateTimeField(default_timezone=None)
class TestTimeField(FieldValues):
"""
Valid and invalid values for `TimeField`.
"""
valid_inputs = {
'13:00': datetime.time(13, 00),
datetime.time(13, 00): datetime.time(13, 00),
}
invalid_inputs = {
'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
'99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
}
outputs = {
datetime.time(13, 0): '13:00:00',
datetime.time(0, 0): '00:00:00',
'00:00:00': '00:00:00',
None: None,
'': None,
}
field = serializers.TimeField()
class TestCustomInputFormatTimeField(FieldValues):
"""
Valid and invalid values for `TimeField` with a custom input format.
"""
valid_inputs = {
'1:00pm': datetime.time(13, 00),
}
invalid_inputs = {
'13:00': ['Time has wrong format. Use one of these formats instead: hh:mm[AM|PM].'],
}
outputs = {}
field = serializers.TimeField(input_formats=['%I:%M%p'])
class TestCustomOutputFormatTimeField(FieldValues):
"""
Values for `TimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.time(13, 00): '01:00PM'
}
field = serializers.TimeField(format='%I:%M%p')
class TestNoOutputFormatTimeField(FieldValues):
    """
    Values for `TimeField` with no output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = {
        datetime.time(13, 00): datetime.time(13, 00)
    }
    field = serializers.TimeField(format=None)
class TestDurationField(FieldValues):
"""
Valid and invalid values for `DurationField`.
"""
valid_inputs = {
'13': datetime.timedelta(seconds=13),
'3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
'08:01': datetime.timedelta(minutes=8, seconds=1),
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
3600: datetime.timedelta(hours=1),
}
invalid_inputs = {
'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
'3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
}
outputs = {
datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123',
}
field = serializers.DurationField()
# Choice types...
class TestChoiceField(FieldValues):
"""
Valid and invalid values for `ChoiceField`.
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'amazing': ['"amazing" is not a valid choice.']
}
outputs = {
'good': 'good',
'': '',
'amazing': 'amazing',
}
field = serializers.ChoiceField(
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
def test_allow_blank(self):
"""
If `allow_blank=True` then '' is a valid input.
"""
field = serializers.ChoiceField(
allow_blank=True,
choices=[
('poor', 'Poor quality'),
('medium', 'Medium quality'),
('good', 'Good quality'),
]
)
output = field.run_validation('')
assert output == ''
def test_allow_null(self):
"""
If `allow_null=True` then '' on HTML forms is treated as None.
"""
field = serializers.ChoiceField(
allow_null=True,
choices=[
1, 2, 3
]
)
field.field_name = 'example'
value = field.get_value(QueryDict('example='))
assert value is None
output = field.run_validation(None)
assert output is None
def test_iter_options(self):
"""
iter_options() should return a list of options and option groups.
"""
field = serializers.ChoiceField(
choices=[
('Numbers', ['integer', 'float']),
('Strings', ['text', 'email', 'url']),
'boolean'
]
)
items = list(field.iter_options())
assert items[0].start_option_group
assert items[0].label == 'Numbers'
assert items[1].value == 'integer'
assert items[2].value == 'float'
assert items[3].end_option_group
assert items[4].start_option_group
assert items[4].label == 'Strings'
assert items[5].value == 'text'
assert items[6].value == 'email'
assert items[7].value == 'url'
assert items[8].end_option_group
assert items[9].value == 'boolean'
class TestChoiceFieldWithType(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses an integer type,
instead of a char type.
"""
valid_inputs = {
'1': 1,
3: 3,
}
invalid_inputs = {
5: ['"5" is not a valid choice.'],
'abc': ['"abc" is not a valid choice.']
}
outputs = {
'1': 1,
1: 1
}
field = serializers.ChoiceField(
choices=[
(1, 'Poor quality'),
(2, 'Medium quality'),
(3, 'Good quality'),
]
)
class TestChoiceFieldWithListChoices(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a flat list for the
choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(choices=('poor', 'medium', 'good'))
class TestChoiceFieldWithGroupedChoices(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a grouped list for the
choices, rather than a list of pairs of (`value`, `description`).
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(
choices=[
(
'Category',
(
('poor', 'Poor quality'),
('medium', 'Medium quality'),
),
),
('good', 'Good quality'),
]
)
class TestChoiceFieldWithMixedChoices(FieldValues):
"""
Valid and invalid values for a `Choice` field that uses a single paired or
grouped.
"""
valid_inputs = {
'poor': 'poor',
'medium': 'medium',
'good': 'good',
}
invalid_inputs = {
'awful': ['"awful" is not a valid choice.']
}
outputs = {
'good': 'good'
}
field = serializers.ChoiceField(
choices=[
(
'Category',
(
('poor', 'Poor quality'),
),
),
'medium',
('good', 'Good quality'),
]
)
class TestMultipleChoiceField(FieldValues):
"""
Valid and invalid values for `MultipleChoiceField`.
"""
valid_inputs = {
(): set(),
('aircon',): set(['aircon']),
('aircon', 'manual'): set(['aircon', 'manual']),
}
invalid_inputs = {
'abc': ['Expected a list of items but got type "str".'],
('aircon', 'incorrect'): ['"incorrect" is not a valid choice.']
}
outputs = [
(['aircon', 'manual', 'incorrect'], set(['aircon', 'manual', 'incorrect']))
]
field = serializers.MultipleChoiceField(
choices=[
('aircon', 'AirCon'),
('manual', 'Manual drive'),
('diesel', 'Diesel'),
]
)
def test_against_partial_and_full_updates(self):
field = serializers.MultipleChoiceField(choices=(('a', 'a'), ('b', 'b')))
field.partial = False
assert field.get_value(QueryDict({})) == []
field.partial = True
assert field.get_value(QueryDict({})) == rest_framework.fields.empty
class TestEmptyMultipleChoiceField(FieldValues):
"""
Invalid values for `MultipleChoiceField(allow_empty=False)`.
"""
valid_inputs = {
}
invalid_inputs = (
([], ['This selection may not be empty.']),
)
outputs = [
]
field = serializers.MultipleChoiceField(
choices=[
('consistency', 'Consistency'),
('availability', 'Availability'),
('partition', 'Partition tolerance'),
],
allow_empty=False
)
# File serializers...
class MockFile:
    """Lightweight stand-in for an uploaded file (name, size and url only)."""
    def __init__(self, name='', size=0, url=''):
        self.name = name
        self.size = size
        self.url = url
    def __eq__(self, other):
        # Equal only to another MockFile whose three fields all match.
        if not isinstance(other, MockFile):
            return False
        return (self.name, self.size, self.url) == (other.name, other.size, other.url)
class TestFileField(FieldValues):
"""
Values for `FileField`.
"""
valid_inputs = [
(MockFile(name='example', size=10), MockFile(name='example', size=10))
]
invalid_inputs = [
('invalid', ['The submitted data was not a file. Check the encoding type on the form.']),
(MockFile(name='example.txt', size=0), ['The submitted file is empty.']),
(MockFile(name='', size=10), ['No filename could be determined.']),
(MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).'])
]
outputs = [
(MockFile(name='example.txt', url='/example.txt'), '/example.txt'),
('', None)
]
field = serializers.FileField(max_length=10)
class TestFieldFieldWithName(FieldValues):
    """
    Values for `FileField` with a filename output instead of URLs.

    NOTE(review): the class name looks like a typo for
    `TestFileFieldWithName` — left unchanged to avoid renaming.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = [
        (MockFile(name='example.txt', url='/example.txt'), 'example.txt')
    ]
    field = serializers.FileField(use_url=False)
# Stub out a mock Django `forms.ImageField` class so we don't *actually*
# call into its regular validation, or require PIL for testing.
class FailImageValidation(object):
    # Stand-in for Django's `forms.ImageField` that always rejects the value.
    def to_python(self, value):
        # NOTE(review): relies on `self.error_messages` being supplied by the
        # serializer field this class is injected into via
        # `serializers.ImageField(_DjangoImageField=...)` — confirm.
        raise serializers.ValidationError(self.error_messages['invalid_image'])
class PassImageValidation(object):
    # Stand-in for Django's `forms.ImageField` that accepts any value as-is.
    def to_python(self, value):
        return value
class TestInvalidImageField(FieldValues):
"""
Values for an invalid `ImageField`.
"""
valid_inputs = {}
invalid_inputs = [
(MockFile(name='example.txt', size=10), ['Upload a valid image. The file you uploaded was either not an image or a corrupted image.'])
]
outputs = {}
field = serializers.ImageField(_DjangoImageField=FailImageValidation)
class TestValidImageField(FieldValues):
    """
    Values for a valid `ImageField`.
    """
    valid_inputs = [
        (MockFile(name='example.txt', size=10), MockFile(name='example.txt', size=10))
    ]
    invalid_inputs = {}
    outputs = {}
    field = serializers.ImageField(_DjangoImageField=PassImageValidation)
# Composite serializers...
class TestListField(FieldValues):
"""
Values for `ListField` with IntegerField as child.
"""
valid_inputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3]),
([], [])
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
([1, 2, 'error'], ['A valid integer is required.']),
({'one': 'two'}, ['Expected a list of items but got type "dict".'])
]
outputs = [
([1, 2, 3], [1, 2, 3]),
(['1', '2', '3'], [1, 2, 3])
]
field = serializers.ListField(child=serializers.IntegerField())
def test_no_source_on_child(self):
with pytest.raises(AssertionError) as exc_info:
serializers.ListField(child=serializers.IntegerField(source='other'))
assert str(exc_info.value) == (
"The `source` argument is not meaningful when applied to a `child=` field. "
"Remove `source=` from the field declaration."
)
def test_collection_types_are_invalid_input(self):
field = serializers.ListField(child=serializers.CharField())
input_value = ({'one': 'two'})
with pytest.raises(serializers.ValidationError) as exc_info:
field.to_internal_value(input_value)
assert exc_info.value.detail == ['Expected a list of items but got type "dict".']
class TestEmptyListField(FieldValues):
"""
Values for `ListField` with allow_empty=False flag.
"""
valid_inputs = {}
invalid_inputs = [
([], ['This list may not be empty.'])
]
outputs = {}
field = serializers.ListField(child=serializers.IntegerField(), allow_empty=False)
class TestUnvalidatedListField(FieldValues):
"""
Values for `ListField` with no `child` argument.
"""
valid_inputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
invalid_inputs = [
('not a list', ['Expected a list of items but got type "str".']),
]
outputs = [
([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
]
field = serializers.ListField()
class TestDictField(FieldValues):
    """
    Values for `DictField` with CharField as child.
    """
    valid_inputs = [
        ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
    ]
    invalid_inputs = [
        ({'a': 1, 'b': None}, ['This field may not be null.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
    ]
    outputs = [
        ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
    ]
    field = serializers.DictField(child=serializers.CharField())
    def test_no_source_on_child(self):
        # `source=` has no meaning on a child field and must be rejected.
        with pytest.raises(AssertionError) as exc_info:
            serializers.DictField(child=serializers.CharField(source='other'))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )
    def test_allow_null(self):
        """
        If `allow_null=True` then `None` is a valid input.
        """
        field = serializers.DictField(allow_null=True)
        output = field.run_validation(None)
        assert output is None
class TestDictFieldWithNullChild(FieldValues):
    """
    Values for `DictField` with an `allow_null` CharField as child.
    """
    valid_inputs = [
        ({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}),
    ]
    invalid_inputs = [
    ]
    outputs = [
        ({'a': None, 'b': '2', 3: 3}, {'a': None, 'b': '2', '3': '3'}),
    ]
    field = serializers.DictField(child=serializers.CharField(allow_null=True))
class TestUnvalidatedDictField(FieldValues):
    """
    Values for `DictField` with no `child` argument.
    """
    valid_inputs = [
        ({'a': 1, 'b': [4, 5, 6], 1: 123}, {'a': 1, 'b': [4, 5, 6], '1': 123}),
    ]
    invalid_inputs = [
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
    ]
    outputs = [
        ({'a': 1, 'b': [4, 5, 6]}, {'a': 1, 'b': [4, 5, 6]}),
    ]
    field = serializers.DictField()
class TestJSONField(FieldValues):
"""
Values for `JSONField`.
"""
valid_inputs = [
({
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}, {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}),
]
invalid_inputs = [
({'a': set()}, ['Value must be valid JSON.']),
]
outputs = [
({
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': 3
}, {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': 3
}),
]
field = serializers.JSONField()
def test_html_input_as_json_string(self):
"""
HTML inputs should be treated as a serialized JSON string.
"""
class TestSerializer(serializers.Serializer):
config = serializers.JSONField()
data = QueryDict(mutable=True)
data.update({'config': '{"a":1}'})
serializer = TestSerializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'config': {"a": 1}}
class TestBinaryJSONField(FieldValues):
"""
Values for `JSONField` with binary=True.
"""
valid_inputs = [
(b'{"a": 1, "3": null, "b": ["some", "list", true, 1.23]}', {
'a': 1,
'b': ['some', 'list', True, 1.23],
'3': None
}),
]
invalid_inputs = [
('{"a": "unterminated string}', ['Value must be valid JSON.']),
]
outputs = [
(['some', 'list', True, 1.23], b'["some", "list", true, 1.23]'),
]
field = serializers.JSONField(binary=True)
# Tests for FieldField.
# ---------------------
class MockRequest:
    """Minimal request stub exposing only `build_absolute_uri`."""
    def build_absolute_uri(self, value):
        # Mimic Django's request: prepend the fixed scheme/host to the path.
        return ''.join(['http://example.com', value])
class TestFileFieldContext:
def test_fully_qualified_when_request_in_context(self):
field = serializers.FileField(max_length=10)
field._context = {'request': MockRequest()}
obj = MockFile(name='example.txt', url='/example.txt')
value = field.to_representation(obj)
assert value == 'http://example.com/example.txt'
# Tests for SerializerMethodField.
# --------------------------------
class TestSerializerMethodField:
def test_serializer_method_field(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField()
def get_example_field(self, obj):
return 'ran get_example_field(%d)' % obj['example_field']
serializer = ExampleSerializer({'example_field': 123})
assert serializer.data == {
'example_field': 'ran get_example_field(123)'
}
def test_redundant_method_name(self):
class ExampleSerializer(serializers.Serializer):
example_field = serializers.SerializerMethodField('get_example_field')
with pytest.raises(AssertionError) as exc_info:
ExampleSerializer().fields
assert str(exc_info.value) == (
"It is redundant to specify `get_example_field` on "
"SerializerMethodField 'example_field' in serializer "
"'ExampleSerializer', because it is the same as the default "
"method name. Remove the `method_name` argument."
)
| {
"content_hash": "f4d599988d81f0247637427e63fce70e",
"timestamp": "",
"source": "github",
"line_count": 1860,
"max_line_length": 163,
"avg_line_length": 31.144086021505377,
"alnum_prop": 0.5831376881646182,
"repo_name": "uploadcare/django-rest-framework",
"id": "069ba879d4b5c4687f08e5852263e30cc9dd887c",
"size": "57928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fields.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "10024"
},
{
"name": "HTML",
"bytes": "61792"
},
{
"name": "JavaScript",
"bytes": "6805"
},
{
"name": "Python",
"bytes": "1061728"
}
],
"symlink_target": ""
} |
from tartist.data.rflow import control, InputPipe
import time

# Consume ten messages from the 'tart.pipe.test' input pipe, printing the
# 'current' field of each, pausing one second between reads.
pipe = InputPipe('tart.pipe.test')
with control(pipes=[pipe]):
    for _ in range(10):
        print(pipe.get()['current'])
        time.sleep(1)
| {
"content_hash": "ceb026088b15a1910cde61d23ef45c6e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 25,
"alnum_prop": 0.65,
"repo_name": "vacancy/TensorArtist",
"id": "4295f7ccbd9b1156c52b8ee34e77d7f1f758451f",
"size": "354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/remote_flow/in.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "497134"
},
{
"name": "Shell",
"bytes": "630"
}
],
"symlink_target": ""
} |
from flask import Blueprint, render_template, current_app, redirect, g, get_template_attribute
from flask_application.controllers import TemplateView
from flask_application.profiles.models import Profile
from flask_application.users.models import User
from flask.ext.mobility.decorators import mobilized
import re
users = Blueprint('users', __name__)
class ControlPanelView(TemplateView):
blueprint = users
    def get(self):
        """Render the desktop control-panel page for the current user."""
        user = self._get_current_user()
        # Bring the user's Dropbox-backed state in sync before rendering.
        self.make_dropbox_coherent(user)
        profiles = user.profiles
        return render_template('users/controlpanel.html', user=user,
            profiles=profiles, maximum_profiles=User.maximum_profiles)
    @mobilized(get)
    def get(self):
        """Mobile variant of `get` (Flask-Mobility dispatches here for
        mobile user agents).

        NOTE(review): unlike the desktop view this skips
        `make_dropbox_coherent` — confirm that is intentional.
        """
        user = self._get_current_user()
        profiles = user.profiles
        return render_template('mobile/users/controlpanel.html', user=user,
            profiles=profiles, maximum_profiles=User.maximum_profiles)
def create_new_refurence_handler(self, obj_response, content):
profile_name = content['name']
user = self._get_current_user()
err_macro = get_template_attribute('users/_macros.html', 'render_error')
if len(user.profiles) >= User.maximum_profiles:
err_html = err_macro('cannot create more refurences: maximum reached')
obj_response.html('#error-msg', err_html)
return
try:
ControlPanelView.validate_refurence_name(profile_name)
except Exception as e:
err_html = err_macro("error: " + str(e))
obj_response.html('#error-msg', err_html)
return
profile_name = profile_name.lower()
if not Profile.is_name_valid(profile_name):
err_html = err_macro('name: "' + profile_name + '" already taken')
obj_response.html('#error-msg', err_html)
return
profile = Profile(username=profile_name)
profile = Profile.initialize_to_default(profile)
profile.owner_email = user.email
profile.save()
profile.dropbox_root().share()
user.profiles.append(profile)
user.save()
profiles = user.profiles
profile_macro = get_template_attribute('users/_macros.html', 'render_profile_list')
obj_response.html('#error-msg', '')
obj_response.html('#profile-list', profile_macro(profiles,
User.maximum_profiles))
def delete_refurence_handler(self, obj_response, content):
profile_name = content['name']
err_macro = get_template_attribute('users/_macros.html', 'render_error')
if not profile_name:
err_html = err_macro('cannot delete blank profile')
obj_response.html('#error-msg', err_html)
return
try:
profile = Profile.objects.get(username=profile_name)
except Exception:
err_html = err_macro('could not find profile: ' + profile_name)
obj_response.html('#error-msg', err_html)
return
user = self._get_current_user()
for idx in range(0, len(user.profiles)):
p = user.profiles[idx]
if p == profile:
del user.profiles[idx]
break
user.save()
profile.delete()
profile.save()
profiles = user.profiles
profile_macro = get_template_attribute('users/_macros.html', 'render_profile_list')
obj_response.html('#error-msg', '')
obj_response.html('#profile-list', profile_macro(profiles,
User.maximum_profiles))
def make_dropbox_coherent(self, user):
refurences = current_app.dropbox.client.metadata('/')['contents']
for p in user.profiles:
pass
def register_sijax(self):
g.sijax.register_callback('create_new_refurence', self.create_new_refurence_handler)
g.sijax.register_callback('delete_refurence', self.delete_refurence_handler)
def _get_current_user(self):
if not current_app.dropbox.is_authenticated:
return redirect('404')
try:
dropbox_email = current_app.dropbox.account_info['email']
except Exception as e:
return redirect('404')
try:
user = User.objects.get(email=dropbox_email)
except Exception:
user = User(email=dropbox_email, username=dropbox_email)
user.save()
return user
@staticmethod
def validate_refurence_name(name):
if not name:
raise Exception('name cannot be empty')
if re.match('^[\w-]+$', name) is None:
raise Exception('name can only contain alphanumeric characters or dashes')
if len(name) > 80:
raise Exception('name is too long')
users.add_url_rule('/controlpanel/', view_func=ControlPanelView.as_view('controlpanel'))
| {
"content_hash": "083a1ae4c0a3b14b6fe129d06a829a11",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 94,
"avg_line_length": 35.63503649635037,
"alnum_prop": 0.6188037689471528,
"repo_name": "dbdeadbeat/refurence",
"id": "0da361c826a605a10b0b782d0797dffa9e6d7922",
"size": "4882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_application/users/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "54963"
},
{
"name": "HTML",
"bytes": "88838"
},
{
"name": "JavaScript",
"bytes": "273639"
},
{
"name": "Python",
"bytes": "95517"
},
{
"name": "Shell",
"bytes": "49"
}
],
"symlink_target": ""
} |
'''
Created on Oct 1, 2016
@author: rtorres
'''
import datetime
from flaskiwsapp.database import SurrogatePK, Model, db, reference_col, relationship, Column
class Ticket(SurrogatePK, Model):
    """A ticket of the app.

    Each ticket belongs to exactly one Request (the FK is unique, so
    the relation is effectively one-to-one) and to one User.
    """
    __tablename__ = 'tickets'
    # Unique FK: at most one ticket per request.
    request_id = reference_col('requests', nullable=False, unique=True)
    request = relationship('Request', backref='ticket', uselist=False)
    user_id = reference_col('users', nullable=False)
    user = relationship('User', backref='ticket')
    # Free-text body of the ticket.
    detail = Column(db.Text(), nullable=False)
    # Insertion timestamp in UTC (callable default, evaluated per row).
    created_at = Column(db.DateTime(), default=datetime.datetime.utcnow)
    def __init__(self, **kwargs):
        """Create instance."""
        db.Model.__init__(self, **kwargs)
    def __str__(self):
        """String representation of the tickets. Shows the id and id request."""
        return '%s - %s' % (self.id, self.request_id)
    def get_id(self):
        # Return the surrogate primary key.
        return self.id
| {
"content_hash": "b4e28787aa315128d257227fac542794",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 92,
"avg_line_length": 30.193548387096776,
"alnum_prop": 0.6442307692307693,
"repo_name": "rafasis1986/EngineeringMidLevel",
"id": "06b1003d6d79e60c09e2a6a77a4ac335dada28cb",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flaskiwsapp/projects/models/ticket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2736"
},
{
"name": "HTML",
"bytes": "56955"
},
{
"name": "JavaScript",
"bytes": "8835"
},
{
"name": "Mako",
"bytes": "436"
},
{
"name": "Nginx",
"bytes": "1468"
},
{
"name": "Python",
"bytes": "125961"
},
{
"name": "Shell",
"bytes": "7203"
},
{
"name": "TypeScript",
"bytes": "71432"
}
],
"symlink_target": ""
} |
import sys
import os
from optparse import OptionParser
from _version import __version__
from platypus.frontend import get_ast
from platypus.cfg import get_cfg
from platypus.simulator import get_ir
class Shell:
    """Interactive REPL over a simulated platypus function.

    Python 2 code (print statements, ``raw_input``).  Construction
    parses the given source file down to the simulator IR; ``run``
    then reads commands or integer arguments from stdin until 'exit'.
    """
    def __init__(self, source_filename):
        try:
            source_file = open(source_filename, 'r')
        except IOError:
            print 'error in opening file ', source_filename
            quit()
        source = ""
        for line in source_file:
            source += line
        # source text -> AST -> CFG -> simulator IR.
        func_ast = get_ast(source)
        func_cfg = get_cfg(func_ast)
        # Best-effort cleanup of parser artifacts dropped in the cwd
        # (presumably PLY's parser.out/parsetab files -- confirm).
        try:
            os.remove("parser.out")
            os.remove("parsetab.py")
            os.remove("parsetab.pyc")
        except:
            pass
        self.func_sim = get_ir(func_cfg)
    def run(self):
        """Prompt loop; ``handle_cmd`` flips self.running off on 'exit'."""
        self.running = True
        while self.running is True:
            cmd = raw_input('platypus $ ')
            self.handle_cmd(cmd)
    def handle_cmd(self, cmd):
        """Dispatch one shell command.

        Known commands: exit, summary, help, clear, asm, and the empty
        string (no-op).  Anything else is treated as a space-separated
        list of integer arguments and executed against the simulator.
        """
        if (cmd == 'exit'):
            self.running = False
        elif (cmd == ''):
            self.running = True
        elif (cmd == 'summary'):
            print ''
            print self.func_sim.summary
            print ''
        elif (cmd == 'help'):
            print ''
            print 'summary : show summary of function as written in platypus program'
            print 'exit : exit simulator'
            print 'asm : display intermediate code that is being simulated'
            print 'clear : clear screen'
            print ''
        elif (cmd == 'clear'):
            os.system('clear')
        elif (cmd == 'asm'):
            print ''
            self.func_sim.show()
            print ''
        else:
            # Fallback: interpret the input as integer arguments.
            input_values_string = cmd.split(' ')
            input_values = []
            valid_input = True
            for value_string in input_values_string:
                try:
                    input_values.append(int(value_string))
                except:
                    valid_input = False
            if valid_input:
                # Arity must match the simulated function's inputs.
                if len(input_values) == len(self.func_sim.input_variables):
                    print self.func_sim.execute(input_values)
                else:
                    print 'expected ', len(self.func_sim.input_variables), ' arguments'
            else:
                print 'please enter only integers as arguments'
def run():
    """CLI entry point: parse options and start the interactive shell."""
    parser = OptionParser("usage: %platypus [options] filename",
            version=__version__)
    parser.add_option("-i", "--input_file", action="store", dest="source_filename",
            default="source.platypus", help="enter path of the source code ")
    # optparse's parse_args() returns (options, args); the names here
    # are swapped, so ``args`` is really the options object -- the
    # ``args.source_filename`` accesses below rely on that.
    (args, options) = parser.parse_args()
    if args.source_filename == "source.platypus":
        # NOTE(review): warns but still proceeds with the default path;
        # probably intended to exit here -- confirm.
        print 'please mention source path'
    shell = Shell(args.source_filename)
    shell.run()
if __name__ == "__main__":
    run()
| {
"content_hash": "951e4532d41070f1361663b0378be320",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 87,
"avg_line_length": 31.021505376344088,
"alnum_prop": 0.5237435008665511,
"repo_name": "anayjoshi/platypus",
"id": "8c4793e30a727fa5b0925a04b7f32c509eb35f35",
"size": "2885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platypus/shell.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "54768"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
} |
import os
import shutil
from tests import add, TestCase
from mutagen.easymp4 import EasyMP4, error as MP4Error
from tempfile import mkstemp
class TEasyMP4(TestCase):
    """Tests for mutagen's EasyMP4 key/value tag wrapper.

    Each test operates on a throwaway copy of the tagged fixture
    ``tests/data/has-tags.m4a`` with all tags deleted up front.
    """
    def setUp(self):
        # Copy the fixture to a temp file and wipe its tags.
        fd, self.filename = mkstemp('.mp4')
        os.close(fd)
        empty = os.path.join('tests', 'data', 'has-tags.m4a')
        shutil.copy(empty, self.filename)
        self.mp4 = EasyMP4(self.filename)
        self.mp4.delete()
    def test_pprint(self):
        # pprint() must not raise on a tagged file.
        self.mp4["artist"] = "baz"
        self.mp4.pprint()
    def test_has_key(self):
        self.failIf(self.mp4.has_key("foo"))
    def test_empty_file(self):
        # A zero-byte file is not a valid MP4.
        empty = os.path.join('tests', 'data', 'emptyfile.mp3')
        self.assertRaises(MP4Error, EasyMP4, filename=empty)
    def test_nonexistent_file(self):
        empty = os.path.join('tests', 'data', 'does', 'not', 'exist')
        self.assertRaises(IOError, EasyMP4, filename=empty)
    def test_write_single(self):
        # Round-trip a single value through every simple key; keys with
        # special encodings are exercised by dedicated tests below.
        for key in EasyMP4.Get:
            if key in ["tracknumber", "discnumber", "date", "bpm"]:
                continue
            # Test creation
            self.mp4[key] = "a test value"
            self.mp4.save(self.filename)
            mp4 = EasyMP4(self.filename)
            self.failUnlessEqual(mp4[key], ["a test value"])
            self.failUnlessEqual(mp4.keys(), [key])
            # And non-creation setting.
            self.mp4[key] = "a test value"
            self.mp4.save(self.filename)
            mp4 = EasyMP4(self.filename)
            self.failUnlessEqual(mp4[key], ["a test value"])
            self.failUnlessEqual(mp4.keys(), [key])
            del(self.mp4[key])
    def test_write_double(self):
        # Same as above but with two values per key.
        for key in EasyMP4.Get:
            if key in ["tracknumber", "discnumber", "date", "bpm"]:
                continue
            self.mp4[key] = ["a test", "value"]
            self.mp4.save(self.filename)
            mp4 = EasyMP4(self.filename)
            self.failUnlessEqual(mp4.get(key), ["a test", "value"])
            self.failUnlessEqual(mp4.keys(), [key])
            self.mp4[key] = ["a test", "value"]
            self.mp4.save(self.filename)
            mp4 = EasyMP4(self.filename)
            self.failUnlessEqual(mp4.get(key), ["a test", "value"])
            self.failUnlessEqual(mp4.keys(), [key])
            del(self.mp4[key])
    def test_write_date(self):
        # 'date' has a dedicated atom encoding; round-trip it twice.
        self.mp4["date"] = "2004"
        self.mp4.save(self.filename)
        mp4 = EasyMP4(self.filename)
        self.failUnlessEqual(mp4["date"], ["2004"])
        self.mp4["date"] = "2004"
        self.mp4.save(self.filename)
        mp4 = EasyMP4(self.filename)
        self.failUnlessEqual(mp4["date"], ["2004"])
    def test_date_delete(self):
        self.mp4["date"] = "2004"
        self.failUnlessEqual(self.mp4["date"], ["2004"])
        del(self.mp4["date"])
        self.failIf("date" in self.mp4)
    def test_write_date_double(self):
        self.mp4["date"] = ["2004", "2005"]
        self.mp4.save(self.filename)
        mp4 = EasyMP4(self.filename)
        self.failUnlessEqual(mp4["date"], ["2004", "2005"])
        self.mp4["date"] = ["2004", "2005"]
        self.mp4.save(self.filename)
        mp4 = EasyMP4(self.filename)
        self.failUnlessEqual(mp4["date"], ["2004", "2005"])
    def test_write_invalid(self):
        # Unknown keys must raise ValueError for get/set/delete alike.
        self.failUnlessRaises(ValueError, self.mp4.__getitem__, "notvalid")
        self.failUnlessRaises(ValueError, self.mp4.__delitem__, "notvalid")
        self.failUnlessRaises(
            ValueError, self.mp4.__setitem__, "notvalid", "tests")
    def test_numeric(self):
        # Integer-valued keys accept numeric strings only.
        for tag in ["bpm"]:
            self.mp4[tag] = "3"
            self.failUnlessEqual(self.mp4[tag], ["3"])
            self.mp4.save()
            mp4 = EasyMP4(self.filename)
            self.failUnlessEqual(mp4[tag], ["3"])
            del(mp4[tag])
            self.failIf(tag in mp4)
            self.failUnlessRaises(KeyError, mp4.__delitem__, tag)
            self.failUnlessRaises(
                ValueError, self.mp4.__setitem__, tag, "hello")
    def test_numeric_pairs(self):
        # track/disc numbers accept both "n" and "n/total" forms.
        for tag in ["tracknumber", "discnumber"]:
            self.mp4[tag] = "3"
            self.failUnlessEqual(self.mp4[tag], ["3"])
            self.mp4.save()
            mp4 = EasyMP4(self.filename)
            self.failUnlessEqual(mp4[tag], ["3"])
            del(mp4[tag])
            self.failIf(tag in mp4)
            self.failUnlessRaises(KeyError, mp4.__delitem__, tag)
            self.mp4[tag] = "3/10"
            self.failUnlessEqual(self.mp4[tag], ["3/10"])
            self.mp4.save()
            mp4 = EasyMP4(self.filename)
            self.failUnlessEqual(mp4[tag], ["3/10"])
            del(mp4[tag])
            self.failIf(tag in mp4)
            self.failUnlessRaises(KeyError, mp4.__delitem__, tag)
            self.failUnlessRaises(
                ValueError, self.mp4.__setitem__, tag, "hello")
    def tearDown(self):
        os.unlink(self.filename)
add(TEasyMP4)
| {
"content_hash": "4d3c48a07b596f198db9d1efa611d733",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 75,
"avg_line_length": 33.61744966442953,
"alnum_prop": 0.5546017169095628,
"repo_name": "hanvo/MusicCloud",
"id": "8c9d7801dff071fee5d6e2b5e93d73ec221c8106",
"size": "5009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Crawler/Install Files/mutagen-1.22/tests/test_easymp4.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2593783"
},
{
"name": "C++",
"bytes": "27120"
},
{
"name": "CSS",
"bytes": "18398"
},
{
"name": "D",
"bytes": "4176145"
},
{
"name": "Java",
"bytes": "654098"
},
{
"name": "JavaScript",
"bytes": "3992"
},
{
"name": "Objective-C",
"bytes": "530579"
},
{
"name": "PHP",
"bytes": "5512"
},
{
"name": "Python",
"bytes": "6888570"
},
{
"name": "Shell",
"bytes": "4583"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from demo_app.models import Post
class PostTests(APITestCase):
    """CRUD tests for the Post endpoints (DRF function-based views)."""
    def test_post_list(self):
        url = reverse('post_list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_post_create(self):
        url = reverse('post_list')
        data = {'title': 'new post', 'body': 'awesome post'}
        response = self.client.post(url, data=data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_post_get(self):
        Post.objects.create(**{'title': 'new post', 'body': 'awesome post'})
        url = reverse('post_detail', kwargs={'pk': 1})
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Exact-body assertion: assumes serializer field order is stable.
        self.assertEqual(response.content, '{"id": 1, "title": "new post", "body": "awesome post"}')
    def test_post_update(self):
        Post.objects.create(**{'title': 'new post', 'body': 'awesome post'})
        url = reverse('post_detail', kwargs={'pk': 1})
        data = {'title': 'new post updated', 'body': 'awesome post updated'}
        response = self.client.put(url, data=data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.content, '{"id": 1, "title": "new post updated", "body": "awesome post updated"}')
    def test_post_delete(self):
        Post.objects.create(**{'title': 'new post', 'body': 'awesome post'})
        url = reverse('post_detail', kwargs={'pk': 1})
        response = self.client.delete(url, format='json')
        # NOTE(review): DELETE asserted as 200, not the usual 204 -- the
        # custom view presumably returns 200; confirm against the view.
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue(Post.objects.filter(pk=1).count() == 0)
| {
"content_hash": "944006c2c6a86e086f46da977021024c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 116,
"avg_line_length": 48.05128205128205,
"alnum_prop": 0.6392742796157951,
"repo_name": "chris-ramon/django_angularjs_demo_fbv",
"id": "d6ff54131307742146cf7825cebeeeeaf6763400",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/demo_app/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1411"
},
{
"name": "JavaScript",
"bytes": "15258"
},
{
"name": "Python",
"bytes": "13906"
}
],
"symlink_target": ""
} |
from django.db import models
from helpers import create_uid,roll
class Author(models.Model):
    """A player account: credentials plus the character in play."""
    # Session token issued on login; null presumably means logged out
    # -- TODO confirm against the auth code.
    access_token = models.CharField(max_length=18,null=True)
    last_access = models.DateTimeField(auto_now=True)
    username = models.CharField(max_length=50,primary_key=True)
    # NOTE(review): stored at 32 chars, suggesting an MD5 hex digest;
    # confirm hashing scheme before relying on it.
    password = models.CharField(max_length=32)
    character = models.ForeignKey('Character',null=True)
class Item(models.Model):
    """An in-game item (weapon, armor, consumable, ...).

    ``otype``/``mtype`` are opaque type codes; -1 means "unset" -- their
    exact semantics live in the game logic, not here.
    """
    # Bug fix: the default must be the callable itself.  With
    # ``default=create_uid()`` the function ran once at import time, so
    # every new row shared a single uid and collided on the primary key.
    uid = models.CharField(max_length=12, default=create_uid, primary_key=True)
    health = models.IntegerField(default=990)
    power = models.IntegerField(default=10)
    otype = models.IntegerField(default=-1)
    mtype = models.IntegerField(default=-1)
    name = models.CharField(max_length=50)
    value = models.IntegerField(default=0)

    def data(self):
        """Return a plain-dict snapshot of this item for serialization."""
        return {'name': self.name, 'hp': self.health, 'pow': self.power,
                'otype': self.otype, 'mtype': self.mtype,
                'val': self.value, 'uid': self.uid}
class Spell(models.Model):
    """A castable spell; ``stype`` is an opaque type code (-1 = unset)."""
    stype = models.IntegerField(default=-1)
    name = models.CharField(max_length=50)
    power = models.IntegerField(default=0)
class Creature(models.Model):
    """A non-player creature."""
    # Bug fix: pass the callable, not its result -- ``create_uid()``
    # evaluated at import time gave every row the same primary key.
    uid = models.CharField(max_length=12, default=create_uid, primary_key=True)
    name = models.CharField(max_length=25)
    health = models.IntegerField(default=10)
    power = models.IntegerField(default=2)

    def data(self):
        """Return a plain-dict snapshot of this creature.

        Bug fix: the model has no ``hp`` attribute -- the field is
        ``health`` -- so the original ``self.hp`` raised AttributeError.
        """
        return {'name': self.name, 'hp': self.health, 'power': self.power,
                'uid': self.uid}
class Character(models.Model):
    """A player character: stats, equipment, inventory and location."""
    # Bug fix: callable default so each character gets its own uid.
    # ``create_uid()`` would run once at import time and every row
    # would share (and collide on) the same primary key.
    uid = models.CharField(max_length=12, default=create_uid, primary_key=True)
    name = models.CharField(max_length=25)
    '''race = models.CharField(max_length=10)
    x = models.IntegerField(default=0)
    y = models.IntegerField(default=0)'''
    # st/dx/iq/ht: primary stats (presumably strength/dexterity/
    # intelligence/health -- confirm against the game rules).
    st = models.IntegerField(default=1)
    dx = models.IntegerField(default=1)
    iq = models.IntegerField(default=1)
    ht = models.IntegerField(default=1)
    xp = models.IntegerField(default=100)
    hp = models.IntegerField(default=10)
    gold = models.IntegerField(default=50)
    state = models.IntegerField(default=0)
    timer = models.DateTimeField(null=True)
    left = models.ForeignKey(Item, related_name='+', null=True)
    right = models.ForeignKey(Item, related_name='+', null=True)
    armor = models.ForeignKey(Item, related_name='+', null=True)
    inventory = models.ManyToManyField(Item)
    place = models.ForeignKey('Place')

    def data(self):
        """Serialize to a plain dict; optional relations and the timer
        become the string 'None' when unset."""
        tdata = self.timer.strftime('%d/%m/%Y %H:%M') if self.timer else 'None'
        lhand = self.left.uid if self.left else 'None'
        rhand = self.right.uid if self.right else 'None'
        armor = self.armor.uid if self.armor else 'None'
        return {'name': self.name, 'hp': self.hp,
                'stats': (self.st, self.dx, self.iq, self.ht),
                'gold': self.gold, 'state': self.state, 'timer': tdata,
                'lhand': lhand, 'rhand': rhand, 'armor': armor,
                'place': self.place.uid, 'uid': self.uid}
class Place(models.Model):
    """A location in the game world, connected to neighbours via
    ``gates``; ``arena`` marks combat zones.

    NOTE(review): ``data()`` below iterates a bare name ``people``
    (NameError) -- it presumably meant ``self.people.all()``; fix when
    that method can be touched.
    """
    # Bug fix: callable default -- ``create_uid()`` evaluated at import
    # time would reuse one uid for every row and collide on the PK.
    uid = models.CharField(max_length=12, default=create_uid, primary_key=True)
    name = models.CharField(max_length=50)
    people = models.ManyToManyField(Character, related_name='people')
    arena = models.BooleanField(default=False)
    gates = models.ManyToManyField('Place', related_name='gates', symmetrical=True)
    # Six-character biome code; '000000' is the default/unset biome.
    biome = models.CharField(default='000000', max_length=6)
def data(self):
return {'name':self.name,'people':[p.uid for p in people],'isarena':self.arena,'biome':self.biome,'uid':self.uid} | {
"content_hash": "73454918a60229b08f47b20ebae42170",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 121,
"avg_line_length": 41.89156626506024,
"alnum_prop": 0.6804716709807305,
"repo_name": "hgeg/nightblade-prelude",
"id": "0c5a5455fe221db91cae2dbbed7831b7282f7f8b",
"size": "3477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nightblade_prelude_server/prelude/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "134"
},
{
"name": "Python",
"bytes": "19843"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import time
from raiden.settings import (
DEFAULT_SETTLE_TIMEOUT,
DEFAULT_POLL_TIMEOUT,
DEFAULT_REVEAL_TIMEOUT,
)
from raiden.tests.utils.tester_client import (
BlockChainServiceTesterMock,
)
from raiden.tests.fixtures.tester import (
tester_blockgas_limit,
tester_channelmanager_library_address,
tester_nettingchannel_library_address,
tester_registry_address,
tester_state,
)
from raiden.network.transport import UDPTransport
from raiden.tests.utils.network import create_network
from raiden.utils import sha3
from raiden.tests.benchmark.utils import (
print_serialization,
print_slow_function,
print_slow_path,
)
def transfer_speed(num_transfers=100, max_locked=100):  # pylint: disable=too-many-locals
    """Benchmark locked-transfer throughput between two directly
    connected Raiden nodes on a mocked (tester) blockchain.

    :param num_transfers: how many locked transfers to perform.
    :param max_locked: how many transfers may remain locked before the
        oldest secret is revealed to both channel ends.
    Prints transfers/second to stdout.
    """
    channels_per_node = 1
    num_nodes = 2
    num_tokens = 1
    # Deterministic keys/token addresses derived from hashes.
    private_keys = [
        sha3('speed:{}'.format(position))
        for position in range(num_nodes)
    ]
    tokens = [
        sha3('token:{}'.format(number))[:20]
        for number in range(num_tokens)
    ]
    # Transfer amounts cycle 2..100,1; the deposit covers their sum.
    amounts = [
        a % 100 + 1
        for a in range(1, num_transfers + 1)
    ]
    deposit = sum(amounts)
    secrets = [
        str(i)
        for i in range(num_transfers)
    ]
    blockchain_services = list()
    tester = tester_state(
        private_keys[0],
        private_keys,
        tester_blockgas_limit(),
    )
    # NOTE(review): the fixture helpers below receive the
    # ``tester_state`` function itself, not the ``tester`` chain
    # created above -- looks suspicious; confirm it is intentional.
    nettingchannel_library_address = tester_nettingchannel_library_address(
        tester_state,
    )
    channelmanager_library_address = tester_channelmanager_library_address(
        tester_state,
        nettingchannel_library_address,
    )
    registry_address = tester_registry_address(
        tester_state,
        channelmanager_library_address,
    )
    # One blockchain service per node, all pointing at the same registry.
    for privkey in private_keys:
        blockchain = BlockChainServiceTesterMock(
            privkey,
            tester,
            registry_address,
        )
        blockchain_services.append(blockchain)
    registry = blockchain_services[0].registry(registry_address)
    for token in tokens:
        registry.add_token(token)
    apps = create_network(
        private_keys,
        tokens,
        registry_address,
        channels_per_node,
        deposit,
        DEFAULT_SETTLE_TIMEOUT,
        DEFAULT_POLL_TIMEOUT,
        UDPTransport,
        BlockChainServiceTesterMock,
    )
    app0, app1 = apps  # pylint: disable=unbalanced-tuple-unpacking
    # Both ends of the single channel between the two nodes.
    channel0 = app0.raiden.channelgraphs[tokens[0]].address_channel.values()[0]
    channel1 = app1.raiden.channelgraphs[tokens[0]].address_channel.values()[0]
    expiration = app0.raiden.chain.block_number() + DEFAULT_REVEAL_TIMEOUT + 3
    start = time.time()
    for i, amount in enumerate(amounts):
        hashlock = sha3(secrets[i])
        locked_transfer = channel0.create_lockedtransfer(
            amount=amount,
            identifier=1,  # TODO: fill in identifier
            expiration=expiration,
            hashlock=hashlock,
        )
        app0.raiden.sign(locked_transfer)
        # Register on both sides to keep the channel states in sync.
        channel0.register_transfer(locked_transfer)
        channel1.register_transfer(locked_transfer)
        # Keep at most ``max_locked`` transfers pending by revealing the
        # oldest secret once the window is exceeded.
        if i > max_locked:
            idx = i - max_locked
            secret = secrets[idx]
            channel0.register_secret(secret)
            channel1.register_secret(secret)
    elapsed = time.time() - start
    print('%d transfers per second' % (num_transfers / elapsed))
def main():
    """Command-line entry point for the transfer-speed benchmark.

    Options:
        --transfers N    number of locked transfers to perform
        --max-locked N   pending-transfer window before secrets reveal
        -p / --profile   run under GreenletProfiler and dump statistics
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--transfers', default=10000, type=int)
    parser.add_argument('--max-locked', default=100, type=int)
    parser.add_argument('-p', '--profile', default=False, action='store_true')
    # Bug fix: argparse's parse_args() returns a single Namespace; the
    # original ``(args, options) = parser.parse_args()`` tried to
    # unpack it optparse-style and raised TypeError at runtime.
    args = parser.parse_args()
    if args.profile:
        import GreenletProfiler
        # CPU-time clock attributes time per greenlet, not wall time.
        GreenletProfiler.set_clock_type('cpu')
        GreenletProfiler.start()
    transfer_speed(
        num_transfers=args.transfers,
        max_locked=args.max_locked,
    )
    if args.profile:
        GreenletProfiler.stop()
        stats = GreenletProfiler.get_func_stats()
        pstats = GreenletProfiler.convert2pstats(stats)
        print_serialization(pstats)
        print_slow_path(pstats)
        print_slow_function(pstats)
        pstats.sort_stats('time').print_stats()
        # stats.save('profile.callgrind', type='callgrind')
if __name__ == '__main__':
    main()
| {
"content_hash": "35e534af5b62a019493a79e2a3a029fb",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 89,
"avg_line_length": 27.304347826086957,
"alnum_prop": 0.6392174704276615,
"repo_name": "charles-cooper/raiden",
"id": "9fedfa511374106ff010992663152b4377b276d9",
"size": "4420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raiden/tests/benchmark/speed_transfer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5202"
},
{
"name": "Python",
"bytes": "952454"
},
{
"name": "Shell",
"bytes": "4384"
}
],
"symlink_target": ""
} |
import re
from collections import deque
from collections import defaultdict
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.remote.webdriver import WebDriver
def analyze(url=None, driver=None, mode="all", unique=False):
    """
    Analyze a given webpage and return elements grouped by identical actions.
    Raise ValueError if url is None and no page is loaded in the driver.
    :param url: url of website that should be analyzed for same action elements. If none provided, loaded page of driver is used instead.
    :type url: str
    :param driver: selenium driver with or without loaded page
    :type driver: WebDriver
    :param mode: what elements of loaded page should be scanned. "all": analyze all, "href": only href links, "onclick": only onclick location changes
    :type mode: str
    :param unique: If True, also return elements whose action occurred only once on the loaded page
    :type unique: bool
    :return: defaultdict mapping target href -> deque of xpaths of elements triggering it
    :rtype: defaultdict(deque)
    :raises: ValueError
    """
    def get_xpath(node, url):
        """Return an xpath for *node*: prefer its id, then a cheap
        tag-chain xpath; fall back to an exact positional xpath if the
        cheap one is ambiguous."""
        elem_id = node.get_attribute("id")  # renamed: don't shadow builtin id()
        if elem_id:
            return '//*[@id="%s"]' % elem_id
        # Collect [href, own tag, parent tags ... up to <html>].
        xnodes = [node.get_attribute("href"), node.tag_name]
        target = node
        while node.tag_name != "html":
            node = node.find_element_by_xpath('..')
            xnodes.append(node.tag_name)
        xpath = []
        for n, i in enumerate(reversed(xnodes)):
            if n < len(xnodes) - 1:
                xpath.append("/%s" % i)
            else:
                # Last entry is the href; anchor the path on it.
                if i is not None:
                    xpath.append("[contains(@href, \"%s\")]" % i.replace(url, ''))
        xpath = "".join(xpath)
        # Only accept the cheap xpath if it matches exactly one node.
        test = node.find_elements_by_xpath(xpath)
        if len(test) == 1:
            return xpath
        return get_exact_xpath(target)
    def get_exact_xpath(node, force_children=False):
        """Return a positional xpath (tag[k] segments) that uniquely
        identifies *node*; optionally re-scan siblings at every level."""
        path = deque()
        check_children = True
        last_tag = node.tag_name
        array_used = False
        while node.tag_name != 'html':
            p = node.find_element_by_xpath("..")
            if check_children or force_children:
                d = defaultdict(int)
                elements = p.find_elements_by_xpath(last_tag)
                if len(elements) > 1:
                    array_used = True
                    # Find our node among same-tag siblings and record
                    # its 1-based position.
                    for j in elements:
                        d[j.tag_name] += 1
                        if j.text == node.text and j.location == node.location:
                            path.appendleft("%s%s" % (j.tag_name, "[%s]" % d[j.tag_name]))
                            break
                else:
                    path.appendleft(elements[0].tag_name)
                # Once a positional index disambiguated a level, stop
                # scanning siblings above (unless forced).
                if p.tag_name != 'html' and array_used: check_children = False
                last_tag = p.tag_name
                del d
            else: path.appendleft(node.tag_name)
            node = p
        path.appendleft(node.tag_name)
        return "/" + "/".join(path)
    #------------------------------------------------------
    def decorate_url(url, link):
        """Resolve *link* against the loaded page's url when relative.

        Bug fix: the absolute-link check now also recognises
        "https://"; previously https targets were wrongly re-prefixed.
        """
        if not link.startswith(("http://", "https://")):
            if url.endswith("/"):
                link = url + link
            else:
                link = url.rsplit('/', 1)[0] + "/" + link
        return link
    #------------------------------------------------------
    selfdriver = False
    if driver is None:
        # no parameter provided, create the default driver
        driver = webdriver.Chrome()
        selfdriver = True
    condition = True
    if isinstance(driver, webdriver.Chrome):
        condition = driver.current_url == u'data:,'
    elif isinstance(driver, webdriver.Firefox):
        condition = driver.current_url == u'about:blank'
    if url is None and condition:
        raise ValueError("Provided URL is empty!")
    elif url:
        driver.get(url)
    # when server does a redirect the url is mismatched with actual site
    url = driver.current_url
    links = defaultdict(deque)
    # Bug fix: compare strings with == (identity via ``is`` only worked
    # by accident of CPython literal interning; SyntaxWarning on 3.8+).
    if mode == "all" or mode == "href":
        nodes = driver.find_elements_by_tag_name('a')
        for node in nodes:
            links[node.get_attribute('href')].append(get_xpath(node, url))
    if mode == "all" or mode == "onclick":
        onclicks = driver.find_elements_by_xpath('//*[@onclick]')
        for script in onclicks:
            # Extract location = '...' / location = "..." assignments.
            found = re.findall(r"location[ ]*=[ ]*'[^']+'", script.get_attribute('onclick'))
            for loc in found:
                href = loc.split("'")
                links[decorate_url(url, href[1])].append(get_xpath(script, url))
            found = re.findall(r'location[ ]*=[ ]*"[^"]+"', script.get_attribute('onclick'))
            for loc in found:
                href = loc.split('"')
                links[decorate_url(url, href[1])].append(get_xpath(script, url))
    if selfdriver:
        driver.quit()
    if not unique:
        # Drop targets reached from only one element.
        removable = [key for key in links if len(links[key]) < 2]
        for key in removable:
            del links[key]
    return links
def get_same(url=None, driver=None, id=None, xpath=None, mode="all"):
    """
    Analyze a given webpage and return a deque with elements that have the same action as the one identified by id/xpath.
    Raise ValueError if url is None and no page is loaded in the driver.
    Raise ValueError if both id and xpath are None, if either cannot be
    located on the page, or if the selected element is not a link.
    :param url: url of website that should be analyzed for same action elements. If none provided, loaded page of driver is used instead.
    :type url: str
    :param driver: selenium driver with or without loaded page
    :type driver: WebDriver
    :param id: id of an element in a webpage
    :type id: str
    :param xpath: xpath to an element in a webpage (using selenium notation)
    :type xpath: str
    :param mode: what elements of loaded page should be scanned. "all": analyze all, "href": only href links, "onclick": only onclick location changes
    :type mode: str
    :return: deque with elements with the same actions as the one in parameters
    :rtype: deque
    :raises: ValueError
    """
    selfdriver = False
    if driver is None:
        # no parameter provided, create the default driver
        driver = webdriver.Chrome()
        selfdriver = True
    condition = True
    if isinstance(driver, webdriver.Chrome):
        condition = driver.current_url == u'data:,'
    elif isinstance(driver, webdriver.Firefox):
        condition = driver.current_url == u'about:blank'
    if url is None and condition:
        raise ValueError("Provided URL is empty!")
    elif url:
        driver.get(url)
    # when server does a redirect the url is mismatched with actual site
    url = driver.current_url
    # Locate the reference element by id first, then by xpath.
    if id is not None:
        try:
            element = driver.find_element_by_id(id)
        except NoSuchElementException:
            raise ValueError("Id not found.")
    elif xpath is not None:
        try:
            element = driver.find_element_by_xpath(xpath)
        except NoSuchElementException:
            raise ValueError("Xpath target not found.")
    else:
        raise ValueError("No id or xpath were provided.")
    # Resolve the element's target: href attribute first, then any
    # location = '...' / location = "..." assignment in its onclick.
    href = element.get_attribute('href')
    if href is None:
        found = re.findall(r"location[ ]*=[ ]*'[^']+'", element.get_attribute('onclick'))
        for loc in found:
            splited = loc.split("'")
            href = splited[1]
    if href is None:
        found = re.findall(r'location[ ]*=[ ]*"[^"]+"', element.get_attribute('onclick'))
        for loc in found:
            splited = loc.split('"')
            href = splited[1]
    if href is None:
        raise ValueError("Selected element is not a link.")
    # Bug fix: also treat "https://" targets as absolute; previously
    # they were wrongly prefixed with the page url and never matched.
    if not href.startswith(("http://", "https://")):
        href = url + href
    # Group every element on the page by target, then pick our group.
    links = analyze(url, driver, mode, True)
    same = links[href]
    if selfdriver:
        driver.quit()
    return same
| {
"content_hash": "a359eafd55fdb66522c224af54434dbf",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 184,
"avg_line_length": 35.80859375,
"alnum_prop": 0.5669248390967602,
"repo_name": "perfidia/selesame",
"id": "c9fdc57034f0edec1edf694ce808395917f89803",
"size": "9215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/selesame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24757"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
} |
import unittest
from pathlib import Path
from striprtf.striprtf import rtf_to_text
# Locations of the RTF fixtures and their expected plain-text renderings.
RTF_DIR = Path.cwd() / "tests" / "rtf"
TEXT_DIR = Path.cwd() / "tests" / "text"
class TestUnicodeJapanese(unittest.TestCase):
    """Regression test for issue #15 (unicode content in RTF input)."""
    def test_empty(self):
        rtf_path = RTF_DIR / "issue_15.rtf"
        expected_path = TEXT_DIR / "issue_15.txt"
        # Convert the raw RTF, skipping undecodable sequences.
        with rtf_path.open() as fh:
            converted = rtf_to_text(fh.read(), errors="ignore")
        # The expected output fixture is stored as UTF-8 text.
        self.assertEqual(expected_path.read_text(encoding="utf-8"), converted)
| {
"content_hash": "b3af056d0d4ad4c271c70115897208ff",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6471631205673759,
"repo_name": "joshy/striprtf",
"id": "4d41be63919fda930c9ec57886bf8d48af0b805c",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_issue_15.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23378"
},
{
"name": "Rich Text Format",
"bytes": "3039954"
}
],
"symlink_target": ""
} |
import os
import sys
import tempfile
import unittest
# Mutates sys.path.
import test_env
from utils import file_path
from utils import fs
def write_content(path, content):
  """Write binary *content* to the file at *path* via the fs wrapper."""
  with fs.open(path, 'wb') as out_file:
    out_file.write(content)
class FSTest(unittest.TestCase):
  """Tests the fs module's symlink-aware wrappers.

  Exercises fs.symlink/readlink/islink/listdir/open/walk/remove against real
  symlinks created inside a per-test temporary directory.
  """

  @classmethod
  def setUpClass(cls):
    # Symlink support must be enabled up front (enable_symlink() can fail,
    # e.g. without the required privilege) or none of these tests can run.
    if not file_path.enable_symlink():
      raise Exception('Failed to enable symlink support')

  def setUp(self):
    super(FSTest, self).setUp()
    # Created lazily by the tempdir property; None means "never touched".
    self._tempdir = None

  def tearDown(self):
    try:
      # Only clean up if a test actually allocated the temp directory.
      if self._tempdir:
        file_path.rmtree(self._tempdir)
    finally:
      super(FSTest, self).tearDown()

  @property
  def tempdir(self):
    # Lazily-created scratch directory; removed in tearDown().
    if not self._tempdir:
      self._tempdir = tempfile.mkdtemp(prefix='fs_test')
    return self._tempdir

  def test_symlink_relative(self):
    # A symlink to a relative path is valid.
    # /dir
    # /dir/file
    # /ld -> /dir
    # /lf -> /ld/file
    dirpath = os.path.join(self.tempdir, 'dir')
    filepath = os.path.join(dirpath, 'file')
    fs.mkdir(dirpath)
    write_content(filepath, b'hello')
    linkfile = os.path.join(self.tempdir, 'lf')
    linkdir = os.path.join(self.tempdir, 'ld')
    dstfile = os.path.join('ld', 'file')
    fs.symlink(dstfile, linkfile)
    fs.symlink('dir', linkdir)
    # readlink must return the relative target exactly as it was written.
    self.assertEqual(True, fs.islink(linkfile))
    self.assertEqual(True, fs.islink(linkdir))
    self.assertEqual(dstfile, fs.readlink(linkfile))
    self.assertEqual('dir', fs.readlink(linkdir))
    self.assertEqual(['file'], fs.listdir(linkdir))
    # /lf resolves to /dir/file.
    with fs.open(linkfile) as f:
      self.assertEqual('hello', f.read())
    # Ensures that followlinks is respected in walk().
    expected = [
        (self.tempdir, ['dir', 'ld'], ['lf']),
        (dirpath, [], ['file']),
    ]
    actual = [
        (r, sorted(d), sorted(f))
        for r, d, f in sorted(fs.walk(self.tempdir, followlinks=False))
    ]
    self.assertEqual(expected, actual)
    # With followlinks=True the symlinked directory is traversed too.
    expected = [
        (self.tempdir, ['dir', 'ld'], ['lf']),
        (dirpath, [], ['file']),
        (linkdir, [], ['file']),
    ]
    actual = [
        (r, sorted(d), sorted(f))
        for r, d, f in sorted(fs.walk(self.tempdir, followlinks=True))
    ]
    self.assertEqual(expected, actual)

  def test_symlink_absolute(self):
    # A symlink to an absolute path is valid.
    # /dir
    # /dir/file
    # /ld -> /dir
    # /lf -> /ld/file
    dirpath = os.path.join(self.tempdir, 'dir')
    filepath = os.path.join(dirpath, 'file')
    fs.mkdir(dirpath)
    write_content(filepath, b'hello')
    linkfile = os.path.join(self.tempdir, 'lf')
    linkdir = os.path.join(self.tempdir, 'ld')
    dstfile = os.path.join(linkdir, 'file')
    fs.symlink(dstfile, linkfile)
    fs.symlink(dirpath, linkdir)
    # readlink must return the absolute target exactly as it was written.
    self.assertEqual(True, fs.islink(linkfile))
    self.assertEqual(True, fs.islink(linkdir))
    self.assertEqual(dstfile, fs.readlink(linkfile))
    self.assertEqual(dirpath, fs.readlink(linkdir))
    self.assertEqual(['file'], fs.listdir(linkdir))
    # /lf resolves to /dir/file.
    with fs.open(linkfile) as f:
      self.assertEqual('hello', f.read())
    # Ensures that followlinks is respected in walk().
    expected = [
        (self.tempdir, ['dir', 'ld'], ['lf']),
        (dirpath, [], ['file']),
    ]
    actual = [
        (r, sorted(d), sorted(f))
        for r, d, f in sorted(fs.walk(self.tempdir, followlinks=False))
    ]
    self.assertEqual(expected, actual)
    # With followlinks=True the symlinked directory is traversed too.
    expected = [
        (self.tempdir, ['dir', 'ld'], ['lf']),
        (dirpath, [], ['file']),
        (linkdir, [], ['file']),
    ]
    actual = [
        (r, sorted(d), sorted(f))
        for r, d, f in sorted(fs.walk(self.tempdir, followlinks=True))
    ]
    self.assertEqual(expected, actual)

  def test_symlink_missing_destination_rel(self):
    # A symlink to a missing destination is valid and can be read back.
    filepath = 'file'
    linkfile = os.path.join(self.tempdir, 'lf')
    fs.symlink(filepath, linkfile)
    self.assertEqual(True, fs.islink(linkfile))
    self.assertEqual(filepath, fs.readlink(linkfile))

  def test_symlink_missing_destination_abs(self):
    # A symlink to a missing destination is valid and can be read back.
    filepath = os.path.join(self.tempdir, 'file')
    linkfile = os.path.join(self.tempdir, 'lf')
    fs.symlink(filepath, linkfile)
    self.assertEqual(True, fs.islink(linkfile))
    self.assertEqual(filepath, fs.readlink(linkfile))

  def test_symlink_existing(self):
    # Creating a symlink that overrides a file fails.
    filepath = os.path.join(self.tempdir, 'file')
    linkfile = os.path.join(self.tempdir, 'lf')
    write_content(linkfile, b'hello')
    with self.assertRaises(OSError):
      fs.symlink(filepath, linkfile)

  def test_readlink_fail(self):
    # Reading a non-existing symlink fails. Obvious but it's to make sure the
    # Windows part acts the same.
    with self.assertRaises(OSError):
      fs.readlink(os.path.join(self.tempdir, 'not_there'))

  def test_remove_invalid_symlink(self):
    src = os.path.join(self.tempdir, 'src')
    dst = os.path.join(self.tempdir, 'dst')
    os.symlink(src, dst)  # Dangling symlink: src was never created.
    # fs.remove must cope with a link whose target does not exist.
    fs.remove(dst)
if __name__ == '__main__':
  # Hand off to the shared test harness entry point.
  test_env.main()
| {
"content_hash": "554bd5145384ae61294521c200adfcad",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 77,
"avg_line_length": 29.674285714285713,
"alnum_prop": 0.6323897554400154,
"repo_name": "luci/luci-py",
"id": "897436ce8fa55a3a5a7fcb8893fb7e737cf2cb02",
"size": "5406",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "client/tests/fs_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
} |
"""(Re)installs pyxb compiling the opengis bundle in order to access OGC
binding classes.
"""
import argparse
import logging
import os
import shlex
import shutil
import subprocess
import tarfile
import tempfile
import pathlib2
# Probe for an existing PyXB installation that already includes the opengis
# bundle; main() uses this flag to decide whether to (re)install.
try:
    from pyxb.bundles import opengis
    PYXB_AVAILABLE = True
except ImportError as err:
    PYXB_AVAILABLE = False
logger = logging.getLogger(__name__)
def get_parser():
    """Build the command-line argument parser for this script."""
    arg_parser = argparse.ArgumentParser(usage=__doc__)
    arg_parser.add_argument("--verbose", "-v", action="store_true")
    arg_parser.add_argument(
        "--force", "-f",
        action="store_true",
        help=("Reinstall PyXB even if it is already "
              "installed with the OGC bindings"),
    )
    return arg_parser
def main():
    """Entry point: (re)install PyXB with the opengis bundle when needed."""
    args = get_parser().parse_args()
    log_level = logging.DEBUG if args.verbose else logging.WARNING
    logging.basicConfig(level=log_level)
    # Nothing to do unless PyXB is missing or a reinstall was forced.
    if PYXB_AVAILABLE and not args.force:
        logger.debug("PyXB is already installed with the OGC bindings. "
                     "Use the -f flag if you want to force re-installation.")
        return
    logger.debug("Installing PyXB with the OGC bindings...")
    download_dir = pathlib2.Path(tempfile.mkdtemp())
    try:
        install_pyxb(download_dir)
    except Exception as err:
        print(err)
    else:
        print("Successfully installed PyXB with the opengis bundle!")
    finally:
        # Always drop the temporary download directory.
        shutil.rmtree(str(download_dir))
def install_pyxb(download_dir):
    """Download the PyXB sdist into *download_dir*, generate binding bundles
    (common, wssplat, saml20, dc, opengis) and install the package.

    Raises RuntimeError when the downloaded sdist cannot be located, and
    subprocess.CalledProcessError when any external command fails.
    """
    download_command = "pip download {} --dest {}".format(
        _get_declared_pyxb_version(),
        str(download_dir)
    )
    logger.debug("download_command: {}".format(download_command))
    subprocess.check_call(shlex.split(download_command))
    # Locate the downloaded archive by name (case-insensitive match on PYXB).
    for sub_path in download_dir.iterdir():
        if sub_path.is_file() and "PYXB" in sub_path.name.upper():
            downloaded = sub_path
            break
    else:
        # for/else: no matching file was found in the download directory.
        raise RuntimeError("Could not download pyxb to the "
                           "temporary directory")
    _untar_file(downloaded, download_dir)
    # The sdist unpacks into a directory named after the archive sans suffix.
    pyxb_dir = str(download_dir / downloaded.name.replace(".tar.gz", ""))
    env = os.environ.copy()
    # PYXB_ROOT is read by PyXB's maintainer scripts during generation.
    env["PYXB_ROOT"] = pyxb_dir
    genbundles_command = ("/bin/bash {}/maintainer/genbundles common wssplat "
                          "saml20 dc opengis".format(pyxb_dir))
    subprocess.check_call(shlex.split(genbundles_command),
                          cwd=pyxb_dir, env=env)
    subprocess.check_call(["python", "setup.py", "install"],
                          cwd=pyxb_dir, env=env)
def _get_declared_pyxb_version():
return "pyxb"
def _untar_file(path, destination_dir):
if path.is_file() and path.name.endswith(".tar.gz"):
tar_object = tarfile.open(str(path))
tar_object.extractall(path=str(destination_dir))
tar_object.close()
| {
"content_hash": "c284d1a643d81669a5af2fb422b6ce5b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 31.25,
"alnum_prop": 0.6229565217391304,
"repo_name": "pyoseo/django-oseoserver",
"id": "f564eafd95c80e60e9e21e1a18c02de58e83b44f",
"size": "2875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oseoserver/scripts/install_pyxb_ogc_bindings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5653"
},
{
"name": "Python",
"bytes": "282717"
},
{
"name": "Shell",
"bytes": "17370"
}
],
"symlink_target": ""
} |
import os
import click
import logging
import pandas as pd
import glob
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('fine_csv_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, fine_csv_filepath, output_filepath):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).

        Joins the tab-separated violations file (input_filepath) with the
        fine schedule CSV (fine_csv_filepath) and writes the enriched TSV
        to output_filepath.
    """
    logger = logging.getLogger(__name__)
    logger.info('Enriching Dataset')
    # utf-8-sig strips a potential BOM from the fines export.
    fines_df = pd.read_csv(fine_csv_filepath, encoding="utf-8-sig")
    fines_df.columns = [col.lower() for col in fines_df.columns]
    fines_df.drop('shortdesc', axis=1, inplace=True)
    fines_df.rename(columns={'code': 'violation_code'}, inplace=True)
    df = pd.read_csv(input_filepath, sep='\t')
    # Merge on all shared columns; after the rename above the key is
    # presumably 'violation_code' — verify both frames share only that column.
    df = df.merge(fines_df)  # on='violation_code')
    columns_to_drop = ['desc']
    df.drop(columns_to_drop, inplace=True, axis=1, errors='ignore')
    logger.info('Saving Enriched Dataset')
    df.to_csv(output_filepath, sep='\t', index=False)
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    # click parses the CLI arguments declared on main() itself.
    main()
| {
"content_hash": "68b049a3f656ca4aa187f4abca63809f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 36.62222222222222,
"alnum_prop": 0.683252427184466,
"repo_name": "ndanielsen/dc_parking_violations_data",
"id": "c5eed6c1ed96b547e8936ff9a1a545c77bdeafcc",
"size": "1672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/data/add_fines_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6303474"
},
{
"name": "Makefile",
"bytes": "5981"
},
{
"name": "Python",
"bytes": "18819"
}
],
"symlink_target": ""
} |
from tests.helpers import BaseTestCase
class TestCommitCommand(BaseTestCase):
    # NOTE(review): both tests are unwritten placeholders; self.fail keeps the
    # suite red until the behavior is implemented.

    def test_can_commit(self):
        """
        Default use case where user invokes `et commit` with minimal parameters
        """
        self.fail('Not Implemented')

    def test_does_not_work_outside_of_a_linked_project(self):
        """
        The users cwd must be inside of a project
        """
        self.fail('Not Implemented')
| {
"content_hash": "dd8f2ee8ec48e703fd2ca79abeff7104",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 28.4,
"alnum_prop": 0.6314553990610329,
"repo_name": "pnw/env-tracker",
"id": "fbdb37b353126186339d87a080afd86c9c67da44",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_commit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28335"
}
],
"symlink_target": ""
} |
""" Cisco_IOS_XR_Ethernet_SPAN_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR Ethernet\-SPAN package configuration.
This module contains definitions
for the following management objects\:
span\-monitor\-session\: none
This YANG module augments the
Cisco\-IOS\-XR\-ifmgr\-cfg,
Cisco\-IOS\-XR\-l2vpn\-cfg
modules with configuration data.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class SpanDestinationEnum(Enum):
    """
    SpanDestinationEnum
    Span destination
    .. data:: interface = 0
    Destination Interface
    .. data:: pseudowire = 1
    Destination Pseudowire
    .. data:: ipv4_address = 2
    Destination next-hop IPv4 address
    .. data:: ipv6_address = 3
    Destination next-hop IPv6 address
    """

    # Values mirror the YANG-defined destination kinds documented above.
    interface = 0
    pseudowire = 1
    ipv4_address = 2
    ipv6_address = 3

    @staticmethod
    def _meta_info():
        # Deferred import — presumably to avoid import cycles with the
        # generated meta tables; TODO confirm.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_Ethernet_SPAN_cfg as meta
        return meta._meta_table['SpanDestinationEnum']
class SpanMirrorIntervalEnum(Enum):
    """
    SpanMirrorIntervalEnum
    Span mirror interval
    .. data:: Y_512 = 1
    Mirror 1 in every 512 packets
    .. data:: Y_1k = 2
    Mirror 1 in every 1024 packets
    .. data:: Y_2k = 3
    Mirror 1 in every 2048 packets
    .. data:: Y_4k = 4
    Mirror 1 in every 4096 packets
    .. data:: Y_8k = 5
    Mirror 1 in every 8192 packets
    .. data:: Y_16k = 6
    Mirror 1 in every 16384 packets
    """

    # Sampling rates: mirror 1 in every 2^N packets, per the docstring above.
    Y_512 = 1
    Y_1k = 2
    Y_2k = 3
    Y_4k = 4
    Y_8k = 5
    Y_16k = 6

    @staticmethod
    def _meta_info():
        # Deferred import — presumably to avoid import cycles with the
        # generated meta tables; TODO confirm.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_Ethernet_SPAN_cfg as meta
        return meta._meta_table['SpanMirrorIntervalEnum']
class SpanTrafficDirectionEnum(Enum):
    """
    SpanTrafficDirectionEnum
    Span traffic direction
    .. data:: rx_only = 1
    Replicate only received (ingress) traffic
    .. data:: tx_only = 2
    Replicate only transmitted (egress) traffic
    """

    # Direction selectors for replicated traffic.
    rx_only = 1
    tx_only = 2

    @staticmethod
    def _meta_info():
        # Deferred import — presumably to avoid import cycles with the
        # generated meta tables; TODO confirm.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_Ethernet_SPAN_cfg as meta
        return meta._meta_table['SpanTrafficDirectionEnum']
class SpanMonitorSession(object):
    """
    none
    .. attribute:: sessions
    Monitor\-session configuration commands
    **type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_cfg.SpanMonitorSession.Sessions>`
    """

    # NOTE(review): this class follows the generated-binding layout (nested
    # container classes, _common_path XPaths, _meta_table lookups); hand edits
    # are presumably overwritten when bindings are regenerated — confirm.
    _prefix = 'ethernet-span-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.sessions = SpanMonitorSession.Sessions()
        self.sessions.parent = self


    class Sessions(object):
        """
        Monitor\-session configuration commands
        .. attribute:: session
        Configuration for a particular Monitor Session
        **type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_cfg.SpanMonitorSession.Sessions.Session>`
        """

        _prefix = 'ethernet-span-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # YList models the YANG list of Session entries.
            self.session = YList()
            self.session.parent = self
            self.session.name = 'session'


        class Session(object):
            """
            Configuration for a particular Monitor Session
            .. attribute:: session  <key>
            Session Name
            **type**\: str
            **length:** 1..79
            .. attribute:: class_
            Enable a Monitor Session. Setting this item causes the Monitor Session to be created
            **type**\: :py:class:`SpanSessionClassEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_datatypes.SpanSessionClassEnum>`
            **default value**\: ethernet
            .. attribute:: destination
            Specify a destination
            **type**\: :py:class:`Destination <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_cfg.SpanMonitorSession.Sessions.Session.Destination>`
            """

            _prefix = 'ethernet-span-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.session = None
                self.class_ = None
                self.destination = SpanMonitorSession.Sessions.Session.Destination()
                self.destination.parent = self


            class Destination(object):
                """
                Specify a destination
                .. attribute:: destination_interface_name
                Specify the destination interface name
                **type**\: str
                **pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3,4}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
                .. attribute:: destination_ipv4_address
                Specify the destination next\-hop IPv4 address
                **type**\: str
                **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
                .. attribute:: destination_ipv6_address
                Specify the destination next\-hop IPv6 address
                **type**\: str
                **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
                .. attribute:: destination_type
                Specify the type of destination
                **type**\: :py:class:`SpanDestinationEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_Ethernet_SPAN_cfg.SpanDestinationEnum>`
                """

                _prefix = 'ethernet-span-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.destination_interface_name = None
                    self.destination_ipv4_address = None
                    self.destination_ipv6_address = None
                    self.destination_type = None

                @property
                def _common_path(self):
                    # Destination has no list key; its XPath hangs off the
                    # parent Session, which must therefore be set.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path +'/Cisco-IOS-XR-Ethernet-SPAN-cfg:destination'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    # True when any leaf has been assigned a value.
                    if self.destination_interface_name is not None:
                        return True
                    if self.destination_ipv4_address is not None:
                        return True
                    if self.destination_ipv6_address is not None:
                        return True
                    if self.destination_type is not None:
                        return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_Ethernet_SPAN_cfg as meta
                    return meta._meta_table['SpanMonitorSession.Sessions.Session.Destination']['meta_info']

            @property
            def _common_path(self):
                # 'session' is the YANG list key and is embedded in the XPath.
                if self.session is None:
                    raise YPYModelError('Key property session is None')
                return '/Cisco-IOS-XR-Ethernet-SPAN-cfg:span-monitor-session/Cisco-IOS-XR-Ethernet-SPAN-cfg:sessions/Cisco-IOS-XR-Ethernet-SPAN-cfg:session[Cisco-IOS-XR-Ethernet-SPAN-cfg:session = ' + str(self.session) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if self.session is not None:
                    return True
                if self.class_ is not None:
                    return True
                if self.destination is not None and self.destination._has_data():
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_Ethernet_SPAN_cfg as meta
                return meta._meta_table['SpanMonitorSession.Sessions.Session']['meta_info']

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-Ethernet-SPAN-cfg:span-monitor-session/Cisco-IOS-XR-Ethernet-SPAN-cfg:sessions'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # True when any child Session carries data.
            if self.session is not None:
                for child_ref in self.session:
                    if child_ref._has_data():
                        return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_Ethernet_SPAN_cfg as meta
            return meta._meta_table['SpanMonitorSession.Sessions']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-Ethernet-SPAN-cfg:span-monitor-session'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if self.sessions is not None and self.sessions._has_data():
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_Ethernet_SPAN_cfg as meta
        return meta._meta_table['SpanMonitorSession']['meta_info']
| {
"content_hash": "54d8bbdb132c611872e9757caaccc0a6",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 297,
"avg_line_length": 28.453580901856764,
"alnum_prop": 0.5314626643050248,
"repo_name": "111pontes/ydk-py",
"id": "1ee832945e1e0d4373b8c57240d37a1a8a1b7d9d",
"size": "10727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_Ethernet_SPAN_cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
import csv
import re
from encoding import safe_encode, safe_decode
# Matches any newline flavor (CRLF, CR, LF) so cell text can be flattened.
newline_regex = re.compile('\r\n|\r|\n')

# Tab-separated dialect with quoting disabled entirely.
csv.register_dialect('tsv_no_quote', delimiter='\t', quoting=csv.QUOTE_NONE, quotechar='')
def tsv_string(s):
    """Sanitize *s* for a TSV cell: newlines become ', ', tabs become spaces."""
    text = safe_decode(s).strip()
    text = newline_regex.sub(u', ', text)
    text = text.replace(u'\t', u' ')
    return safe_encode(text)
def unicode_csv_reader(filename, **kw):
    """Yield csv.reader rows with every cell decoded as UTF-8 (Python 2)."""
    reader = csv.reader(filename, **kw)
    for row in reader:
        yield [unicode(cell, 'utf-8') for cell in row]
| {
"content_hash": "4f2358b0fd37b6377c240cd1397bd7c9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 93,
"avg_line_length": 28.25,
"alnum_prop": 0.665929203539823,
"repo_name": "openvenues/libpostal",
"id": "783d77925924b979629eab8aa4badef84ecaaf9a",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/geodata/csv_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39379640"
},
{
"name": "C++",
"bytes": "78873"
},
{
"name": "M4",
"bytes": "14888"
},
{
"name": "Makefile",
"bytes": "7817"
},
{
"name": "Python",
"bytes": "799608"
},
{
"name": "Shell",
"bytes": "32951"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# Pelican site configuration (development settings).
AUTHOR = u'Nithesh Mittapally'
SITENAME = u'Code Opinions by Nithesh Mittapally'
SITEURL = ''
PATH = 'content'
THEME = 'opinions'
TIMEZONE = 'America/New_York'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
         ('Python.org', 'http://python.org/'),
         ('Jinja2', 'http://jinja.pocoo.org/'),)
# Social widget
SOCIAL = (('Github', 'https://www.github.com/nithesh'),
          ('LinkedIn', 'https://www.linkedin.com/in/nitheshchandra'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
# Copy content/images verbatim and expose the favicon at the site root.
STATIC_PATHS = ['images']
EXTRA_PATH_METADATA = {'images/favicon.ico': {'path': 'favicon.ico'}}
| {
"content_hash": "acd5b58b3ea351113d4e176cec8a0f03",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 25.833333333333332,
"alnum_prop": 0.6870967741935484,
"repo_name": "nithesh/blog",
"id": "6df311ae25c566c8e311233fd416b87998b61367",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pelicanconf.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
""" twitter related helper functions .. uses tweepy. """
## jsb imports
from jsb.utils.exception import handle_exception
from jsb.utils.pdol import Pdol
from jsb.utils.textutils import html_unescape
from jsb.utils.generic import waitforqueue, strippedtxt, splittxt
from jsb.lib.persist import PlugPersist
from jsb.lib.datadir import getdatadir
from jsb.lib.jsbimport import _import_byfile
from jsb.lib.persist import Persist
from jsb.utils.tinyurl import parseurl, get_tinyurl
## tweepy imports
from jsb.imports import gettweepy
tweepy = gettweepy()
import tweepy.oauth
import tweepy.auth
## basic imports
import logging
import os
import urllib2
import types
## defines

# True once credentials.py has been imported successfully (set in getcreds).
go = False
# Module-wide tweepy OAuthHandler, created lazily in twitterapi().
auth = None
# Module-wide TwitterUsers store, created lazily in get_users()/get_token().
users = None
## twitterapi function
def twitterapi(CONSUMER_KEY, CONSUMER_SECRET, token=None, force=False, *args, **kwargs):
    """ return twitter API object - with or without access token. """
    global auth
    # Create the module-wide OAuth handler once; attach the token if given.
    if not auth:
        auth = tweepy.auth.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    if token:
        auth.set_access_token(token.key, token.secret)
    api_object = tweepy.API(auth, *args, **kwargs)
    if not api_object:
        raise Exception("no api returned - %s" % str(auth))
    logging.warn("twitter api returned: %s" % str(api_object))
    return api_object
## twittertoken function
def twittertoken(CONSUMER_KEY, CONSUMER_SECRET, twitteruser, username):
    """ get access token from stored token string. """
    stored = twitteruser.data.get(username)
    if not stored:
        raise Exception("no token found for %s" % username)
    oauth_token = tweepy.oauth.OAuthToken(CONSUMER_KEY, CONSUMER_SECRET)
    return oauth_token.from_string(stored)
## credentials
def getcreds(datadir=None):
    """Load CONSUMER_KEY/CONSUMER_SECRET from <datadir>/config/credentials.py."""
    if not datadir:
        datadir = getdatadir()
    credpath = datadir + os.sep + "config" + os.sep + "credentials.py"
    try:
        credentials = _import_byfile("credentials", credpath)
        global go
        go = True
    except (IOError, ImportError):
        logging.warn("the twitter plugin needs the credentials.py file in the %s/config dir. see %s/examples" % (datadir, datadir))
        return (None, None)
    logging.warn("found credentials.py")
    return credentials.CONSUMER_KEY, credentials.CONSUMER_SECRET
def getauth(datadir=None):
    """ get auth structure from datadir. """
    if not datadir:
        datadir = getdatadir()
    consumer_key, consumer_secret = getcreds(datadir)
    return tweepy.OAuthHandler(consumer_key, consumer_secret)
## TwitterUsers class
class TwitterUsers(Persist):
    """ manage users tokens. """

    def add(self, user, token):
        """ add a user with his token. """
        key = user.strip().lower()
        self.data[key] = token
        self.save()

    def remove(self, user):
        """ remove a user. """
        key = user.strip().lower()
        if key in self.data:
            del self.data[key]
            self.save()

    def size(self):
        """ return size of twitter users. """
        return len(self.data)

    def __contains__(self, user):
        """ check if user exists. """
        return user.strip().lower() in self.data
def get_users():
    """Return the module-wide TwitterUsers store, creating it on first use."""
    global users
    if not users:
        users = TwitterUsers(getdatadir() + os.sep + "twitter" + os.sep + "users")
    return users
def get_token(username):
    """Return (OAuth token, twitter API object) for *username*.

    Lazily creates the module-wide TwitterUsers store. Raises Exception when
    credentials, the stored token, or the API object cannot be obtained.
    """
    global users
    if not users:
        users = TwitterUsers(getdatadir() + os.sep + "twitter" + os.sep + "users")
    if not users:
        raise Exception("can't get twitter users object")
    key, secret = getcreds(getdatadir())
    if not key: raise Exception(getdatadir())
    if not secret: raise Exception(getdatadir())
    token = twittertoken(key, secret, users, username)
    # BUG FIX: the original error paths referenced undefined names ('user' and
    # 'name'), so a missing token/api raised NameError instead of the intended
    # Exception. Also removed dead 'return' after raise.
    if not token: raise Exception("no token found for %s" % username)
    api = twitterapi(key, secret, token)
    if not api: raise Exception("no api returned for %s" % username)
    return (token, api)
def twitter_out(username, txt, event=None):
    """ post a message on twitter.

    Appends the channel's tags, replaces the first URL with a tinyurl,
    refuses messages over 140 characters and returns the posted status
    (or None when nothing was posted).
    """
    if event and event.chan:
        taglist = event.chan.data.taglist
        if taglist:
            for tag in taglist:
                txt += " %s" % tag
    url = parseurl(txt)
    if url:
        tiny = get_tinyurl(url)
        if tiny and tiny[0]:
            txt = txt.replace(url, tiny[0])
    if len(txt) > 140:
        logging.error("size of twitter message > 140 chars: %s" % txt)
        return None
    # BUG FIX: 'status' was unbound on the no-token/api path, so the final
    # 'return status' raised NameError; initialize it up front.
    status = None
    token, api = get_token(username)
    if token and api:
        status = api.update_status(txt)
        logging.warn("posted 1 tweet (%s chars) for %s" % (len(txt), username))
    else:
        logging.error("no token or API available")
    return status
| {
"content_hash": "7fd6e4e47b0fc7de25bd19a965e6a377",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 131,
"avg_line_length": 31.727272727272727,
"alnum_prop": 0.6537359488648887,
"repo_name": "Petraea/jsonbot",
"id": "a0a3ebe05db68896f1786ed5cadba37dc9f0550f",
"size": "4565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsb/utils/twitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36140"
},
{
"name": "JavaScript",
"bytes": "42430"
},
{
"name": "Python",
"bytes": "3234788"
},
{
"name": "Shell",
"bytes": "1874"
}
],
"symlink_target": ""
} |
import numpy as np
def bellman_ford(n, edges):
    """Find a negative-weight cycle in a directed graph, if one exists.

    Args:
        n: number of vertices, labelled 0..n-1.
        edges: list of (v1, v2, weight) directed edges.

    Returns:
        (cycle, op): cycle is a list of vertices forming a negative cycle,
        or None when no negative cycle exists; op is the 3-element
        instrumentation counter incremented throughout the body
        (op[0]: comparisons, op[1]: restart passes, op[2]: other ops).
    """
    op = [0, 0, 0]
    distance = np.empty(n)
    # np.inf instead of np.infty: the latter alias was removed in NumPy 2.0.
    distance[:] = np.inf
    predecessor = np.empty(n, dtype='int')
    predecessor[:] = -1
    op[2] += 4
    while True:
        # start from random vertice as we don't care about actual path
        distance[edges[0][0]] = 0
        predecessor[edges[0][0]] = edges[0][0]
        op[2] += 2
        for i in range(n):
            # introduce counter to stop when no changes done
            changes = 0
            op[2] += 2
            for v1, v2, weight in edges:
                op[2] += 1
                op[0] += 1
                if distance[v1] + weight < distance[v2]:
                    distance[v2] = distance[v1] + weight
                    predecessor[v2] = v1
                    changes += 1
                    op[2] += 4
            # early stop if all distances processed
            op[0] += 1
            if changes == 0:
                break
        for v1, v2, weight in edges:
            op[2] += 1
            op[0] += 1
            if distance[v1] + weight < distance[v2]:
                # negative cycle found: walk predecessors until one repeats
                current_vertice = v2
                current_vertice_predecessor = predecessor[current_vertice]
                cycle = [current_vertice]
                op[2] += 4
                while current_vertice_predecessor not in cycle:
                    cycle.append(current_vertice_predecessor)
                    current_vertice = current_vertice_predecessor
                    current_vertice_predecessor = predecessor[current_vertice]
                    op[2] += 4
                current_vertice_predecessor_index = cycle.index(current_vertice_predecessor)
                cycle = cycle[current_vertice_predecessor_index:]
                # BUG FIX: the original `cycle[::-1]` built a reversed copy and
                # discarded it; reverse in place so the cycle is actually
                # returned in reversed (forward-edge) order as intended.
                cycle.reverse()
                op[2] += 3
                return cycle, op
        # when no cycles found, check if there any vertices with distance infinity (invisible from starting point)
        edges2 = []
        op[2] += 1
        for edge in edges:
            op[2] += 1
            op[0] += 1
            if distance[edge[0]] == np.inf:
                edges2.append(edge)
                op[2] += 1
        edges = edges2
        op[2] += 1
        op[0] += 1
        # if no invisible vertices
        if len(edges) == 0:
            break
        # if yes run again, re-counting how many vertices are still unreached
        n = 0
        op[1] += 1
        for d in distance:
            op[2] += 1
            op[0] += 1
            if d == np.inf:
                n += 1
                op[2] += 1
    return None, op
def floyd_warshall(adj_matrix):
    """Floyd-Warshall all-pairs relaxation with negative-cycle detection.

    Args:
        adj_matrix: square numpy array of edge weights (np.infty = no edge).

    Returns:
        (cycle, op): cycle is a list of vertices on a negative cycle, or
        None when no diagonal entry goes negative; op is the 3-element
        instrumentation counter incremented throughout the body.
    """
    op = [0, 0, 0]
    # init working structures
    n = adj_matrix.shape[0]
    distance = np.copy(adj_matrix)
    predecessor = np.empty((n, n), dtype='int')
    predecessor[:] = -1
    op[2] += 4
    # initialization
    for v1 in range(n):
        for v2 in range(n):
            weight = adj_matrix[v1, v2]
            op[2] += 1
            op[0] += 1
            if weight != np.infty:
                predecessor[v1, v2] = v1
                op[2] += 1
        # self-distance starts at zero for every vertex
        distance[v1, v1] = 0
        op[2] += 1
    # next steps
    need_stop = False
    diagonal = -1
    op[2] += 2
    for pitstop in range(n):
        op[2] += 1
        for v1 in range(n):
            op[2] += 1
            for v2 in range(n):
                distance_with_pitstop = distance[v1, pitstop] + distance[pitstop, v2]
                op[2] += 2
                op[0] += 1
                if distance_with_pitstop < distance[v1, v2]:
                    distance[v1, v2] = distance_with_pitstop
                    predecessor[v1, v2] = pitstop
                    op[2] += 2
                # early stop if diagonal have negative value
                op[0] += 2
                if v1 == v2 and distance[v1, v2] < 0:
                    need_stop = True
                    diagonal = v1
                    op[2] += 2
                    break
            if need_stop:
                break
        if need_stop:
            break
    # check the actual negative cycle
    op[0] += 1
    if diagonal != -1:
        current_vertice_index = diagonal
        previous_vertice_index = diagonal
        cycle = [current_vertice_index]
        op[2] += 3
        # NOTE(review): previous is overwritten before current advances, so
        # each step reads predecessor[c, c] — this looks suspicious; verify
        # the intended walk order against a known negative-cycle example.
        while predecessor[current_vertice_index, previous_vertice_index] not in cycle:
            previous_vertice_index = current_vertice_index
            current_vertice_index = predecessor[current_vertice_index, previous_vertice_index]
            cycle.append(current_vertice_index)
            op[2] += 4
        return cycle, op
    return None, op
| {
"content_hash": "1a6a7a150acbfcbdfe013828ea7dfe52",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 114,
"avg_line_length": 27.75595238095238,
"alnum_prop": 0.4625777396525842,
"repo_name": "rudnitskih/ucu-adc-coursework",
"id": "cf161cfce5a3b77d172ccb7aeeae4d634d7bb031",
"size": "4663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphs/negative-cycle/negative_cycle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "13861"
},
{
"name": "Jupyter Notebook",
"bytes": "435683"
},
{
"name": "Python",
"bytes": "32002"
}
],
"symlink_target": ""
} |
from google.cloud import monitoring_v3
def sample_list_notification_channel_descriptors():
    """List notification channel descriptors and print each entry."""
    channel_client = monitoring_v3.NotificationChannelServiceClient()

    # Build the list request for the target resource name.
    list_request = monitoring_v3.ListNotificationChannelDescriptorsRequest(
        name="name_value",
    )

    # Iterate the paged response, printing every descriptor.
    pages = channel_client.list_notification_channel_descriptors(request=list_request)
    for descriptor in pages:
        print(descriptor)
# [END monitoring_v3_generated_NotificationChannelService_ListNotificationChannelDescriptors_sync]
| {
"content_hash": "7e3a2881eafabaad5575fc38ce8fcca4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 98,
"avg_line_length": 30.35,
"alnum_prop": 0.7528830313014827,
"repo_name": "googleapis/python-monitoring",
"id": "e3dc263cddfc4e4ec26307dba414daad6ea9ba2e",
"size": "2053",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/monitoring_v3_generated_notification_channel_service_list_notification_channel_descriptors_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2375818"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
"""
Class for spherical harmonic coefficients of the gravitational potential.
"""
import numpy as _np
import matplotlib as _mpl
import matplotlib.pyplot as _plt
from mpl_toolkits.axes_grid1 import make_axes_locatable as _make_axes_locatable
import copy as _copy
import warnings as _warnings
import xarray as _xr
from scipy.special import factorial as _factorial
import gzip as _gzip
import shutil as _shutil
from .shcoeffs import SHCoeffs as _SHCoeffs
from .shcoeffs import SHRealCoeffs as _SHRealCoeffs
from .shgrid import DHRealGrid as _DHRealGrid
from .shgravgrid import SHGravGrid as _SHGravGrid
from .shtensor import SHGravTensor as _SHGravTensor
from .shgeoid import SHGeoid as _SHGeoid
from ..constants import G as _G
from ..spectralanalysis import spectrum as _spectrum
from ..spectralanalysis import cross_spectrum as _cross_spectrum
from ..shio import convert as _convert
from ..shio import shread as _shread
from ..shio import shwrite as _shwrite
from ..shio import read_dov as _read_dov
from ..shio import write_dov as _write_dov
from ..shio import read_bshc as _read_bshc
from ..shio import write_bshc as _write_bshc
from ..shio import read_icgem_gfc as _read_icgem_gfc
from ..shio import write_icgem_gfc as _write_icgem_gfc
from ..shtools import CilmPlusRhoHDH as _CilmPlusRhoHDH
from ..shtools import CilmPlusDH as _CilmPlusDH
from ..shtools import MakeGravGridDH as _MakeGravGridDH
from ..shtools import MakeGravGradGridDH as _MakeGravGradGridDH
from ..shtools import MakeGeoidGridDH as _MakeGeoidGridDH
from ..shtools import djpi2 as _djpi2
from ..shtools import MakeGravGridPoint as _MakeGravGridPoint
from ..backends import backend_module
from ..backends import preferred_backend
class SHGravCoeffs(object):
"""
Spherical harmonic coefficients class for the gravitational potential.
The coefficients of this class can be initialized using one of the four
constructor methods:
x = SHGravCoeffs.from_array(array, gm, r0)
x = SHGravCoeffs.from_random(powerspectrum, gm, r0)
x = SHGravCoeffs.from_zeros(lmax, gm, r0)
x = SHGravCoeffs.from_file('fname.dat')
x = SHGravCoeffs.from_netcdf('ncname.nc')
x = SHGravCoeffs.from_shape(grid, rho, gm)
    The normalization convention of the input coefficients is specified
by the optional normalization and csphase parameters, which take the
following values:
normalization : '4pi' (default), geodesy 4-pi normalized.
: 'ortho', orthonormalized.
: 'schmidt', Schmidt semi-normalized.
: 'unnorm', unnormalized.
    csphase       : 1 (default), exclude the Condon-Shortley phase factor.
: -1, include the Condon-Shortley phase factor.
See the documentation for each constructor method for further options.
Once initialized, each class instance defines the following class
attributes:
lmax : The maximum spherical harmonic degree of the coefficients.
coeffs : The raw coefficients with the specified normalization and
csphase conventions.
errors : The uncertainties of the spherical harmonic coefficients.
error_kind : An arbitrary string describing the kind of errors, such as
'unknown', 'unspecified', 'calibrated', 'formal' or None.
    gm            : The gravitational constant times the mass that is
                    associated with the gravitational potential coefficients.
r0 : The reference radius of the gravitational potential
coefficients.
omega : The angular rotation rate of the body.
normalization : The normalization of the coefficients: '4pi', 'ortho',
'schmidt', or 'unnorm'.
csphase : Defines whether the Condon-Shortley phase is used (1)
or not (-1).
mask : A boolean mask that is True for the permissible values of
degree l and order m.
kind : The coefficient data type (only 'real' is permissible).
name : The name of the dataset.
epoch : The epoch time of the spherical harmonic coefficients.
header : A list of values (of type str) from the header line of the
input file used to initialize the class (for 'shtools'
and 'dov' formatted files only).
header2 : A list of values (of type str) from the second header line
of the input file used to initialize the class (for
'shtools' and 'dov' formatted files only).
Each class instance provides the following methods:
degrees() : Return an array listing the spherical harmonic
degrees from 0 to lmax.
spectrum() : Return the spectrum of the function as a function
of spherical harmonic degree.
admittance() : Return the admittance with an input topography
function.
correlation() : Return the spectral correlation with another
function.
admitcorr() : Return the admittance and spectral correlation with
an input topography function.
set_omega() : Set the angular rotation rate of the body.
set_coeffs() : Set coefficients in-place to specified values.
change_ref() : Return a new class instance referenced to a
different gm, or r0.
rotate() : Rotate the coordinate system used to express the
spherical harmonic coefficients and return a new
class instance.
convert() : Return a new class instance using a different
normalization convention.
pad() : Return a new class instance that is zero padded or
truncated to a different lmax.
expand() : Calculate the three vector components of the
gravity field, the total field, and the
gravitational potential, and return an SHGravGrid
class instance.
mass : Return the mass of the planet.
center_of_mass : Return coordinates of the center of mass of the
planet.
inertia_tensor() : Return an array of the inertia tensor.
tensor() : Calculate the 9 components of the gravity tensor
and return an SHGravTensor class instance.
geoid() : Calculate the height of the geoid and return an
SHGeoid class instance.
plot_spectrum() : Plot the spectrum as a function of spherical
harmonic degree.
plot_spectrum2d() : Plot the 2D spectrum of all spherical harmonic
degrees and orders.
plot_correlation() : Plot the spectral correlation with another
function.
plot_admittance() : Plot the admittance with an input topography
function.
plot_admitcorr() : Plot the admittance and spectral correlation with
an input topography function.
to_array() : Return an array of spherical harmonic coefficients
with a different normalization convention.
to_file() : Save the spherical harmonic coefficients as a file.
to_netcdf() : Save raw spherical harmonic coefficients as a
netcdf file.
copy() : Return a copy of the class instance.
info() : Print a summary of the data stored in the
SHGravCoeffs instance.
"""
def __init__(self):
"""Unused constructor of the super class."""
print('Initialize the class using one of the class methods:\n'
'>>> pyshtools.SHGravCoeffs.from_array\n'
'>>> pyshtools.SHGravCoeffs.from_random\n'
'>>> pyshtools.SHGravCoeffs.from_zeros\n'
'>>> pyshtools.SHGravCoeffs.from_file\n'
'>>> pyshtools.SHGravCoeffs.from_netcdf\n'
'>>> pyshtools.SHGravCoeffs.from_shape\n')
# ---- Factory methods ----
@classmethod
def from_array(self, coeffs, gm, r0, omega=None, errors=None,
error_kind=None, normalization='4pi', csphase=1, lmax=None,
set_degree0=True, name=None, epoch=None, copy=True):
"""
Initialize the class with spherical harmonic coefficients from an input
array.
Usage
-----
x = SHGravCoeffs.from_array(array, gm, r0, [omega, errors, error_kind,
normalization, csphase,
lmax, set_degree0, name,
epoch, copy])
Returns
-------
x : SHGravCoeffs class instance.
Parameters
----------
array : ndarray, shape (2, lmaxin+1, lmaxin+1).
The input spherical harmonic coefficients.
gm : float
The gravitational constant times the mass that is associated with
the gravitational potential coefficients.
mass : float
The mass of the planet in kg.
r0 : float
The reference radius of the spherical harmonic coefficients.
omega : float, optional, default = None
The angular rotation rate of the body.
errors : ndarray, optional, default = None
The uncertainties of the spherical harmonic coefficients.
error_kind : str, optional, default = None
An arbitrary string describing the kind of errors, such as None,
'unspecified', 'calibrated' or 'formal'.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = None
The maximum spherical harmonic degree to include in the returned
class instance. This must be less than or equal to lmaxin.
set_degree0 : bool, optional, default = True
If the degree-0 coefficient is zero, set this to 1.
name : str, optional, default = None
The name of the dataset.
epoch : str or float, optional, default = None
The epoch time of the spherical harmonic coefficients as given by
the format YYYYMMDD.DD.
copy : bool, optional, default = True
If True, make a copy of array when initializing the class instance.
If False, initialize the class instance with a reference to array.
Notes
-----
If the degree-0 term of the input array is equal to zero, it will be
set to 1.
"""
if _np.iscomplexobj(coeffs):
raise TypeError('The input array must be real.')
if type(normalization) != str:
raise ValueError('normalization must be a string. '
'Input type is {:s}.'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', "
"or 'unnorm'. Input value is {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value is {:s}."
.format(repr(csphase))
)
if errors is not None:
if coeffs.shape != errors.shape:
raise ValueError(
"The shape of coeffs and errors must be the same."
"Shape of coeffs = {:s}, shape of errors = {:s}."
.format(repr(coeffs.shape), repr(coeffs.errors))
)
if error_kind is None:
error_kind = 'unspecified'
lmaxin = coeffs.shape[1] - 1
if lmax is None:
lmax = lmaxin
else:
if lmax > lmaxin:
lmax = lmaxin
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients "
"are stable only for degrees less than or equal "
"to 85. lmax for the coefficients will be set to "
"85. Input value is {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
if coeffs[0, 0, 0] == 0 and set_degree0:
coeffs[0, 0, 0] = 1.0
if errors is not None:
clm = SHGravRealCoeffs(coeffs[:, 0:lmax+1, 0:lmax+1], gm=gm, r0=r0,
omega=omega, errors=errors[:, 0:lmax+1,
0:lmax+1],
error_kind=error_kind,
normalization=normalization.lower(),
csphase=csphase, name=name, epoch=epoch,
copy=copy)
else:
clm = SHGravRealCoeffs(coeffs[:, 0:lmax+1, 0:lmax+1], gm=gm, r0=r0,
omega=omega,
normalization=normalization.lower(),
csphase=csphase, name=name, epoch=epoch,
copy=copy)
return clm
@classmethod
def from_zeros(self, lmax, gm, r0, omega=None, errors=None,
error_kind=None, normalization='4pi', csphase=1,
name=None, epoch=None):
"""
Initialize the class with spherical harmonic coefficients set to zero
from degree 1 to lmax, and set the degree 0 term to 1.
Usage
-----
x = SHGravCoeffs.from_zeros(lmax, gm, r0, [omega, errors, error_kind,
normalization, csphase,
name, epoch])
Returns
-------
x : SHGravCoeffs class instance.
Parameters
----------
lmax : int
The maximum spherical harmonic degree l of the coefficients.
gm : float
The gravitational constant times the mass that is associated with
the gravitational potential coefficients.
r0 : float
The reference radius of the spherical harmonic coefficients.
omega : float, optional, default = None
The angular rotation rate of the body.
errors : bool, optional, default = None
If True, initialize the attribute errors with zeros.
error_kind : str, optional, default = None
An arbitrary string describing the kind of errors, such as None,
'unspecified', 'calibrated' or 'formal'.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
name : str, optional, default = None
The name of the dataset.
epoch : str or float, optional, default = None
The epoch time of the spherical harmonic coefficients as given by
the format YYYYMMDD.DD.
"""
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', "
"or 'unnorm'. Input value is {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value is {:s}."
.format(repr(csphase))
)
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients "
"are stable only for degrees less than or equal "
"to 85. lmax for the coefficients will be set to "
"85. Input value is {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
coeffs = _np.zeros((2, lmax + 1, lmax + 1))
coeffs[0, 0, 0] = 1.0
if errors is True:
error_coeffs = _np.zeros((2, lmax + 1, lmax + 1))
if error_kind is None:
error_kind = 'unspecified'
else:
error_coeffs = None
clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
errors=error_coeffs, error_kind=error_kind,
normalization=normalization.lower(),
csphase=csphase, name=name, epoch=epoch)
return clm
@classmethod
def from_file(self, fname, format='shtools', gm=None, r0=None,
omega=None, lmax=None, normalization='4pi', skip=0,
header=True, header2=False, errors=None, error_kind=None,
csphase=1, r0_index=0, gm_index=1, omega_index=None,
header_units='m', set_degree0=True, name=None, epoch=None,
encoding=None, quiet=False, **kwargs):
"""
Initialize the class with spherical harmonic coefficients from a file.
Usage
-----
x = SHGravCoeffs.from_file(filename, [format='shtools' or 'dov', gm,
r0, omega, lmax, normalization, csphase,
skip, header, header2, errors, error_kind,
gm_index, r0_index, omega_index,
header_units, set_degree0, name, encoding])
x = SHGravCoeffs.from_file(filename, format='icgem', [lmax, omega,
normalization, csphase, errors, set_degree0,
name, name, epoch, encoding, quiet])
x = SHGravCoeffs.from_file(filename, format='bshc', gm, r0, [lmax,
omega, normalization, csphase, set_degree0,
name])
x = SHGravCoeffs.from_file(filename, format='npy', gm, r0, [lmax,
omega, normalization, csphase, set_degree0,
name, **kwargs])
Returns
-------
x : SHGravCoeffs class instance.
Parameters
----------
filename : str
File name or URL containing the spherical harmonic coefficients.
filename will be treated as a URL if it starts with 'http://',
'https://', or 'ftp://'. For 'shtools', 'icgem' and 'bshc'
formatted files, if filename ends with '.gz' or '.zip' (or if the
path contains '/zip/'), the file will be uncompressed before
parsing.
format : str, optional, default = 'shtools'
'shtools' for generic text files, 'dov' for [degree, order, value]
text files, 'icgem' for ICGEM GFC formatted files, 'bshc' for
binary spherical harmonic coefficient files, or 'npy' for binary
numpy files.
lmax : int, optional, default = None
The maximum spherical harmonic degree to read from the file. The
default is to read the entire file.
header : bool, optional, default = True
If True, read a list of values from the header line of an 'shtools'
or 'dov' formatted file. If two header lines are present, the
second contains values for r0, gm, and omega.
header2 : bool, optional, default = False
If True, read a list of values from a second header line of an
'shtools' or 'dov' formatted file. If two header lines are present,
the second contains values for r0, gm, and omega.
errors : bool or str, optional, default = None
For 'shtools' or 'dov' formatted files: if True, read and return
the spherical harmonic coefficients of the errors. For 'icgem'
formatted files, specify the type of error to return: 'calibrated'
or 'formal'.
error_kind : str, optional, default = None
For 'shtools' and 'dov' formatted files: An arbitrary string
describing the kind of errors, such as None, 'unspecified',
'calibrated' or 'formal'.
r0_index : int, optional, default = 0
For 'shtools' and 'dov' formatted files, r0 will be set using the
value from the last header line with this index.
gm_index : int, optional, default = 1
For 'shtools' and 'dov' formatted files, gm will be set using the
value from the last header line with this index.
omega_index : int, optional, default = None
For 'shtools' and 'dov' formatted files, omega will be set using
the value from the last header line with this index.
gm : float, optional, default = None
The gravitational constant time the mass that is associated with
the gravitational potential coefficients.
r0 : float, optional, default = None
The reference radius of the spherical harmonic coefficients.
omega : float, optional, default = None
The angular rotation rate of the body.
header_units : str, optional, default = 'm'
The units used for r0 and gm in the header line of an 'shtools'
formatted file: 'm' or 'km'. If 'km', the values of r0 and gm will
be converted to meters.
set_degree0 : bool, optional, default = True
If the degree-0 coefficient is zero, set this to 1.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
skip : int, optional, default = 0
Number of lines to skip at the beginning of the file for 'shtools'
formatted files.
name : str, optional, default = None
The name of the dataset.
epoch : str or float, optional, default = None
The epoch time of the spherical harmonic coefficients as given by
the format YYYYMMDD.DD. If format is 'icgem' and epoch is None,
the reference epoch t0 of the model will be used. Epoch is required
for 'icgem' v2.0 formatted files.
encoding : str, optional, default = None
Encoding of the input file when format is 'shtools', 'dov' or
'icgem'. The default is to use the system default.
quiet : bool, default = False
If True, suppress warnings about undefined keywords when reading
ICGEM formatted files.
**kwargs : keyword argument list, optional for format = 'npy'
Keyword arguments of numpy.load() when format is 'npy'.
Notes
-----
Supported file formats:
'shtools' (see pyshtools.shio.shread)
'dov' (see pyshtools.shio.read_dov)
'icgem' (see pyshtools.shio.read_icgem_gfc)
'bshc' (see pyshtools.shio.read_bshc)
'npy' (see numpy.load)
If the degree 0 term of the file is zero (or not specified), this will
by default be set to 1.
For 'shtools', 'dov', 'icgem' and 'bshc' formatted files, if filename
starts with 'http://', 'https://', or 'ftp://', the file will be
treated as a URL. In this case, the file will be downloaded in its
entirety before it is parsed. If the filename ends with '.gz' or '.zip'
(or if the path contains '/zip/'), the file will be automatically
uncompressed before parsing. For zip files, archives with only a single
file are supported. Note that reading '.gz' and '.zip' files in
'shtools' format will be extremely slow if lmax is not specified.
For 'shtools' and 'dov' formatted files, the optional parameter `skip`
specifies how many lines should be skipped before attempting to parse
the file, the optional parameter `header` and `header2` specifies
whether to read a list of values from one or two header lines, and the
optional parameter `lmax` specifies the maximum degree to read from the
file. If header lines are read, r0_index, gm_index, and omega_index,
are used as the indices to set r0, gm, and omega from the last header
line. If header_unit is specified as 'km', the values of r0 and gm that
are read from the header will be converted to meters.
"""
error_coeffs = None
header_list = None
header2_list = None
if not header:
r0_index = None
gm_index = None
if type(normalization) != str:
raise ValueError('normalization must be a string. '
'Input type is {:s}.'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The input normalization must be '4pi', 'ortho', 'schmidt', "
"or 'unnorm'. Provided value is {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value is {:s}."
.format(repr(csphase))
)
if format == 'shtools' or format == 'dov':
if header_units.lower() not in ('m', 'km'):
raise ValueError("header_units can be only 'm', or 'km'. "
"Input value is {:s}."
.format(repr(header_units)))
if r0_index is not None and r0 is not None:
raise ValueError('Can not specify both r0_index and r0.')
if gm_index is not None and gm is not None:
raise ValueError('Can not specify both gm_index and gm.')
if omega_index is not None and omega is not None:
raise ValueError('Can not specify both omega_index and omega,')
if header is False and (r0 is None or gm is None):
raise ValueError('If header is False, r0 and gm must be '
'specified.')
if format.lower() == 'shtools' or format.lower() == 'dov':
if format.lower() == 'shtools':
read_func = _shread
else:
read_func = _read_dov
if header is True:
if errors is True:
if header2:
coeffs, error_coeffs, lmaxout, header_list, \
header2_list = read_func(fname, lmax=lmax,
skip=skip, header=True,
header2=True,
error=True,
encoding=encoding)
else:
coeffs, error_coeffs, lmaxout, header_list = read_func(
fname, lmax=lmax, skip=skip, header=True,
error=True, encoding=encoding)
else:
if header2:
coeffs, lmaxout, header_list, header2_list = read_func(
fname, lmax=lmax, skip=skip, header=True,
header2=True, encoding=encoding)
else:
coeffs, lmaxout, header_list = read_func(
fname, lmax=lmax, skip=skip, header=True,
encoding=encoding)
if r0_index is not None:
if header2:
r0 = float(header2_list[r0_index])
else:
r0 = float(header_list[r0_index])
if gm_index is not None:
if header2:
gm = float(header2_list[gm_index])
else:
gm = float(header_list[gm_index])
if omega_index is not None:
if header2:
omega = float(header2_list[omega_index])
else:
omega = float(header_list[omega_index])
if header_units.lower() == 'km':
r0 *= 1.e3
gm *= 1.e9
else:
if errors is True:
coeffs, error_coeffs, lmaxout = read_func(
fname, lmax=lmax, error=True, skip=skip,
encoding=encoding)
else:
coeffs, lmaxout = read_func(fname, lmax=lmax, skip=skip,
encoding=encoding)
if errors is True and error_kind is None:
error_kind = 'unspecified'
elif format.lower() == 'bshc':
if gm is None or r0 is None:
raise ValueError('For binary bshc files, gm and r0 must be '
'specified.')
coeffs, lmaxout = _read_bshc(fname, lmax=lmax)
elif format.lower() == 'icgem':
valid_err = ('unknown', 'calibrated', 'formal')
if errors is False or errors is None:
coeffs, gm, r0 = _read_icgem_gfc(filename=fname,
errors=None, lmax=lmax,
epoch=epoch,
encoding=encoding,
quiet=quiet)
elif errors in valid_err:
coeffs, gm, r0, error_coeffs = _read_icgem_gfc(
filename=fname, errors=errors, lmax=lmax, epoch=epoch,
encoding=encoding, quiet=quiet)
error_kind = errors
else:
raise ValueError('errors must be among: {}. '
'Input value is {:s}.'
.format(valid_err, repr(errors)))
lmaxout = coeffs.shape[1] - 1
elif format.lower() == 'npy':
if gm is None or r0 is None:
raise ValueError('For binary npy files, gm and r0 must be '
'specified.')
coeffs = _np.load(fname, **kwargs)
lmaxout = coeffs.shape[1] - 1
if lmax is not None:
if lmax < lmaxout:
coeffs = coeffs[:, :lmax+1, :lmax+1]
lmaxout = lmax
else:
raise NotImplementedError(
'format={:s} not implemented'.format(repr(format)))
if _np.iscomplexobj(coeffs):
raise TypeError('The input coefficients must be real.')
if normalization.lower() == 'unnorm' and lmaxout > 85:
_warnings.warn("Calculations using unnormalized coefficients "
"are stable only for degrees less than or equal "
"to 85. lmax for the coefficients will be set to "
"85. Input value is {:d}.".format(lmaxout),
category=RuntimeWarning)
lmaxout = 85
coeffs = coeffs[:, :lmaxout+1, :lmaxout+1]
if coeffs[0, 0, 0] == 0 and set_degree0:
coeffs[0, 0, 0] = 1.0
clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
errors=error_coeffs, error_kind=error_kind,
normalization=normalization.lower(),
csphase=csphase, header=header_list,
header2=header2_list, name=name, epoch=epoch)
return clm
@classmethod
def from_random(self, power, gm, r0, omega=None, function='geoid',
lmax=None, normalization='4pi', csphase=1,
exact_power=False, power_unit='per_l', name=None,
epoch=None):
"""
Initialize the class of gravitational potential spherical harmonic
coefficients as random variables with a given spectrum.
Usage
-----
x = SHGravCoeffs.from_random(power, gm, r0, [omega, function, lmax,
normalization,
csphase, exact_power,
power_unit, name, epoch])
Returns
-------
x : SHGravCoeffs class instance.
Parameters
----------
power : ndarray, shape (L+1)
numpy array of shape (L+1) that specifies the expected power
spectrum of the output function, where L is the maximum spherical
harmonic bandwidth. By default, the input power spectrum represents
the power of all angular orders of the geoid as a function of
spherical harmonic degree (see function and power_unit).
gm : float
The gravitational constant times the mass that is associated with
the gravitational potential coefficients.
r0 : float
The reference radius of the spherical harmonic coefficients.
omega : float, optional, default = None
The angular rotation rate of the body.
function : str, optional, default = 'geoid'
The type of input power spectrum: 'potential' for the gravitational
potential, 'geoid' for the geoid, 'radial' for the radial gravity,
or 'total' for the total gravity field.
lmax : int, optional, default = len(power) - 1
The maximum spherical harmonic degree l of the output coefficients.
The coefficients will be set to zero for degrees greater than L.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
exact_power : bool, optional, default = False
If True, the spherical harmonic coefficients of the random
realization will be rescaled such that the power spectrum is
exactly equal to the input spectrum.
power_unit : str, optional, default = 'per_l'
If 'per_l', the input power spectrum represents the total power of
all angular orders as a function of spherical harmonic degree. If
'per_lm', the input power spectrum represents the power per
coefficient (which is assumed isotropic and varies only as a
function of spherical harmonic degree).
name : str, optional, default = None
The name of the dataset.
epoch : str or float, optional, default = None
The epoch time of the spherical harmonic coefficients as given by
the format YYYYMMDD.DD.
Notes
-----
This routine returns a random realization of spherical harmonic
gravitational potential coefficients obtained from a normal
distribution. The variance of each coefficient is determined by the
input power spectrum and the type of spectrum (as specified by
function and power_unit). If power_unit is 'per_l' (default), the
variance of each coefficient at spherical harmonic degree l is equal to
the input total power at degree l divided by the number of coefficients
at that degree. If power_unit is 'per_lm', the variance of each
coefficient at degree l is equal to the input power at that degree.
The power of the input function can be either for the geoid (default),
potential, radial gravity, or total gravity field. The power spectrum
of the random realization can be fixed exactly to the input spectrum by
setting exact_power to True.
Note that the degree 0 term is set to 1, and the degree-1 terms are
set to 0.
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. '
'Input type is {:s}.'
.format(str(type(normalization))))
if function.lower() not in ('potential', 'geoid', 'radial', 'total'):
raise ValueError(
"function must be of type 'potential', "
"'geoid', 'radial', or 'total'. Provided value is {:s}."
.format(repr(function))
)
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The input normalization must be '4pi', 'ortho', 'schmidt', "
"or 'unnorm'. Provided value is {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value is {:s}."
.format(repr(csphase))
)
if power_unit.lower() not in ('per_l', 'per_lm'):
raise ValueError("power_unit must be 'per_l' or 'per_lm'. " +
"Input value was {:s}".format(repr(power_unit)))
if lmax is None:
nl = len(power)
lmax = nl - 1
else:
if lmax <= len(power) - 1:
nl = lmax + 1
else:
nl = len(power)
degrees = _np.arange(nl)
if normalization.lower() == 'unnorm' and nl - 1 > 85:
_warnings.warn("Calculations using unnormalized coefficients "
"are stable only for degrees less than or equal "
"to 85. lmax for the coefficients will be set to "
"85. Input value is {:d}.".format(nl-1),
category=RuntimeWarning)
nl = 85 + 1
lmax = 85
# Create coefficients with unit variance, which returns an expected
# total power per degree of (2l+1) for 4pi normalized harmonics.
coeffs = _np.empty((2, nl, nl))
for l in degrees:
coeffs[:2, l, :l+1] = _np.random.normal(size=(2, l+1))
if exact_power:
power_realization = _spectrum(coeffs, normalization='4pi',
unit=power_unit)
coeffs *= _np.sqrt(
power[0:nl] / power_realization)[_np.newaxis, :, _np.newaxis]
else:
if power_unit == 'per_l':
coeffs *= \
_np.sqrt(power[0:nl] / (2 * degrees + 1))[_np.newaxis, :,
_np.newaxis]
elif power_unit == 'per_lm':
coeffs *= _np.sqrt(power[0:nl])[_np.newaxis, :, _np.newaxis]
if normalization.lower() == '4pi':
pass
elif normalization.lower() == 'ortho':
coeffs = _convert(coeffs, normalization_in='4pi',
normalization_out='ortho')
elif normalization.lower() == 'schmidt':
coeffs = _convert(coeffs, normalization_in='4pi',
normalization_out='schmidt')
elif normalization.lower() == 'unnorm':
coeffs = _convert(coeffs, normalization_in='4pi',
normalization_out='unnorm')
if function.lower() == 'potential':
coeffs /= (gm / r0)
elif function.lower() == 'geoid':
coeffs /= r0
elif function.lower() == 'radial':
for l in degrees:
coeffs[:, l, :l+1] /= (gm * (l + 1) / r0**2)
elif function.lower() == 'total':
for l in degrees:
coeffs[:, l, :l+1] /= (gm / r0**2) * _np.sqrt((l + 1) *
(2 * l + 1))
if lmax > nl - 1:
coeffs = _np.pad(coeffs, ((0, 0), (0, lmax - nl + 1),
(0, lmax - nl + 1)), 'constant')
coeffs[0, 0, 0] = 1.0
coeffs[:, 1, :] = 0.0
clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
errors=None,
normalization=normalization.lower(),
csphase=csphase, name=name, epoch=epoch)
return clm
@classmethod
def from_netcdf(self, filename, lmax=None, normalization='4pi', csphase=1,
                name=None, epoch=None):
    """
    Initialize the class with spherical harmonic coefficients from a
    netcdf file.

    Usage
    -----
    x = SHGravCoeffs.from_netcdf(filename, [lmax, normalization, csphase,
                                 name, epoch])

    Returns
    -------
    x : SHGravCoeffs class instance.

    Parameters
    ----------
    filename : str
        Name of the file, including path.
    lmax : int, optional, default = None
        The maximum spherical harmonic degree to read.
    normalization : str, optional, default = '4pi'
        Spherical harmonic normalization if not specified in the netcdf
        file: '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi
        normalized, orthonormalized, Schmidt semi-normalized, or
        unnormalized coefficients, respectively.
    csphase : int, optional, default = 1
        Condon-Shortley phase convention if not specified in the netcdf
        file: 1 to exclude the phase factor, or -1 to include it.
    name : str, optional, default = None
        The name of the dataset.
    epoch : str or float, optional, default = None
        The epoch time of the spherical harmonic coefficients as given by
        the format YYYYMMDD.DD.

    Description
    -----------
    The format of the netcdf file has to be exactly as the format that is
    used in SHGravCoeffs.to_netcdf().
    """
    ds = _xr.open_dataset(filename)

    # Metadata stored in the file takes precedence over the input
    # arguments. Missing attributes raise AttributeError on xarray
    # objects; catch only that (the original bare excepts hid all errors).
    try:
        normalization = ds.coeffs.normalization
    except AttributeError:
        pass

    if not isinstance(normalization, str):
        raise ValueError('normalization must be a string. '
                         'Input type was {:s}'
                         .format(str(type(normalization))))
    if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
        raise ValueError(
            "The input normalization must be '4pi', 'ortho', "
            "'schmidt', or 'unnorm'. Provided value was {:s}"
            .format(repr(normalization))
            )

    try:
        csphase = ds.coeffs.csphase
    except AttributeError:
        pass
    if csphase != 1 and csphase != -1:
        raise ValueError(
            "csphase must be 1 or -1. Input value was {:s}"
            .format(repr(csphase))
            )

    # GM and r0 are mandatory attributes of the 'coeffs' variable.
    try:
        gm = ds.coeffs.GM
    except AttributeError:
        raise ValueError("coeffs.GM must be specified in the netcdf file.")
    try:
        r0 = ds.coeffs.r0
    except AttributeError:
        raise ValueError("coeffs.r0 must be specified in the netcdf file.")
    try:
        omega = ds.coeffs.omega
    except AttributeError:
        omega = None
    try:
        epoch = ds.coeffs.epoch
    except AttributeError:
        pass

    # ds.sizes is the modern, non-deprecated spelling of ds.dims for
    # looking up a dimension's length.
    lmaxout = ds.sizes['degree'] - 1

    # The file packs cosine coefficients in the lower triangle and the
    # (row-rotated, transposed) sine coefficients in the upper triangle
    # of a single square matrix; undo that packing here.
    c = _np.tril(ds.coeffs.data)
    s = _np.triu(ds.coeffs.data, k=1)
    s = _np.vstack([s[-1], s[:-1]])
    s = _np.transpose(s)
    if isinstance(lmax, int):
        c, s = c[:lmax+1, :lmax+1], s[:lmax+1, :lmax+1]
        lmaxout = lmax

    if normalization.lower() == 'unnorm' and lmaxout > 85:
        _warnings.warn("Calculations using unnormalized coefficients "
                       "are stable only for degrees less than or equal "
                       "to 85. lmax for the coefficients will be set to "
                       "85. Input value was {:d}.".format(lmaxout),
                       category=RuntimeWarning)
        lmaxout = 85
        c, s = c[:lmaxout+1, :lmaxout+1], s[:lmaxout+1, :lmaxout+1]
    coeffs = _np.array([c, s])

    # Errors are optional; absence of the 'errors' variable (or its
    # error_kind attribute) raises AttributeError.
    try:
        cerrors = _np.tril(ds.errors.data)
        serrors = _np.triu(ds.errors.data, k=1)
        serrors = _np.vstack([serrors[-1], serrors[:-1]])
        serrors = _np.transpose(serrors)
        cerrors = cerrors[:lmaxout+1, :lmaxout+1]
        serrors = serrors[:lmaxout+1, :lmaxout+1]
        errors = _np.array([cerrors, serrors])
        error_kind = ds.errors.error_kind
    except AttributeError:
        errors = None
        error_kind = None

    if _np.iscomplexobj(coeffs):
        raise ValueError('Gravitational potential coefficients must be '
                         'real. Input coefficients are complex.')

    clm = SHGravRealCoeffs(coeffs, gm=gm, r0=r0, omega=omega,
                           errors=errors, error_kind=error_kind,
                           normalization=normalization.lower(),
                           csphase=csphase, name=name, epoch=epoch)
    return clm
@classmethod
def from_shape(self, shape, rho, gm, nmax=7, lmax=None, lmax_grid=None,
               lmax_calc=None, omega=None, name=None, epoch=None,
               backend=None, nthreads=None):
    """
    Initialize a class of gravitational potential spherical harmonic
    coefficients by calculating the gravitational potential associated
    with relief along an interface.

    Usage
    -----
    x = SHGravCoeffs.from_shape(shape, rho, gm, [nmax, lmax, lmax_grid,
                                lmax_calc, omega, name,
                                epoch, backend, nthreads])

    Returns
    -------
    x : SHGravCoeffs class instance.

    Parameters
    ----------
    shape : SHGrid or SHCoeffs class instance
        The shape of the interface, either as an SHGrid or SHCoeffs class
        instance. If the input is an SHCoeffs class instance, this will be
        expanded on a grid using the optional parameters lmax_grid and
        lmax_calc.
    rho : int, float, or ndarray, or an SHGrid or SHCoeffs class instance
        The density contrast associated with the interface in kg / m3. If
        the input is a scalar, the density contrast is constant. If
        the input is an SHCoeffs or SHGrid class instance, the density
        contrast will vary laterally.
    gm : float
        The gravitational constant times the mass that is associated with
        the gravitational potential coefficients.
    nmax : integer, optional, default = 7
        The maximum order used in the Taylor-series expansion when
        calculating the potential coefficients.
    lmax : int, optional, shape.lmax
        The maximum spherical harmonic degree of the output spherical
        harmonic coefficients.
    lmax_grid : int, optional, default = lmax
        If shape or rho is of type SHCoeffs, this parameter determines the
        maximum spherical harmonic degree that is resolvable when expanded
        onto a grid.
    lmax_calc : optional, integer, default = lmax
        If shape or rho is of type SHCoeffs, this parameter determines the
        maximum spherical harmonic degree that will be used when expanded
        onto a grid.
    omega : float, optional, default = None
        The angular rotation rate of the body.
    name : str, optional, default = None
        The name of the dataset.
    epoch : str or float, optional, default = None
        The epoch time of the spherical harmonic coefficients as given by
        the format YYYYMMDD.DD.
    backend : str, optional, default = preferred_backend()
        Name of the preferred backend, either 'shtools' or 'ducc'.
    nthreads : int, optional, default = 1
        Number of threads to use for the 'ducc' backend. Setting this
        parameter to 0 will use as many threads as there are hardware
        threads on the system.

    Notes
    -----
    Initialize an SHGravCoeffs class instance by calculating the spherical
    harmonic coefficients of the gravitational potential associated with
    the shape of a density interface. The potential is calculated using the
    finite-amplitude technique of Wieczorek and Phillips (1998) for a
    constant density contrast and Wieczorek (2007) for a density contrast
    that varies laterally. The output coefficients are referenced to the
    mean radius of shape, and the potential is strictly valid only when it
    is evaluated at a radius greater than the maximum radius of shape.

    The input shape (and density contrast rho for variable density) can be
    either an SHGrid or SHCoeffs class instance. The routine makes direct
    use of gridded versions of these quantities, so if the input is of type
    SHCoeffs, it will first be expanded onto a grid. This expansion will be
    performed on a grid that can resolve degrees up to lmax_grid, with only
    the first lmax_calc coefficients being used. The input shape must
    correspond to absolute radii as the degree 0 term determines the
    reference radius of the coefficients.

    As an intermediate step, this routine calculates the spherical harmonic
    coefficients of the interface raised to the nth power, i.e.,
    (shape-r0)**n, where r0 is the mean radius of shape. If the input shape
    is bandlimited to degree L, the resulting function will thus be
    bandlimited to degree L*nmax. This subroutine assumes implicitly that
    the maximum spherical harmonic degree of the input shape (when
    SHCoeffs) or maximum resolvable spherical harmonic degree of shape
    (when SHGrid) is greater or equal to this value. If this is not the
    case, aliasing will occur. In practice, for accurate results, the
    effective bandwidth needs only to be about three times the size of L,
    though this should be verified for each application. The effective
    bandwidth of shape (when SHCoeffs) can be increased by preprocessing
    with the method pad(), or by increasing the value of lmax_grid (when
    SHGrid).
    """
    # The mass follows from GM and Newton's gravitational constant.
    mass = gm / _G.value

    if backend is None:
        backend = preferred_backend()

    if type(shape) is not _SHRealCoeffs and type(shape) is not _DHRealGrid:
        raise ValueError('shape must be of type SHRealCoeffs '
                         'or DHRealGrid. Input type is {:s}.'
                         .format(repr(type(shape))))
    # Bug fix: the last test previously read `type(rho is not
    # _DHRealGrid)`, which evaluates to the class `bool` (always truthy)
    # and therefore incorrectly rejected DHRealGrid density inputs.
    if (not issubclass(type(rho), float) and type(rho) is not int
            and type(rho) is not _np.ndarray and
            type(rho) is not _SHRealCoeffs and
            type(rho) is not _DHRealGrid):
        raise ValueError('rho must be of type float, int, ndarray, '
                         'SHRealCoeffs or DHRealGrid. Input type is {:s}.'
                         .format(repr(type(rho))))

    # Coefficient inputs are expanded onto grids before the
    # finite-amplitude computation.
    if type(shape) is _SHRealCoeffs:
        shape = shape.expand(lmax=lmax_grid, lmax_calc=lmax_calc,
                             backend=backend, nthreads=nthreads)
    if type(rho) is _SHRealCoeffs:
        rho = rho.expand(lmax=lmax_grid, lmax_calc=lmax_calc,
                         backend=backend, nthreads=nthreads)

    if type(rho) is _DHRealGrid:
        if shape.lmax != rho.lmax:
            raise ValueError('The grids for shape and rho must have the '
                             'same size. '
                             'lmax of shape = {:d}, lmax of rho = {:d}.'
                             .format(shape.lmax, rho.lmax))
        # Laterally varying density contrast (Wieczorek 2007). The
        # slices strip any redundant row/column of an extended grid.
        cilm, d = _CilmPlusRhoHDH(shape.data[:shape.nlat-shape.extend,
                                             :shape.nlon-shape.extend],
                                  nmax, mass,
                                  rho.data[:rho.nlat-rho.extend,
                                           :rho.nlon-rho.extend],
                                  lmax=lmax)
    else:
        # Constant density contrast (Wieczorek and Phillips 1998).
        cilm, d = _CilmPlusDH(shape.data[:shape.nlat-shape.extend,
                                         :shape.nlon-shape.extend],
                              nmax, mass, rho, lmax=lmax)

    clm = SHGravRealCoeffs(cilm, gm=gm, r0=d, omega=omega,
                           normalization='4pi', csphase=1, name=name,
                           epoch=epoch)
    return clm
@property
def mass(self):
    """Return the mass of the planet in kg.
    """
    # GM divided by Newton's gravitational constant yields the mass.
    newton_g = _G.value
    return self.gm / newton_g
@property
def center_of_mass(self):
    """
    Return the Cartesian coordinates of the center of mass of the planet
    in meters.

    Returns
    -------
    [x, y, z] : numpy ndarray
        Cartesian coordinates of the center of mass in meters.
    """
    # The unnormalized degree-1 coefficients give the Cartesian offsets
    # of the center of mass in units of the reference radius r0.
    unnorm = self.convert(normalization='unnorm', csphase=1, lmax=1).coeffs
    return self.r0 * _np.array([unnorm[0, 1, 1],
                                unnorm[1, 1, 1],
                                unnorm[0, 1, 0]])
def inertia_tensor(self, dynamical_flattening):
    """Return the inertia tensor of the planet in kg * m**2.

    Parameters
    ----------
    dynamical_flattening : float
        Dynamical flattening (or precession constant) of the planet,
        defined as [C-(A+B)/2]/C.

    Returns
    -------
    tensor : ndarray, shape (3, 3)
        Inertia tensor of the planet.

    Notes
    -----
    The moment of inertia tensor is given by 9 components

        (Ixx, Ixy, Ixz)
        (Iyx, Iyy, Iyz)
        (Izx, Izy, Izz)

    The diagonal elements Ixx, Iyy, Izz are the axial moments of inertia,
    and the off-diagonal elements

        Ixy = Iyx, Ixz = Izx, Iyz = Izy

    are the products of inertia.

    References
    ----------
    Heiskanen, W.A. and Moritz, H., 1967. Physical geodesy. San Francisco,
    WH Freeman, 1967.

    Chen, W., Li, J.C., Ray, J., Shen, W.B. and Huang, C.L.,
    Consistent estimates of the dynamic figure parameters of the Earth.
    J. Geod., 89(2), 179-188, 2015.
    """
    # Work with unnormalized degree-2 coefficients throughout.
    cilm = self.convert(normalization='unnorm', csphase=1, lmax=2).coeffs
    prefactor = self.mass * self.r0**2

    # Products of inertia from the degree-2 tesseral/sectoral terms.
    i_yz = -prefactor * cilm[1, 2, 1]
    i_xz = -prefactor * cilm[0, 2, 1]
    i_xy = -2 * prefactor * cilm[1, 2, 2]

    # Axial moments of inertia from C20, C22 and the dynamical
    # flattening.
    i_xx = prefactor * ((1 - 1 / dynamical_flattening) * cilm[0, 2, 0] -
                        2 * cilm[0, 2, 2])
    i_yy = prefactor * ((1 - 1 / dynamical_flattening) * cilm[0, 2, 0] +
                        2 * cilm[0, 2, 2])
    i_zz = -prefactor * cilm[0, 2, 0] / dynamical_flattening

    return _np.array([[i_xx, i_xy, i_xz],
                      [i_xy, i_yy, i_yz],
                      [i_xz, i_yz, i_zz]])
# ---- Define methods that modify internal variables ----
def set_omega(self, omega):
    """
    Set the angular rotation rate of the class instance.

    Usage
    -----
    x.set_omega(omega)

    Parameters
    ----------
    omega : float
        The angular rotation rate of the body.
    """
    # Simply record the new rotation rate on the instance.
    self.omega = omega
def set_coeffs(self, values, ls, ms):
    """
    Set spherical harmonic coefficients in-place to specified values.

    Usage
    -----
    x.set_coeffs(values, ls, ms)

    Parameters
    ----------
    values : float (list)
        The value(s) of the spherical harmonic coefficient(s).
    ls : int (list)
        The degree(s) of the coefficient(s) that should be set.
    ms : int (list)
        The order(s) of the coefficient(s) that should be set. Positive
        and negative values correspond to the cosine and sine
        components, respectively.

    Examples
    --------
    x.set_coeffs(10., 1, 1)                   # x.coeffs[0, 1, 1] = 10.
    x.set_coeffs(5., 1, -1)                   # x.coeffs[1, 1, 1] = 5.
    x.set_coeffs([1., 2], [1, 2], [0, -2])    # x.coeffs[0, 1, 0] = 1.
                                              # x.coeffs[1, 2, 2] = 2.
    """
    # Ensure that the type is correct
    values = _np.array(values)
    ls = _np.array(ls)
    ms = _np.array(ms)

    # Negative orders address the sine (index 1) coefficients. Use the
    # builtin int here: np.int was deprecated in NumPy 1.20 and removed
    # in NumPy 1.24, so the previous astype(_np.int) raises
    # AttributeError on modern NumPy.
    mneg_mask = (ms < 0).astype(int)
    self.coeffs[mneg_mask, ls, _np.abs(ms)] = values
# ---- IO routines ----
def to_file(self, filename, format='shtools', header=None, errors=True,
            lmax=None, modelname=None, tide_system='unknown',
            encoding=None, **kwargs):
    """
    Save spherical harmonic coefficients to a file.

    Usage
    -----
    x.to_file(filename, [format='shtools', header, errors, lmax, encoding])
    x.to_file(filename, format='dov', [header, errors, lmax, encoding])
    x.to_file(filename, format='bshc', [lmax])
    x.to_file(filename, format='icgem', [header, errors, lmax, modelname,
                                         tide_system, encoding])
    x.to_file(filename, format='npy', [**kwargs])

    Parameters
    ----------
    filename : str
        Name of the output file. If the filename ends with '.gz', the file
        will be compressed using gzip.
    format : str, optional, default = 'shtools'
        'shtools', 'dov', 'bshc', 'icgem' or 'npy'.
    header : str, optional, default = None
        A header string written to an 'shtools' or 'dov'-formatted file
        directly before the metadata and spherical harmonic coefficients.
    errors : bool, optional, default = True
        If True, save the errors in the file (for 'shtools', 'dov', and
        'icgem' formatted files only). Ignored when the class instance
        has no errors.
    lmax : int, optional, default = self.lmax
        The maximum spherical harmonic degree to write to the file.
    modelname : str, optional, default = None
        The name of the model for 'icgem' formatted files.
    tide_system : str, optional, default = 'unknown'
        The tide system for 'icgem' formatted files: 'zero_tide',
        'tide_free', or 'unknown'.
    encoding : str, optional, default = None
        Encoding of the output file when format is 'shtools', 'dov' or
        'icgem'. The default is to use the system default.
    **kwargs : keyword argument list, optional for format = 'npy'
        Keyword arguments of numpy.save().

    Notes
    -----
    Supported file formats:
        'shtools' (see pyshtools.shio.shwrite)
        'dov' (see pyshtools.shio.write_dov)
        'bshc' (see pyshtools.shio.write_bshc)
        'icgem' (see pyshtools.shio.write_icgem_gfc)
        'npy' (see numpy.save)

    If the filename ends with '.gz', the file will be compressed using
    gzip.

    'shtools': The coefficients and meta-data will be written to an ascii
    formatted file. The first line is an optional user provided header
    line, and the following line provides the attributes r0, gm,
    omega, and lmax. The spherical harmonic coefficients (and optionally
    the errors) are then listed, with increasing degree and order, with the
    format

    l, m, coeffs[0, l, m], coeffs[1, l, m], error[0, l, m], error[1, l, m]

    where l and m are the spherical harmonic degree and order,
    respectively.

    'dov': This format is nearly the same as 'shtools', with the exception
    that each line contains a single coefficient (and optionally an error)
    for each degree and order:

    l, m, coeffs[0, l, m], error[0, l, m]
    l, -m, coeffs[1, l, m], error[1, l, m]

    'bshc': The coefficients will be written to a binary file composed
    solely of 8-byte floats. The file starts with the minimum and maximum
    degree, and is followed by the cosine coefficients and then sine
    coefficients (with all orders being listed, one degree at a time). This
    format does not support additional metadata or coefficient errors.

    'icgem': The coefficients will be written to a text file using the
    gfc format of the International Centre for Global Earth Models.

    'npy': The spherical harmonic coefficients (but not the meta-data nor
    errors) will be saved to a binary numpy 'npy' file.
    """
    if lmax is None:
        lmax = self.lmax

    # Errors can only be written when they exist: silently fall back to
    # writing the coefficients alone. (A previous check that raised when
    # errors were requested but uninitialized was unreachable because of
    # this coercion, and has been removed.)
    if errors is True and self.errors is None:
        errors = False

    # When gzip output is requested, first write the uncompressed file
    # and compress it as a final step.
    if filename.endswith('.gz'):
        filebase = filename[:-3]
    else:
        filebase = filename

    if format.lower() in ('shtools', 'dov'):
        if format.lower() == 'shtools':
            write_func = _shwrite
        else:
            write_func = _write_dov

        if self.omega is None:
            omega = 0.
        else:
            omega = self.omega
        # The attribute line (r0, gm, omega, lmax) is written either as
        # the first header (no user header) or as a second header line.
        header_str = '{:.16e}, {:.16e}, {:.16e}, {:d}'.format(
            self.r0, self.gm, omega, lmax)
        if header is None:
            header = header_str
            header2 = None
        else:
            header2 = header_str

        if errors:
            write_func(filebase, self.coeffs, errors=self.errors,
                       header=header, header2=header2, lmax=lmax,
                       encoding=encoding)
        else:
            write_func(filebase, self.coeffs, errors=None,
                       header=header, header2=header2, lmax=lmax,
                       encoding=encoding)

    elif format.lower() == 'bshc':
        _write_bshc(filebase, self.coeffs, lmax=lmax)

    elif format.lower() == 'icgem':
        if errors:
            _write_icgem_gfc(filebase, self.coeffs, errors=self.errors,
                             header=header, lmax=lmax, modelname=modelname,
                             gm=self.gm, r0=self.r0,
                             error_kind=self.error_kind,
                             tide_system=tide_system,
                             normalization=self.normalization,
                             encoding=encoding)
        else:
            _write_icgem_gfc(filebase, self.coeffs, header=header,
                             lmax=lmax, modelname=modelname,
                             gm=self.gm, r0=self.r0,
                             tide_system=tide_system,
                             normalization=self.normalization,
                             encoding=encoding)

    elif format.lower() == 'npy':
        # NOTE(review): for format='npy' with a '.gz' filename,
        # numpy.save appends '.npy' when the name lacks that suffix, so
        # the gzip step below will not find filebase — confirm whether
        # gzipped npy output is intended to be supported.
        _np.save(filename, self.coeffs, **kwargs)

    else:
        raise NotImplementedError(
            'format={:s} not implemented.'.format(repr(format)))

    if filename.endswith('.gz'):
        with open(filebase, 'rb') as f_in:
            with _gzip.open(filename, 'wb') as f_out:
                _shutil.copyfileobj(f_in, f_out)
def to_netcdf(self, filename, title='', description='', lmax=None):
    """
    Return the coefficient data as a netcdf formatted file or object.

    Usage
    -----
    x.to_netcdf(filename, [title, description, lmax])

    Parameters
    ----------
    filename : str
        Name of the output file.
    title : str, optional, default = ''
        Title of the dataset
    description : str, optional, default = ''
        Description of the data.
    lmax : int, optional, default = self.lmax
        The maximum spherical harmonic degree to output.
    """
    if lmax is None:
        lmax = self.lmax

    degree_axis = _np.arange(lmax + 1)
    ds = _xr.Dataset()
    ds.coords['degree'] = ('degree', degree_axis)
    ds.coords['order'] = ('order', degree_axis)

    # Pack both triangular coefficient arrays into one square matrix:
    # cosine terms fill the lower triangle, and the transposed sine
    # terms (rotated up by one row) fill the upper triangle.
    cosine = self.coeffs[0, :lmax+1, :lmax+1]
    sine = _np.transpose(self.coeffs[1, :lmax+1, :lmax+1])
    sine = _np.vstack([sine[1:], sine[0]])
    ds['coeffs'] = (('degree', 'order'), cosine + sine)

    meta = {'title': title,
            'description': description,
            'normalization': self.normalization,
            'csphase': self.csphase,
            'GM': self.gm,
            'r0': self.r0}
    if self.omega is not None:
        meta['omega'] = self.omega
    if self.epoch is not None:
        meta['epoch'] = self.epoch
    ds['coeffs'].attrs = meta

    if self.errors is not None:
        # The errors use the same triangular packing as the coefficients.
        cosine_err = self.errors[0, :lmax+1, :lmax+1]
        sine_err = _np.transpose(self.errors[1, :lmax+1, :lmax+1])
        sine_err = _np.vstack([sine_err[1:], sine_err[0]])
        ds['errors'] = (('degree', 'order'), cosine_err + sine_err)

        err_meta = {'normalization': self.normalization,
                    'csphase': self.csphase,
                    'GM': self.gm,
                    'r0': self.r0}
        if self.omega is not None:
            err_meta['omega'] = self.omega
        if self.epoch is not None:
            err_meta['epoch'] = self.epoch
        if self.error_kind is not None:
            err_meta['error_kind'] = self.error_kind
        ds['errors'].attrs = err_meta

    ds.to_netcdf(filename)
def to_array(self, normalization=None, csphase=None, lmax=None,
             errors=False):
    """
    Return spherical harmonic coefficients (and errors) as a numpy array.

    Usage
    -----
    coeffs, [errors] = x.to_array([normalization, csphase, lmax, errors])

    Returns
    -------
    coeffs : ndarry, shape (2, lmax+1, lmax+1)
        numpy ndarray of the spherical harmonic coefficients.
    errors : ndarry, shape (2, lmax+1, lmax+1)
        numpy ndarray of the errors of the spherical harmonic
        coefficients if they are not None.

    Parameters
    ----------
    normalization : str, optional, default = x.normalization
        Normalization of the output coefficients: '4pi', 'ortho',
        'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized,
        Schmidt semi-normalized, or unnormalized coefficients,
        respectively.
    csphase : int, optional, default = x.csphase
        Condon-Shortley phase convention: 1 to exclude the phase factor,
        or -1 to include it.
    lmax : int, optional, default = x.lmax
        Maximum spherical harmonic degree to output. If lmax is greater
        than x.lmax, the array will be zero padded.
    errors : bool, optional, default = False
        If True, return separate arrays of the coefficients and errors. If
        False, return only the coefficients.

    Notes
    -----
    This method will return an array of the spherical harmonic coefficients
    using a different normalization and Condon-Shortley phase convention,
    and a different maximum spherical harmonic degree. If the maximum
    degree is smaller than the maximum degree of the class instance, the
    coefficients will be truncated. Conversely, if this degree is larger
    than the maximum degree of the class instance, the output array will be
    zero padded. If the errors of the coefficients are set, and the
    optional parameter errors is set to True, the errors will be output as
    a separate array.
    """
    # Fall back to the instance's own conventions when not overridden.
    norm_out = self.normalization if normalization is None else normalization
    cs_out = self.csphase if csphase is None else csphase
    lmax_out = self.lmax if lmax is None else lmax

    coeffs = _convert(self.coeffs, normalization_in=self.normalization,
                      normalization_out=norm_out,
                      csphase_in=self.csphase, csphase_out=cs_out,
                      lmax=lmax_out)

    if errors and self.errors is not None:
        errs = _convert(self.errors, normalization_in=self.normalization,
                        normalization_out=norm_out,
                        csphase_in=self.csphase, csphase_out=cs_out,
                        lmax=lmax_out)
        return coeffs, errs
    return coeffs
def copy(self):
    """
    Return a deep copy of the class instance.

    Usage
    -----
    copy = x.copy()
    """
    # A deep copy duplicates the coefficient arrays as well.
    duplicate = _copy.deepcopy(self)
    return duplicate
def info(self):
    """
    Print a summary of the data stored in the SHGravCoeffs class instance.

    Usage
    -----
    x.info()
    """
    # The summary is simply the instance's repr.
    print('{!r}'.format(self))
# -------------------------------------------------------------------------
# Mathematical operators
#
# All operations ignore the errors of the coefficients.
# All operations ignore the units of the coefficients, with the
# exception of multiplying and dividing by a scalar.
# -------------------------------------------------------------------------
def __add__(self, other):
    """
    Add two similar sets of gravitational potential coefficients:
    self + other. Coefficient errors are ignored.
    """
    if not isinstance(other, SHGravCoeffs):
        raise TypeError('Addition is permitted only for two SHGravCoeffs '
                        'instances. Type of other is {:s}.'
                        .format(repr(type(other))))
    compatible = (self.gm == other.gm and self.r0 == other.r0 and
                  self.normalization == other.normalization and
                  self.csphase == other.csphase and
                  self.kind == other.kind and self.lmax == other.lmax)
    if not compatible:
        raise ValueError('Addition is permitted only when the two '
                         'SHGravCoeffs instances have the same kind, '
                         'normalization, csphase, gm, r0, and lmax.')
    # Combine only the valid (l, m) entries selected by the mask.
    summed = _np.empty([2, self.lmax+1, self.lmax+1],
                       dtype=self.coeffs.dtype)
    summed[self.mask] = self.coeffs[self.mask] + other.coeffs[self.mask]
    return SHGravCoeffs.from_array(
        summed, gm=self.gm, r0=self.r0, omega=self.omega,
        csphase=self.csphase, normalization=self.normalization)
def __radd__(self, other):
    """
    Add two similar sets of gravitational potential coefficients:
    other + self. Addition is commutative, so delegate to __add__.
    """
    return self.__add__(other)
def __sub__(self, other):
    """
    Subtract two similar sets of gravitational potential coefficients:
    self - other. Coefficient errors are ignored.
    """
    if not isinstance(other, SHGravCoeffs):
        raise TypeError('Subtraction is permitted only for two '
                        'SHGravCoeffs instances. Type of other is {:s}.'
                        .format(repr(type(other))))
    compatible = (self.gm == other.gm and self.r0 == other.r0 and
                  self.normalization == other.normalization and
                  self.csphase == other.csphase and
                  self.kind == other.kind and self.lmax == other.lmax)
    if not compatible:
        raise ValueError('Subtraction is permitted only when the two '
                         'SHGravCoeffs instances have the same kind, '
                         'normalization, csphase, gm, r0, and lmax.')
    # Operate only on the valid (l, m) entries selected by the mask.
    difference = _np.empty([2, self.lmax+1, self.lmax+1],
                           dtype=self.coeffs.dtype)
    difference[self.mask] = (self.coeffs[self.mask] -
                             other.coeffs[self.mask])
    return SHGravCoeffs.from_array(
        difference, gm=self.gm, r0=self.r0, omega=self.omega,
        csphase=self.csphase, normalization=self.normalization)
def __rsub__(self, other):
    """
    Subtract two similar sets of gravitational potential coefficients:
    other - self. Coefficient errors are ignored.
    """
    if not isinstance(other, SHGravCoeffs):
        raise TypeError('Subtraction is permitted only for two '
                        'SHGravCoeffs instances. Type of other is {:s}.'
                        .format(repr(type(other))))
    compatible = (self.gm == other.gm and self.r0 == other.r0 and
                  self.normalization == other.normalization and
                  self.csphase == other.csphase and
                  self.kind == other.kind and self.lmax == other.lmax)
    if not compatible:
        raise ValueError('Subtraction is permitted only when the two '
                         'SHGravCoeffs instances have the same kind, '
                         'normalization, csphase, gm, r0, and lmax.')
    # Reflected operation: subtract self from other on the valid entries.
    difference = _np.empty([2, self.lmax+1, self.lmax+1],
                           dtype=self.coeffs.dtype)
    difference[self.mask] = (other.coeffs[self.mask] -
                             self.coeffs[self.mask])
    return SHGravCoeffs.from_array(
        difference, gm=self.gm, r0=self.r0, omega=self.omega,
        csphase=self.csphase, normalization=self.normalization)
def __mul__(self, other):
    """
    Multiply an SHGravCoeffs instance by an SHCoeffs instance or scalar:
    self * other. Coefficient errors are ignored.
    """
    if isinstance(other, _SHCoeffs):
        if not (self.normalization == other.normalization and
                self.csphase == other.csphase and
                self.kind == other.kind and self.lmax == other.lmax):
            raise ValueError('The two sets of coefficients must have the '
                             'same kind, normalization, csphase, and '
                             'lmax.')
        product = _np.empty([2, self.lmax+1, self.lmax+1],
                            dtype=self.coeffs.dtype)
        product[self.mask] = (self.coeffs[self.mask] *
                              other.coeffs[self.mask])
        return SHGravCoeffs.from_array(
            product, gm=self.gm, r0=self.r0, omega=self.omega,
            csphase=self.csphase, normalization=self.normalization)
    if _np.isscalar(other):
        # Real coefficients must stay real after scaling.
        if self.kind == 'real' and _np.iscomplexobj(other):
            raise ValueError('Can not multiply real gravitational '
                             'potential coefficients by a complex '
                             'constant.')
        product = _np.empty([2, self.lmax+1, self.lmax+1],
                            dtype=self.coeffs.dtype)
        product[self.mask] = self.coeffs[self.mask] * other
        return SHGravCoeffs.from_array(
            product, gm=self.gm, r0=self.r0, omega=self.omega,
            csphase=self.csphase, normalization=self.normalization)
    raise TypeError('Multiplication of an SHGravCoeffs instance is '
                    'permitted only with either an SHCoeffs instance '
                    'or a scalar. '
                    'Type of other is {:s}.'.format(repr(type(other))))
def __rmul__(self, other):
    """
    Multiply an SHGravCoeffs instance by an SHCoeffs instance or scalar:
    other * self. Multiplication is commutative, so delegate to __mul__.
    """
    return self.__mul__(other)
def __truediv__(self, other):
    """
    Divide an SHGravCoeffs instance by an SHCoeffs instance or scalar:
    self / other. Coefficient errors are ignored.
    """
    if isinstance(other, _SHCoeffs):
        if not (self.normalization == other.normalization and
                self.csphase == other.csphase and
                self.kind == other.kind and self.lmax == other.lmax):
            raise ValueError('The two sets of coefficients must have the '
                             'same kind, normalization, csphase, and '
                             'lmax.')
        quotient = _np.empty([2, self.lmax+1, self.lmax+1],
                             dtype=self.coeffs.dtype)
        quotient[self.mask] = (self.coeffs[self.mask] /
                               other.coeffs[self.mask])
        return SHGravCoeffs.from_array(
            quotient, gm=self.gm, r0=self.r0, omega=self.omega,
            csphase=self.csphase, normalization=self.normalization)
    if _np.isscalar(other):
        # Real coefficients must stay real after scaling.
        if self.kind == 'real' and _np.iscomplexobj(other):
            raise ValueError('Can not divide real gravitational '
                             'potential coefficients by a complex '
                             'constant.')
        quotient = _np.empty([2, self.lmax+1, self.lmax+1],
                             dtype=self.coeffs.dtype)
        quotient[self.mask] = self.coeffs[self.mask] / other
        return SHGravCoeffs.from_array(
            quotient, gm=self.gm, r0=self.r0, omega=self.omega,
            csphase=self.csphase, normalization=self.normalization)
    raise TypeError('Division of an SHGravCoeffs instance is '
                    'permitted only with either an SHCoeffs instance '
                    'or a scalar. '
                    'Type of other is {:s}.'.format(repr(type(other))))
# ---- Extract data ----
def degrees(self):
    """
    Return a numpy array with the spherical harmonic degrees from 0 to
    lmax.

    Usage
    -----
    degrees = x.degrees()

    Returns
    -------
    degrees : ndarray, shape (lmax+1)
        1-D numpy ndarray listing the spherical harmonic degrees, where
        lmax is the maximum spherical harmonic degree.
    """
    ndegrees = self.lmax + 1
    return _np.arange(ndegrees)
def spectrum(self, function='geoid', lmax=None, unit='per_l', base=10.):
    """
    Return the spectrum as a function of spherical harmonic degree.

    Usage
    -----
    spectrum, [error_spectrum] = x.spectrum([function, lmax, unit, base])

    Returns
    -------
    spectrum : ndarray, shape (lmax+1)
        1-D numpy ndarray of the spectrum, where lmax is the maximum
        spherical harmonic degree.
    error_spectrum : ndarray, shape (lmax+1)
        1-D numpy ndarray of the error_spectrum (if the attribute errors
        is not None).

    Parameters
    ----------
    function : str, optional, default = 'geoid'
        The type of power spectrum to return: 'potential' for the
        gravitational potential in m2/s2, 'geoid' for the geoid in m,
        'radial' for the radial gravity in m/s2, or 'total' for the total
        gravitational field in m/s2.
    lmax : int, optional, default = x.lmax
        Maximum spherical harmonic degree of the spectrum to return.
    unit : str, optional, default = 'per_l'
        If 'per_l', return the total contribution to the spectrum for each
        spherical harmonic degree l. If 'per_lm', return the average
        contribution to the spectrum for each coefficient at spherical
        harmonic degree l. If 'per_dlogl', return the spectrum per log
        interval dlog_a(l).
    base : float, optional, default = 10.
        The logarithm base when calculating the 'per_dlogl' spectrum.

    Notes
    -----
    This method returns the power spectrum of the class instance, where the
    type of function is defined by the function parameter: 'potential' for
    the gravitational potential, 'geoid' for the geoid, 'radial' for
    the radial gravity, or 'total' for the total gravitational field. In
    all cases, the total power of the function is defined as the integral
    of the function squared over all space, divided by the area the
    function spans. If the mean of the function is zero, this is equivalent
    to the variance of the function.

    The output spectrum can be expresed using one of three units. 'per_l'
    returns the contribution to the total spectrum from all angular orders
    at degree l. 'per_lm' returns the average contribution to the total
    spectrum from a single coefficient at degree l, which is equal to the
    'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
    contribution to the total spectrum from all angular orders over an
    infinitessimal logarithmic degree band. The contribution in the band
    dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
    and where spectrum(l, 'per_dlogl) is equal to
    spectrum(l, 'per_l')*l*log(a).
    """
    func = function.lower()
    if func not in ('potential', 'geoid', 'radial', 'total'):
        raise ValueError(
            "function must be of type 'potential', 'geoid', 'radial', or "
            "'total'. Provided value is {:s}.".format(repr(function))
            )

    # Dimensionless power spectrum of the coefficients (and errors).
    pspectrum = _spectrum(self.coeffs, normalization=self.normalization,
                          convention='power', unit=unit, base=base,
                          lmax=lmax)
    espectrum = None
    if self.errors is not None:
        espectrum = _spectrum(self.errors,
                              normalization=self.normalization,
                              convention='power', unit=unit, base=base,
                              lmax=lmax)

    # Degree-dependent scale factor converting the dimensionless power
    # to the requested physical function.
    if func == 'potential':
        scale = (self.gm / self.r0)**2
    elif func == 'geoid':
        scale = self.r0**2
    elif func == 'radial':
        ls = _np.arange(len(pspectrum))
        scale = (self.gm * (ls + 1) / self.r0**2)**2
    else:  # 'total'
        ls = _np.arange(len(pspectrum))
        scale = (self.gm / self.r0**2)**2 * (ls + 1) * (2 * ls + 1)

    pspectrum *= scale
    if espectrum is None:
        return pspectrum
    espectrum *= scale
    return pspectrum, espectrum
def admittance(self, hlm, errors=True, function='radial', lmax=None):
"""
Return the admittance for an input topography function.
Usage
-----
admittance = g.admittance(hlm, [errors, function, lmax])
Returns
-------
admittance : ndarray, shape (lmax+1) or (2, lmax+1)
1-D array of the admittance (errors=False) or 2-D array of the
admittance and its uncertainty (errors=True), where lmax is the
maximum spherical harmonic degree.
Parameters
----------
hlm : SHCoeffs class instance.
The topography function h used in computing the admittance
Sgh / Shh. hlm is assumed to have units of meters.
errors : bool, optional, default = True
Return the uncertainty of the admittance.
function : str, optional, default = 'radial'
The type of admittance to return: 'geoid' for using the geoid, in
units of m/km, or 'radial' for using the radial gravity in units
of mGal/km.
lmax : int, optional, default = g.lmax
Maximum spherical harmonic degree of the spectrum to output.
Notes
-----
If gravity g and topography h are related by the equation
glm = Z(l) hlm + nlm
where nlm is a zero-mean random variable, the admittance Z(l) can be
estimated using
Z(l) = Sgh(l) / Shh(l),
where Sgh, Shh and Sgg are the cross-power and power spectra of the
functions g (self) and h (input).
"""
if not isinstance(hlm, _SHCoeffs):
raise ValueError('hlm must be an SHCoeffs class instance. Input '
'type is {:s}.'.format(repr(type(hlm))))
if lmax is None:
lmax = min(self.lmax, hlm.lmax)
sgh = _cross_spectrum(self.coeffs,
hlm.to_array(normalization=self.normalization,
csphase=self.csphase, lmax=lmax,
errors=False),
normalization=self.normalization,
lmax=lmax)
shh = _spectrum(hlm.coeffs, normalization=hlm.normalization, lmax=lmax)
with _np.errstate(invalid='ignore', divide='ignore'):
admit = sgh / shh
if errors:
sgg = _spectrum(self.coeffs, normalization=self.normalization,
lmax=lmax)
sigma = (sgg / shh) * (1. - sgh**2 / sgg / shh) / \
_np.arange(lmax+1) / 2.
admit = _np.column_stack((admit, _np.sqrt(sigma)))
if function == 'geoid':
admit *= 1000. * self.r0
else:
degrees = _np.arange(lmax+1)
if errors:
admit *= 1.e8 * self.gm * (degrees[:, None] + 1) / self.r0**2
else:
admit *= 1.e8 * self.gm * (degrees + 1) / self.r0**2
return admit
def correlation(self, hlm, lmax=None):
"""
Return the spectral correlation with another function.
Usage
-----
correlation = g.correlation(hlm, [lmax])
Returns
-------
correlation : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectral correlation, where lmax is the
maximum spherical harmonic degree.
Parameters
----------
hlm : SHCoeffs, SHMagCoeffs or SHGravCoeffs class instance.
The function h used in computing the spectral correlation.
lmax : int, optional, default = g.lmax
Maximum spherical harmonic degree of the spectrum to output.
Notes
-----
The spectral correlation is defined as
gamma(l) = Sgh(l) / sqrt( Sgg(l) Shh(l) )
where Sgh, Shh and Sgg are the cross-power and power spectra of the
functions g (self) and h (input).
"""
from .shmagcoeffs import SHMagCoeffs as _SHMagCoeffs
if not isinstance(hlm, (_SHCoeffs, _SHMagCoeffs, SHGravCoeffs)):
raise ValueError('hlm must be an SHCoeffs, SHMagCoeffs or '
'SHGravCoeffs class instance. Input type is {:s}.'
.format(repr(type(hlm))))
if lmax is None:
lmax = min(self.lmax, hlm.lmax)
sgg = _spectrum(self.coeffs, normalization=self.normalization,
lmax=lmax)
shh = _spectrum(hlm.coeffs, normalization=hlm.normalization, lmax=lmax)
sgh = _cross_spectrum(self.coeffs,
hlm.to_array(normalization=self.normalization,
csphase=self.csphase, lmax=lmax,
errors=False),
normalization=self.normalization,
lmax=lmax)
with _np.errstate(invalid='ignore', divide='ignore'):
return sgh / _np.sqrt(sgg * shh)
def admitcorr(self, hlm, errors=True, function='radial', lmax=None):
"""
Return the admittance and correlation for an input topography function.
Usage
-----
admittance, correlation = g.admitcorr(hlm, [errors, function, lmax])
Returns
-------
admittance : ndarray, shape (lmax+1) or (2, lmax+1)
1-D array of the admittance (errors=False) or 2-D array of the
admittance and its uncertainty (errors=True), where lmax is the
maximum spherical harmonic degree.
correlation : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectral correlation, where lmax is the
maximum spherical harmonic degree.
Parameters
----------
hlm : SHCoeffs class instance.
The topography function h used in computing the admittance
Sgh / Shh. hlm is assumed to have units of meters.
errors : bool, optional, default = True
Return the uncertainty of the admittance.
function : str, optional, default = 'radial'
The type of admittance to return: 'geoid' for using the geoid, in
units of m/km, or 'radial' for using the radial gravity in units
of mGal/km.
lmax : int, optional, default = g.lmax
Maximum spherical harmonic degree of the spectrum to output.
Notes
-----
If gravity g and topography h are related by the equation
glm = Z(l) hlm + nlm
where nlm is a zero-mean random variable, the admittance Z(l) and
spectral correlation can be estimated using
Z(l) = Sgh(l) / Shh(l)
gamma(l) = Sgh(l) / sqrt( Sgg(l) Shh(l) )
where Sgh, Shh and Sgg are the cross-power and power spectra of the
functions g (self) and h (input).
"""
if not isinstance(hlm, _SHCoeffs):
raise ValueError('hlm must be an SHCoeffs class instance. Input '
'type is {:s}.'.format(repr(type(hlm))))
if lmax is None:
lmax = min(self.lmax, hlm.lmax)
sgh = _cross_spectrum(self.coeffs,
hlm.to_array(normalization=self.normalization,
csphase=self.csphase, lmax=lmax,
errors=False),
normalization=self.normalization,
lmax=lmax)
shh = _spectrum(hlm.coeffs, normalization=hlm.normalization, lmax=lmax)
sgg = _spectrum(self.coeffs, normalization=self.normalization,
lmax=lmax)
with _np.errstate(invalid='ignore', divide='ignore'):
admit = sgh / shh
corr = sgh / _np.sqrt(sgg * shh)
if errors:
sigma = (sgg / shh) * (1. - corr**2) / _np.arange(lmax+1) / 2.
admit = _np.column_stack((admit, _np.sqrt(sigma)))
if function == 'geoid':
admit *= 1000. * self.r0
else:
degrees = _np.arange(lmax+1)
if errors:
admit *= 1.e8 * self.gm * (degrees[:, None] + 1) / self.r0**2
else:
admit *= 1.e8 * self.gm * (degrees + 1) / self.r0**2
return admit, corr
# ---- Operations that return a new SHGravCoeffs class instance ----
def rotate(self, alpha, beta, gamma, degrees=True, convention='y',
body=False, dj_matrix=None, backend=None, nthreads=None):
"""
Rotate either the coordinate system used to express the spherical
harmonic coefficients or the physical body, and return a new class
instance.
Usage
-----
x_rotated = x.rotate(alpha, beta, gamma, [degrees, convention,
body, dj_matrix, backend, nthreads])
Returns
-------
x_rotated : SHGravCoeffs class instance
Parameters
----------
alpha, beta, gamma : float
The three Euler rotation angles in degrees.
degrees : bool, optional, default = True
True if the Euler angles are in degrees, False if they are in
radians.
convention : str, optional, default = 'y'
The convention used for the rotation of the second angle, which
can be either 'x' or 'y' for a rotation about the x or y axes,
respectively.
body : bool, optional, default = False
If true, rotate the physical body and not the coordinate system.
dj_matrix : ndarray, optional, default = None
The djpi2 rotation matrix computed by a call to djpi2 (not used if
the backend is 'ducc').
backend : str, optional, default = preferred_backend()
Name of the preferred backend, either 'shtools' or 'ducc'.
nthreads : int, optional, default = 1
Number of threads to use for the 'ducc' backend. Setting this
parameter to 0 will use as many threads as there are hardware
threads on the system.
Notes
-----
This method will take the spherical harmonic coefficients of a
function, rotate the coordinate frame by the three Euler anlges, and
output the spherical harmonic coefficients of the new function. If
the optional parameter body is set to True, then the physical body will
be rotated instead of the coordinate system.
The rotation of a coordinate system or body can be viewed in two
complementary ways involving three successive rotations. Both methods
have the same initial and final configurations, and the angles listed
in both schemes are the same.
Scheme A:
(I) Rotation about the z axis by alpha.
(II) Rotation about the new y axis by beta.
(III) Rotation about the new z axis by gamma.
Scheme B:
(I) Rotation about the z axis by gamma.
(II) Rotation about the initial y axis by beta.
(III) Rotation about the initial z axis by alpha.
Here, the 'y convention' is employed, where the second rotation is with
respect to the y axis. When using the 'x convention', the second
rotation is instead with respect to the x axis. The relation between
the Euler angles in the x and y conventions is given by
alpha_y=alpha_x-pi/2, beta_y=beta_x, and gamma_y=gamma_x+pi/2.
To perform the inverse transform associated with the three angles
(alpha, beta, gamma), one would perform an additional rotation using
the angles (-gamma, -beta, -alpha).
The rotations can be viewed either as a rotation of the coordinate
system or the physical body. To rotate the physical body without
rotation of the coordinate system, set the optional parameter body to
True. This rotation is accomplished by performing the inverse rotation
using the angles (-gamma, -beta, -alpha).
"""
if type(convention) != str:
raise ValueError('convention must be a string. Input type is {:s}.'
.format(str(type(convention))))
if convention.lower() not in ('x', 'y'):
raise ValueError(
"convention must be either 'x' or 'y'. "
"Provided value is {:s}.".format(repr(convention))
)
if convention == 'y':
if body is True:
angles = _np.array([-gamma, -beta, -alpha])
else:
angles = _np.array([alpha, beta, gamma])
elif convention == 'x':
if body is True:
angles = _np.array([-gamma - _np.pi/2, -beta,
-alpha + _np.pi/2])
else:
angles = _np.array([alpha - _np.pi/2, beta, gamma + _np.pi/2])
if degrees:
angles = _np.radians(angles)
if backend is None:
backend = preferred_backend()
rot = self._rotate(angles, dj_matrix, gm=self.gm, r0=self.r0,
omega=self.omega, backend=backend,
nthreads=nthreads)
return rot
def convert(self, normalization=None, csphase=None, lmax=None):
"""
Return an SHGravCoeffs class instance with a different normalization
convention.
Usage
-----
clm = x.convert([normalization, csphase, lmax])
Returns
-------
clm : SHGravCoeffs class instance
Parameters
----------
normalization : str, optional, default = x.normalization
Normalization of the output class: '4pi', 'ortho', 'schmidt', or
'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
semi-normalized, or unnormalized coefficients, respectively.
csphase : int, optional, default = x.csphase
Condon-Shortley phase convention for the output class: 1 to exclude
the phase factor, or -1 to include it.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree to output.
Notes
-----
This method will return a new class instance of the spherical
harmonic coefficients using a different normalization and
Condon-Shortley phase convention. A different maximum spherical
harmonic degree of the output coefficients can be specified, and if
this maximum degree is smaller than the maximum degree of the original
class, the coefficients will be truncated. Conversely, if this degree
is larger than the maximum degree of the original class, the
coefficients of the new class will be zero padded.
"""
if normalization is None:
normalization = self.normalization
if csphase is None:
csphase = self.csphase
if lmax is None:
lmax = self.lmax
# check argument consistency
if type(normalization) != str:
raise ValueError('normalization must be a string. '
'Input type is {:s}.'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"normalization must be '4pi', 'ortho', 'schmidt', or "
"'unnorm'. Provided value is {:s}."
.format(repr(normalization)))
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be 1 or -1. Input value is {:s}."
.format(repr(csphase)))
if self.errors is not None:
coeffs, errors = self.to_array(normalization=normalization.lower(),
csphase=csphase, lmax=lmax,
errors=True)
return SHGravCoeffs.from_array(
coeffs, gm=self.gm, r0=self.r0, omega=self.omega,
errors=errors, error_kind=self.error_kind,
normalization=normalization.lower(),
csphase=csphase, epoch=self.epoch, copy=False)
else:
coeffs = self.to_array(normalization=normalization.lower(),
csphase=csphase, lmax=lmax)
return SHGravCoeffs.from_array(
coeffs, gm=self.gm, r0=self.r0, omega=self.omega,
normalization=normalization.lower(), csphase=csphase,
epoch=self.epoch, copy=False)
def pad(self, lmax, copy=True):
"""
Return an SHGravCoeffs class where the coefficients are zero padded or
truncated to a different lmax.
Usage
-----
clm = x.pad(lmax)
Returns
-------
clm : SHGravCoeffs class instance
Parameters
----------
lmax : int
Maximum spherical harmonic degree to output.
copy : bool, optional, default = True
If True, make a copy of x when initializing the class instance.
If False, modify x itself.
"""
if copy:
clm = self.copy()
else:
clm = self
if lmax <= self.lmax:
clm.coeffs = clm.coeffs[:, :lmax+1, :lmax+1]
clm.mask = clm.mask[:, :lmax+1, :lmax+1]
if self.errors is not None:
clm.errors = clm.errors[:, :lmax+1, :lmax+1]
else:
clm.coeffs = _np.pad(clm.coeffs, ((0, 0), (0, lmax - self.lmax),
(0, lmax - self.lmax)), 'constant')
if self.errors is not None:
clm.errors = _np.pad(
clm.errors, ((0, 0), (0, lmax - self.lmax),
(0, lmax - self.lmax)), 'constant')
mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=bool)
for l in _np.arange(lmax + 1):
mask[:, l, :l + 1] = True
mask[1, :, 0] = False
clm.mask = mask
clm.lmax = lmax
return clm
def change_ref(self, gm=None, r0=None, lmax=None):
"""
Return a new SHGravCoeffs class instance with a different reference gm
or r0.
Usage
-----
clm = x.change_ref([gm, r0, lmax])
Returns
-------
clm : SHGravCoeffs class instance.
Parameters
----------
gm : float, optional, default = self.gm
The gravitational constant time the mass that is associated with
the gravitational potential coefficients.
r0 : float, optional, default = self.r0
The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
Maximum spherical harmonic degree to output.
Notes
-----
This method returns a new class instance of the gravitational
potential, but using a difference reference gm or r0. When
changing the reference radius r0, the spherical harmonic coefficients
will be upward or downward continued under the assumption that the
reference radius is exterior to the body.
"""
if lmax is None:
lmax = self.lmax
clm = self.pad(lmax)
if gm is not None and gm != self.gm:
clm.coeffs *= self.gm / gm
clm.gm = gm
if self.errors is not None:
clm.errors *= self.gm / gm
if r0 is not None and r0 != self.r0:
for l in _np.arange(lmax+1):
clm.coeffs[:, l, :l+1] *= (self.r0 / r0)**l
if self.errors is not None:
clm.errors[:, l, :l+1] *= (self.r0 / r0)**l
clm.r0 = r0
return clm
# ---- Routines that return different gravity-related class instances ----
    def expand(self, a=None, f=None, colat=None, lat=None, lon=None,
               degrees=True, lmax=None, lmax_calc=None, normal_gravity=True,
               sampling=2, extend=True):
        """
        Create 2D cylindrical maps on a flattened and rotating ellipsoid of the
        three components of the gravity vector, the gravity disturbance, and
        the gravity potential. Alternatively, compute the gravity vector at
        specified coordinates.
        Usage
        -----
        grids = x.expand([a, f, lmax, lmax_calc, normal_gravity, sampling,
                          extend])
        g = x.expand(lat, lon, [a, f, lmax, lmax_calc, degrees])
        g = x.expand(colat, lon, [a, f, lmax, lmax_calc, degrees])
        Returns
        -------
        grids : SHGravGrid class instance.
        g : (r, theta, phi) components of the gravity vector at the
            specified points.
        Parameters
        ----------
        a : optional, float, default = self.r0
            The semi-major axis of the flattened ellipsoid on which the field
            is computed.
        f : optional, float, default = 0
            The flattening of the reference ellipsoid: f=(a-b)/a.
        lat : int, float, ndarray, or list, optional, default = None
            Latitude coordinates where the gravity is to be evaluated.
        colat : int, float, ndarray, or list, optional, default = None
            Colatitude coordinates where the gravity is to be evaluated.
        lon : int, float, ndarray, or list, optional, default = None
            Longitude coordinates where the gravity is to be evaluated.
        degrees : bool, optional, default = True
            True if lat, colat and lon are in degrees, False if in radians.
        lmax : optional, integer, default = self.lmax
            The maximum spherical harmonic degree, which determines the number
            of samples of the output grids, n=2lmax+2, and the latitudinal
            sampling interval, 90/(lmax+1).
        lmax_calc : optional, integer, default = lmax
            The maximum spherical harmonic degree used in evaluating the
            functions. This must be less than or equal to lmax.
        normal_gravity : optional, bool, default = True
            If True (and if a, f and x.omega are set explicitly), the normal
            gravity (the gravitational acceleration on the rotating ellipsoid)
            will be subtracted from the total gravitational acceleration,
            yielding the "gravity disturbance." This is done using Somigliana's
            formula (after converting geocentric to geodetic coordinates).
        sampling : optional, integer, default = 2
            If 1 the output grids are equally sampled (n by n). If 2 (default),
            the grids are equally spaced in degrees.
        extend : bool, optional, default = True
            If True, compute the longitudinal band for 360 E and the
            latitudinal band for 90 S.
        Notes
        -----
        This method will create 2-dimensional cylindrical maps of the three
        components of the gravity vector (gravitational force + centrifugal
        force), the magnitude of the gravity vector, and the gravity
        potential, and return these as an SHGravGrid class instance. Each map
        is stored as an SHGrid class instance using Driscoll and Healy grids
        that are either equally sampled (n by n) or equally spaced in degrees
        latitude and longitude. All grids use geocentric coordinates, the
        output is in SI units, and the sign of the radial components is
        positive when directed upwards. If latitude and longitude coordinates
        are specified, this method will instead return the gravity vector.
        If the angular rotation rate omega is specified in the SHGravCoeffs
        instance, both the potential and gravity vectors will be calculated in
        a body-fixed rotating reference frame and will include the contribution
        from the centrifugal force. If normal_gravity is set to True, and a, f,
        and omega are all set explicitly, the normal gravity will be removed
        from the magnitude of the gravity vector, yielding the gravity
        disturbance.
        The gravitational potential is given by
            V = GM/r Sum_{l=0}^lmax (r0/r)^l Sum_{m=-l}^l C_{lm} Y_{lm},
        and the gravitational acceleration is
            B = Grad V.
        The coefficients are referenced to the radius r0, and the function is
        computed on a flattened ellipsoid with semi-major axis a (i.e., the
        mean equatorial radius) and flattening f. To convert m/s^2 to mGals,
        multiply the gravity grids by 10^5.
        """
        if lat is not None and colat is not None:
            raise ValueError('lat and colat can not both be specified.')
        # The normal gravity is subtracted only when a, f and omega were all
        # provided explicitly (flag passed to the Fortran routine as 0/1).
        if a is not None and f is not None and self.omega is not None \
                and normal_gravity is True:
            ng = 1
        else:
            ng = 0
        if a is None:
            a = self.r0
        if f is None:
            f = 0.
        if (lat is not None or colat is not None) and lon is not None:
            # Point-evaluation mode: return the gravity vector at the given
            # coordinates instead of global grids.
            if lmax_calc is None:
                lmax_calc = self.lmax
            if colat is not None:
                # Convert colatitude to latitude (90 deg or pi/2 minus colat).
                if degrees:
                    temp = 90.
                else:
                    temp = _np.pi/2.
                if type(colat) is list:
                    lat = list(map(lambda x: temp - x, colat))
                else:
                    lat = temp - colat
            values = self._expand_coord(a=a, f=f, lat=lat, lon=lon,
                                        degrees=degrees, lmax_calc=lmax_calc,
                                        omega=self.omega)
            return values
        else:
            # Grid mode: evaluate the field on Driscoll and Healy grids.
            if lmax is None:
                lmax = self.lmax
            if lmax_calc is None:
                lmax_calc = lmax
            # The underlying routine expects 4pi-normalized coefficients with
            # csphase = 1.
            coeffs = self.to_array(normalization='4pi', csphase=1,
                                   errors=False)
            rad, theta, phi, total, pot = _MakeGravGridDH(
                coeffs, self.gm, self.r0, a=a, f=f, lmax=lmax,
                lmax_calc=lmax_calc, sampling=sampling, omega=self.omega,
                normal_gravity=ng, extend=extend)
            return _SHGravGrid(rad, theta, phi, total, pot, self.gm, a, f,
                               self.omega, normal_gravity, lmax, lmax_calc,
                               units='m/s2', pot_units='m2/s2',
                               epoch=self.epoch)
def tensor(self, a=None, f=None, lmax=None, lmax_calc=None, degree0=False,
sampling=2, extend=True):
"""
Create 2D cylindrical maps on a flattened ellipsoid of the 9
components of the gravity "gradient" tensor in a local north-oriented
reference frame, and return an SHGravTensor class instance.
Usage
-----
tensor = x.tensor([a, f, lmax, lmax_calc, sampling, extend])
Returns
-------
tensor : SHGravTensor class instance.
Parameters
----------
a : optional, float, default = self.r0
The semi-major axis of the flattened ellipsoid on which the field
is computed.
f : optional, float, default = 0
The flattening of the reference ellipsoid: f=(a-b)/a.
lmax : optional, integer, default = self.lmax
The maximum spherical harmonic degree that determines the number of
samples of the output grids, n=2lmax+2, and the latitudinal
sampling interval, 90/(lmax+1).
lmax_calc : optional, integer, default = lmax
The maximum spherical harmonic degree used in evaluating the
functions. This must be less than or equal to lmax.
degree0 : optional, default = False
If True, include the degree-0 term when calculating the tensor. If
False, set the degree-0 term to zero.
sampling : optional, integer, default = 2
If 1 the output grids are equally sampled (n by n). If 2 (default),
the grids are equally spaced in degrees.
extend : bool, optional, default = True
If True, compute the longitudinal band for 360 E and the
latitudinal band for 90 S.
Notes
-----
This method will create 2-dimensional cylindrical maps for the 9
components of the gravity 'gradient' tensor and return an SHGravTensor
class instance. The components are
(Vxx, Vxy, Vxz)
(Vyx, Vyy, Vyz)
(Vzx, Vzy, Vzz)
where the reference frame is north-oriented, where x points north, y
points west, and z points upward (all tangent or perpendicular to a
sphere of radius r, where r is the local radius of the flattened
ellipsoid). The gravitational potential is defined as
V = GM/r Sum_{l=0}^lmax (r0/r)^l Sum_{m=-l}^l C_{lm} Y_{lm},
where r0 is the reference radius of the spherical harmonic coefficients
Clm, and the gravitational acceleration is
B = Grad V.
The components of the gravity tensor are calculated according to eq. 1
in Petrovskaya and Vershkov (2006), which is based on eq. 3.28 in Reed
(1973) (noting that Reed's equations are in terms of latitude and that
the y axis points east):
Vzz = Vrr
Vxx = 1/r Vr + 1/r^2 Vtt
Vyy = 1/r Vr + 1/r^2 /tan(t) Vt + 1/r^2 /sin(t)^2 Vpp
Vxy = 1/r^2 /sin(t) Vtp - cos(t)/sin(t)^2 /r^2 Vp
Vxz = 1/r^2 Vt - 1/r Vrt
Vyz = 1/r^2 /sin(t) Vp - 1/r /sin(t) Vrp
where r, t, p stand for radius, theta, and phi, respectively, and
subscripts on V denote partial derivatives. The output grids are in
units of Eotvos (10^-9 s^-2).
References
----------
Reed, G.B., Application of kinematical geodesy for determining
the short wave length components of the gravity field by satellite
gradiometry, Ohio State University, Dept. of Geod. Sciences, Rep. No.
201, Columbus, Ohio, 1973.
Petrovskaya, M.S. and A.N. Vershkov, Non-singular expressions for the
gravity gradients in the local north-oriented and orbital reference
frames, J. Geod., 80, 117-127, 2006.
"""
if a is None:
a = self.r0
if f is None:
f = 0.
if lmax is None:
lmax = self.lmax
if lmax_calc is None:
lmax_calc = lmax
coeffs = self.to_array(normalization='4pi', csphase=1, errors=False)
if degree0 is False:
coeffs[0, 0, 0] = 0.
vxx, vyy, vzz, vxy, vxz, vyz = _MakeGravGradGridDH(
coeffs, self.gm, self.r0, a=a, f=f, lmax=lmax,
lmax_calc=lmax_calc, sampling=sampling, extend=extend)
return _SHGravTensor(1.e9*vxx, 1.e9*vyy, 1.e9*vzz, 1.e9*vxy, 1.e9*vxz,
1.e9*vyz, self.gm, a, f, lmax, lmax_calc,
units='Eötvös', epoch=self.epoch)
def geoid(self, potref, a=None, f=None, r=None, omega=None, order=2,
lmax=None, lmax_calc=None, grid='DH2', extend=True):
"""
Create a global map of the height of the geoid and return an SHGeoid
class instance.
Usage
-----
geoid = x.geoid(potref, [a, f, r, omega, order, lmax, lmax_calc, grid,
extend])
Returns
-------
geoid : SHGeoid class instance.
Parameters
----------
potref : float
The value of the potential on the chosen geoid, in m2 / s2.
a : optional, float, default = self.r0
The semi-major axis of the flattened ellipsoid on which the field
is computed.
f : optional, float, default = 0
The flattening of the reference ellipsoid: f=(a-b)/a.
r : optional, float, default = self.r0
The radius of the reference sphere that the Taylor expansion of the
potential is calculated on.
order : optional, integer, default = 2
The order of the Taylor series expansion of the potential about the
reference radius r. This can be either 1, 2, or 3.
omega : optional, float, default = self.omega
The angular rotation rate of the planet.
lmax : optional, integer, default = self.lmax
The maximum spherical harmonic degree that determines the number
of samples of the output grid, n=2lmax+2, and the latitudinal
sampling interval, 90/(lmax+1).
lmax_calc : optional, integer, default = lmax
The maximum spherical harmonic degree used in evaluating the
functions. This must be less than or equal to lmax.
grid : str, optional, default = 'DH2'
'DH' or 'DH1' for an equally sampled grid with nlat=nlon, or
'DH2' for an equally spaced grid in degrees latitude and longitude.
extend : bool, optional, default = True
If True, compute the longitudinal band for 360 E and the
latitudinal band for 90 S.
Notes
-----
This method will create a global map of the geoid height, accurate to
either first, second, or third order, using the method described in
Wieczorek (2007; equation 19-20). The algorithm expands the potential
in a Taylor series on a spherical interface of radius r, and computes
the height above this interface to the potential potref exactly from
the linear, quadratic, or cubic equation at each grid point. If the
optional parameters a and f are specified, the geoid height will be
referenced to a flattened ellipsoid with semi-major axis a and
flattening f. The pseudo-rotational potential is explicitly accounted
for by using the angular rotation rate omega of the planet in the
SHGravCoeffs class instance. If omega is explicitly specified for this
method, it will override the value present in the class instance.
Reference
----------
Wieczorek, M. A. Gravity and topography of the terrestrial planets,
Treatise on Geophysics, 10, 165-206, 2007.
"""
if a is None:
a = self.r0
if f is None:
f = 0.
if r is None:
r = self.r0
if lmax is None:
lmax = self.lmax
if lmax_calc is None:
lmax_calc = lmax
if grid.upper() in ('DH', 'DH1'):
sampling = 1
elif grid.upper() == 'DH2':
sampling = 2
else:
raise ValueError(
"grid must be 'DH', 'DH1', or 'DH2'. "
"Input value is {:s}.".format(repr(grid)))
coeffs = self.to_array(normalization='4pi', csphase=1, errors=False)
if omega is None:
omega = self.omega
geoid = _MakeGeoidGridDH(coeffs, self.r0, self.gm, potref, lmax=lmax,
omega=omega, r=r, order=order,
lmax_calc=lmax_calc, a=a, f=f,
sampling=sampling, extend=extend)
return _SHGeoid(geoid, self.gm, potref, a, f, omega, r, order,
lmax, lmax_calc, units='m', epoch=self.epoch)
# ---- Plotting routines ----
    def plot_spectrum(self, function='geoid', unit='per_l', base=10.,
                      lmax=None, xscale='lin', yscale='log', grid=True,
                      legend=None, legend_error='error', legend_loc='best',
                      axes_labelsize=None, tick_labelsize=None, ax=None,
                      show=True, fname=None, **kwargs):
        """
        Plot the spectrum as a function of spherical harmonic degree.
        Usage
        -----
        x.plot_spectrum([function, unit, base, lmax, xscale, yscale, grid,
                         legend, legend_loc, axes_labelsize, tick_labelsize,
                         ax, show, fname, **kwargs])
        Parameters
        ----------
        function : str, optional, default = 'geoid'
            The type of power spectrum to calculate: 'potential' for the
            gravitational potential, 'geoid' for the geoid, 'radial' for
            the radial gravity, or 'total' for the total gravitational field.
        unit : str, optional, default = 'per_l'
            If 'per_l', plot the total contribution to the spectrum for each
            spherical harmonic degree l. If 'per_lm', plot the average
            contribution to the spectrum for each coefficient at spherical
            harmonic degree l. If 'per_dlogl', plot the spectrum per log
            interval dlog_a(l).
        base : float, optional, default = 10.
            The logarithm base when calculating the 'per_dlogl' spectrum, and
            the base to use for logarithmic axes.
        lmax : int, optional, default = self.lmax
            The maximum spherical harmonic degree to plot.
        xscale : str, optional, default = 'lin'
            Scale of the x axis: 'lin' for linear or 'log' for logarithmic.
        yscale : str, optional, default = 'log'
            Scale of the y axis: 'lin' for linear or 'log' for logarithmic.
        grid : bool, optional, default = True
            If True, plot grid lines.
        legend : str, optional, default = None
            Text to use for the legend.
        legend_error : str, optional, default = 'error'
            Text to use for the legend of the error spectrum.
        legend_loc : str, optional, default = 'best'
            Location of the legend, such as 'upper right' or 'lower center'
            (see pyplot.legend for all options).
        axes_labelsize : int, optional, default = None
            The font size for the x and y axes labels.
        tick_labelsize : int, optional, default = None
            The font size for the x and y tick labels.
        ax : matplotlib axes object, optional, default = None
            A single matplotlib axes object where the plot will appear.
        show : bool, optional, default = True
            If True, plot to the screen.
        fname : str, optional, default = None
            If present, and if ax is not specified, save the image to the
            specified file.
        **kwargs : keyword arguments, optional
            Keyword arguments for pyplot.plot().
        Notes
        -----
        This method plots the power (and error) spectrum of the class instance,
        where the type of spectrum is defined by the parameter function:
        'potential' for the gravitational potential, 'geoid' for the geoid,
        'radial' for the radial gravity, or 'total' for the total gravitational
        field. The power for the degree 0 and 1 terms are not plotted. In all
        cases, the total power of the function is defined as the integral of
        the function squared over all space, divided by the area the function
        spans. If the mean of the function is zero, this is equivalent to the
        variance of the function.
        The output spectrum can be expresed using one of three units. 'per_l'
        returns the contribution to the total spectrum from all angular orders
        at degree l. 'per_lm' returns the average contribution to the total
        spectrum from a single coefficient at degree l, which is equal to the
        'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
        contribution to the total spectrum from all angular orders over an
        infinitessimal logarithmic degree band. The contrubution in the band
        dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
        and where spectrum(l, 'per_dlogl) is equal to
        spectrum(l, 'per_l')*l*log(a).
        """
        if lmax is None:
            lmax = self.lmax
        # Compute the spectrum (and error spectrum when errors are present).
        # 'radial' and 'total' spectra are converted from (m/s^2)^2 to mGal^2
        # via the factor 1e10.
        if self.errors is not None:
            spectrum, error_spectrum = self.spectrum(function=function,
                                                     unit=unit, base=base,
                                                     lmax=lmax)
            if function == 'radial' or function == 'total':
                spectrum *= 1.e10
                error_spectrum *= 1.e10
        else:
            spectrum = self.spectrum(function=function, unit=unit, base=base,
                                     lmax=lmax)
            if function == 'radial' or function == 'total':
                spectrum *= 1.e10
        ls = _np.arange(lmax + 1)
        # Reuse the caller's axes if provided; otherwise make a new figure.
        if ax is None:
            fig, axes = _plt.subplots(1, 1)
        else:
            axes = ax
        # Resolve named matplotlib font sizes (e.g. 'medium') to points.
        if axes_labelsize is None:
            axes_labelsize = _mpl.rcParams['axes.labelsize']
            if type(axes_labelsize) == str:
                axes_labelsize = _mpl.font_manager \
                                 .FontProperties(size=axes_labelsize) \
                                 .get_size_in_points()
        if tick_labelsize is None:
            tick_labelsize = _mpl.rcParams['xtick.labelsize']
            if type(tick_labelsize) == str:
                tick_labelsize = _mpl.font_manager \
                                 .FontProperties(size=tick_labelsize) \
                                 .get_size_in_points()
        axes.set_xlabel('Spherical harmonic degree', fontsize=axes_labelsize)
        # Axis label units depend on the plotted function type.
        if function == 'geoid':
            axes.set_ylabel('Power, m$^2$', fontsize=axes_labelsize)
        elif function == 'potential':
            axes.set_ylabel('Power, m$^4$ s$^{-4}$', fontsize=axes_labelsize)
        elif function == 'radial':
            axes.set_ylabel('Power, mGal$^2$', fontsize=axes_labelsize)
        elif function == 'total':
            axes.set_ylabel('Power, mGal$^2$', fontsize=axes_labelsize)
        # Default legend text follows the chosen spectrum unit.
        if legend is None:
            if (unit == 'per_l'):
                legend = 'power per degree'
            elif (unit == 'per_lm'):
                legend = 'power per coefficient'
            elif (unit == 'per_dlogl'):
                legend = 'power per log bandwidth'
        if xscale == 'log':
            axes.set_xscale('log', base=base)
        if yscale == 'log':
            axes.set_yscale('log', base=base)
        # Degrees 0 and 1 are not plotted (slices start at index 2).
        if self.errors is not None:
            axes.plot(ls[2:lmax + 1], spectrum[2:lmax + 1], label=legend,
                      **kwargs)
            axes.plot(ls[2:lmax + 1], error_spectrum[2:lmax + 1],
                      label=legend_error, **kwargs)
        else:
            axes.plot(ls[2:lmax + 1], spectrum[2: lmax + 1], label=legend,
                      **kwargs)
        if xscale == 'lin':
            # On caller-supplied axes, never shrink the existing x limits.
            if ax is None:
                axes.set(xlim=(ls[0], ls[lmax]))
            else:
                axes.set(xlim=(ls[0], max(ls[lmax], ax.get_xbound()[1])))
        axes.grid(grid, which='major')
        axes.minorticks_on()
        axes.tick_params(labelsize=tick_labelsize)
        axes.legend(loc=legend_loc)
        # Only finalize (layout/show/save) figures this method created.
        if ax is None:
            fig.tight_layout(pad=0.5)
            if show:
                fig.show()
            if fname is not None:
                fig.savefig(fname)
            return fig, axes
    def plot_spectrum2d(self, function='geoid', ticks='WSen',
                        tick_interval=[None, None],
                        minor_tick_interval=[None, None],
                        degree_label='Spherical harmonic degree',
                        order_label='Spherical harmonic order', title=None,
                        colorbar='right', origin='top', cmap='viridis',
                        cmap_limits=None, cmap_rlimits=None,
                        cmap_reverse=False, cmap_scale='log',
                        cb_triangles='neither', cb_label=None, cb_offset=None,
                        cb_width=None, lmax=None, errors=False, xscale='lin',
                        yscale='lin', grid=False, titlesize=None,
                        axes_labelsize=None, tick_labelsize=None, ax=None,
                        show=True, fname=None):
        """
        Plot the spectrum as a function of spherical harmonic degree and order.
        Usage
        -----
        x.plot_spectrum2d([function, ticks, tick_interval, minor_tick_interval,
                           degree_label, order_label, title, colorbar, origin,
                           cmap, cmap_limits, cmap_rlimits, cmap_reverse,
                           cmap_scale, cb_triangles, cb_label, cb_offset,
                           cb_width, lmax, errors, xscale, yscale, grid,
                           titlesize, axes_labelsize, tick_labelsize, ax,
                           show, fname])
        Parameters
        ----------
        function : str, optional, default = 'geoid'
            The type of power spectrum to calculate: 'potential' for the
            gravitational potential, 'geoid' for the geoid, 'radial' for
            the radial gravity, or 'total' for the total gravitational field.
        ticks : str, optional, default = 'WSen'
            Specify which axes should have ticks drawn and annotated. Capital
            letters plot the ticks and annotations, whereas small letters plot
            only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
            and north boundaries of the plot, respectively. Alternatively, use
            'L', 'B', 'R', and 'T' for left, bottom, right, and top.
        tick_interval : list or tuple, optional, default = [None, None]
            Intervals to use when plotting the degree and order ticks,
            respectively (used only when xscale and yscale are 'lin'). If set
            to None, ticks will be generated automatically.
        minor_tick_interval : list or tuple, optional, default = [None, None]
            Intervals to use when plotting the minor degree and order ticks,
            respectively (used only when xscale and yscale are 'lin'). If set
            to None, minor ticks will be generated automatically.
        degree_label : str, optional, default = 'Spherical harmonic degree'
            Label for the spherical harmonic degree axis.
        order_label : str, optional, default = 'Spherical harmonic order'
            Label for the spherical harmonic order axis.
        title : str or list, optional, default = None
            The title of the plot.
        colorbar : str, optional, default = 'right'
            Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
        origin : str, optional, default = 'top'
            Location where the degree 0 coefficient is plotted. Either 'left',
            'right', 'top', or 'bottom'.
        cmap : str, optional, default = 'viridis'
            The color map to use when plotting the data and colorbar.
        cmap_limits : list, optional, default = [self.min(), self.max()]
            Set the lower and upper limits of the data used by the colormap,
            and optionally an interval for each color band. If interval is
            specified, the number of discrete colors will be
            (cmap_limits[1]-cmap_limits[0])/cmap_limits[2] for linear scales
            and log10(cmap_limits[1]/cmap_limits[0])*cmap_limits[2] for
            logarithmic scales.
        cmap_rlimits : list, optional, default = None
            Same as cmap_limits, except the provided upper and lower values are
            relative with respect to the maximum value of the data.
        cmap_reverse : bool, optional, default = False
            Set to True to reverse the sense of the color progression in the
            color table.
        cmap_scale : str, optional, default = 'log'
            Scale of the color axis: 'lin' for linear or 'log' for logarithmic.
        cb_triangles : str, optional, default = 'neither'
            Add triangles to the edges of the colorbar for minimum and maximum
            values. Can be 'neither', 'both', 'min', or 'max'.
        cb_label : str, optional, default = None
            Text label for the colorbar.
        cb_offset : float or int, optional, default = None
            Offset of the colorbar from the map edge in points. If None,
            the offset will be calculated automatically.
        cb_width : float, optional, default = None
            Width of the colorbar in percent with respect to the width of the
            respective image axis. Defaults are 2.5 and 5 for vertical and
            horizontal colorbars, respectively.
        lmax : int, optional, default = self.lmax
            The maximum spherical harmonic degree to plot.
        errors : bool, optional, default = False
            If True, plot the spectrum of the errors.
        xscale : str, optional, default = 'lin'
            Scale of the l axis: 'lin' for linear or 'log' for logarithmic.
        yscale : str, optional, default = 'lin'
            Scale of the m axis: 'lin' for linear or 'log' for logarithmic.
        grid : bool, optional, default = False
            If True, plot grid lines.
        axes_labelsize : int, optional, default = None
            The font size for the x and y axes labels.
        tick_labelsize : int, optional, default = None
            The font size for the x and y tick labels.
        titlesize : int, optional, default = None
            The font size of the title.
        ax : matplotlib axes object, optional, default = None
            A single matplotlib axes object where the plot will appear.
        show : bool, optional, default = True
            If True, plot to the screen.
        fname : str, optional, default = None
            If present, and if ax is not specified, save the image to the
            specified file.
        Notes
        -----
        This method plots the power of the class instance for each spherical
        harmonic degree and order, where the type of spectrum is defined by
        the parameter function: 'potential' for the gravitational potential,
        'geoid' for the geoid, 'radial' for the radial gravity, or 'total' for
        the total gravitational field. In all cases, the total power of the
        function is defined as the integral of the function squared over all
        space, divided by the area the function spans. If the mean of the
        function is zero, this is equivalent to the variance of the function.
        """
        # Normalize None tick arguments to two-element [degree, order] lists.
        if tick_interval is None:
            tick_interval = [None, None]
        if minor_tick_interval is None:
            minor_tick_interval = [None, None]
        # Resolve named font sizes (e.g. 'medium') to numeric point values.
        if axes_labelsize is None:
            axes_labelsize = _mpl.rcParams['axes.labelsize']
        if type(axes_labelsize) == str:
            axes_labelsize = _mpl.font_manager \
                .FontProperties(size=axes_labelsize) \
                .get_size_in_points()
        if tick_labelsize is None:
            tick_labelsize = _mpl.rcParams['xtick.labelsize']
        if type(tick_labelsize) == str:
            tick_labelsize = _mpl.font_manager \
                .FontProperties(size=tick_labelsize) \
                .get_size_in_points()
        if titlesize is None:
            titlesize = _mpl.rcParams['axes.titlesize']
        if type(titlesize) == str:
            titlesize = _mpl.font_manager \
                .FontProperties(size=titlesize) \
                .get_size_in_points()
        if lmax is None:
            lmax = self.lmax
        degrees = _np.arange(lmax + 1)
        # Create the matrix of the spectrum for each coefficient
        if errors is True:
            if self.errors is None:
                raise ValueError('Can not plot the error spectrum when the '
                                 'errors are not set.')
            coeffs = self.errors
        else:
            coeffs = self.coeffs
        # Row l holds degree l; columns run from order -lmax (left) to +lmax.
        spectrum = _np.empty((lmax + 1, 2 * lmax + 1))
        mpositive = _np.abs(coeffs[0, :lmax + 1, :lmax + 1])**2
        # Exclude the l=0, m=0 term from the plotted spectrum.
        mpositive[0, 0] = 0.
        # Entries with m > l do not exist; mark them NaN so they get masked.
        mpositive[~self.mask[0, :lmax + 1, :lmax + 1]] = _np.nan
        mnegative = _np.abs(coeffs[1, :lmax + 1, :lmax + 1])**2
        mnegative[~self.mask[1, :lmax + 1, :lmax + 1]] = _np.nan
        spectrum[:, :lmax] = _np.fliplr(mnegative)[:, :lmax]
        spectrum[:, lmax:] = mpositive
        # Account for the normalization convention of the coefficients.
        if self.normalization == '4pi':
            pass
        elif self.normalization == 'schmidt':
            for l in degrees:
                spectrum[l, :] /= (2. * l + 1.)
        elif self.normalization == 'ortho':
            for l in degrees:
                spectrum[l, :] /= (4. * _np.pi)
        elif self.normalization == 'unnorm':
            for l in degrees:
                ms = _np.arange(l+1)
                conv = _factorial(l+ms) / (2. * l + 1.) / _factorial(l-ms)
                if self.kind == 'real':
                    conv[1:l + 1] = conv[1:l + 1] / 2.
                spectrum[l, lmax-l:lmax] *= conv[::-1][0:l]
                spectrum[l, lmax:lmax+l+1] *= conv[0:l+1]
        else:
            raise ValueError(
                "normalization must be '4pi', 'ortho', 'schmidt', " +
                "or 'unnorm'. Input value is {:s}."
                .format(repr(self.normalization)))
        # Scale to the requested physical quantity; 1.e10 converts to mGal**2.
        if function == 'geoid':
            spectrum *= self.r0**2
        elif function == 'potential':
            spectrum *= (self.gm / self.r0)**2
        elif function == 'radial':
            for l in degrees:
                spectrum[l, :] *= 1.e10 * (self.gm * (l + 1) /
                                           self.r0**2)**2
        elif function == 'total':
            for l in degrees:
                spectrum[l, :] *= 1.e10 * (self.gm / self.r0**2)**2 * \
                    (l + 1) * (2 * l + 1)
        # For 'top'/'bottom' origins the degree axis is vertical; rotate.
        if origin in ('top', 'bottom'):
            spectrum = _np.rot90(spectrum, axes=(1, 0))
        spectrum_masked = _np.ma.masked_invalid(spectrum)
        # need to add one extra value to each in order for pcolormesh
        # to plot the last row and column.
        ls = _np.arange(lmax+2).astype(_np.float64)
        ms = _np.arange(-lmax, lmax + 2, dtype=_np.float64)
        if origin in ('left', 'right'):
            xgrid, ygrid = _np.meshgrid(ls, ms, indexing='ij')
        elif origin in ('top', 'bottom'):
            xgrid, ygrid = _np.meshgrid(ms, ls[::-1], indexing='ij')
        else:
            raise ValueError(
                "origin must be 'left', 'right', 'top', or 'bottom'. "
                "Input value is {:s}.".format(repr(origin)))
        # Shift cell edges by half a unit so cells are centered on integers.
        xgrid -= 0.5
        ygrid -= 0.5
        if ax is None:
            # Choose a figure aspect that leaves room for the colorbar.
            if colorbar is not None:
                if colorbar in set(['top', 'bottom']):
                    scale = 1.2
                else:
                    scale = 0.9
            else:
                scale = 1.025
            figsize = (_mpl.rcParams['figure.figsize'][0],
                       _mpl.rcParams['figure.figsize'][0] * scale)
            fig = _plt.figure(figsize=figsize)
            axes = fig.add_subplot(111)
        else:
            axes = ax
        # make colormap
        if cmap_limits is None and cmap_rlimits is None:
            if cmap_scale.lower() == 'log':
                # NOTE(review): _temp aliases spectrum, so zeros become NaN
                # in place; presumably harmless because spectrum_masked was
                # built above (masked_invalid copies by default) — verify.
                _temp = spectrum
                _temp[_temp == 0] = _np.NaN
                vmin = _np.nanmin(_temp)
            else:
                vmin = _np.nanmin(spectrum)
            vmax = _np.nanmax(spectrum)
            cmap_limits = [vmin, vmax]
        elif cmap_rlimits is not None:
            # Relative limits are fractions of the maximum spectral value.
            vmin = _np.nanmax(spectrum) * cmap_rlimits[0]
            vmax = _np.nanmax(spectrum) * cmap_rlimits[1]
            cmap_limits = [vmin, vmax]
            if len(cmap_rlimits) == 3:
                cmap_limits.append(cmap_rlimits[2])
        # A third element in cmap_limits requests a discretized colormap.
        if len(cmap_limits) == 3:
            if cmap_scale.lower() == 'log':
                num = int(_np.log10(cmap_limits[1]/cmap_limits[0])
                          * cmap_limits[2])
            else:
                num = int((cmap_limits[1] - cmap_limits[0]) / cmap_limits[2])
            if isinstance(cmap, _mpl.colors.Colormap):
                # NOTE(review): Colormap._resample is private matplotlib API
                # and may break on newer versions — confirm.
                cmap_scaled = cmap._resample(num)
            else:
                cmap_scaled = _mpl.cm.get_cmap(cmap, num)
        else:
            cmap_scaled = _mpl.cm.get_cmap(cmap)
        if cmap_reverse:
            cmap_scaled = cmap_scaled.reversed()
        if cmap_scale.lower() == 'log':
            norm = _mpl.colors.LogNorm(cmap_limits[0], cmap_limits[1],
                                       clip=True)
            # Clipping is required to avoid an invalid value error
        elif cmap_scale.lower() == 'lin':
            norm = _plt.Normalize(cmap_limits[0], cmap_limits[1])
        else:
            raise ValueError(
                "cmap_scale must be 'lin' or 'log'. " +
                "Input value is {:s}.".format(repr(cmap_scale)))
        # determine which ticks to plot
        # Decode the 'WSen'-style ticks string into per-side draw/label flags.
        if 'W' in ticks or 'L' in ticks:
            left, labelleft = True, True
        elif 'w' in ticks or 'l' in ticks:
            left, labelleft = True, False
        else:
            left, labelleft = False, False
        if 'S' in ticks or 'B' in ticks:
            bottom, labelbottom = True, True
        elif 's' in ticks or 'b' in ticks:
            bottom, labelbottom = True, False
        else:
            bottom, labelbottom = False, False
        if 'E' in ticks or 'R' in ticks:
            right, labelright = True, True
        elif 'e' in ticks or 'r' in ticks:
            right, labelright = True, False
        else:
            right, labelright = False, False
        if 'N' in ticks or 'T' in ticks:
            top, labeltop = True, True
        elif 'n' in ticks or 't' in ticks:
            top, labeltop = True, False
        else:
            top, labeltop = False, False
        # Set tick intervals (used only for linear axis)
        if tick_interval[0] is not None:
            degree_ticks = _np.linspace(
                0, lmax, num=lmax//tick_interval[0]+1, endpoint=True)
        if tick_interval[1] is not None:
            order_ticks = _np.linspace(
                -lmax, lmax, num=2*lmax//tick_interval[1]+1, endpoint=True)
        if minor_tick_interval[0] is not None:
            degree_minor_ticks = _np.linspace(
                0, lmax, num=lmax//minor_tick_interval[0]+1, endpoint=True)
        if minor_tick_interval[1] is not None:
            order_minor_ticks = _np.linspace(
                -lmax, lmax, num=2*lmax//minor_tick_interval[1]+1,
                endpoint=True)
        if (xscale == 'lin'):
            cmesh = axes.pcolormesh(xgrid, ygrid, spectrum_masked,
                                    norm=norm, cmap=cmap_scaled)
            if origin in ('left', 'right'):
                axes.set(xlim=(-0.5, lmax + 0.5))
                if tick_interval[0] is not None:
                    axes.set(xticks=degree_ticks)
                if minor_tick_interval[0] is not None:
                    axes.set_xticks(degree_minor_ticks, minor=True)
            else:
                axes.set(xlim=(-lmax - 0.5, lmax + 0.5))
                if tick_interval[1] is not None:
                    axes.set(xticks=order_ticks)
                if minor_tick_interval[1] is not None:
                    axes.set_xticks(order_minor_ticks, minor=True)
        elif (xscale == 'log'):
            # Drop the first row/column: a log axis cannot include l = 0.
            cmesh = axes.pcolormesh(xgrid[1:], ygrid[1:], spectrum_masked[1:],
                                    norm=norm, cmap=cmap_scaled)
            if origin in ('left', 'right'):
                axes.set(xscale='log', xlim=(1., lmax + 0.5))
            else:
                # Order spans negative values, so use a symmetric log axis.
                axes.set(xscale='symlog', xlim=(-lmax - 0.5, lmax + 0.5))
        else:
            raise ValueError(
                "xscale must be 'lin' or 'log'. "
                "Input value is {:s}.".format(repr(xscale)))
        if (yscale == 'lin'):
            if origin in ('left', 'right'):
                axes.set(ylim=(-lmax - 0.5, lmax + 0.5))
                if tick_interval[1] is not None:
                    axes.set(yticks=order_ticks)
                if minor_tick_interval[1] is not None:
                    axes.set_yticks(order_minor_ticks, minor=True)
            else:
                axes.set(ylim=(-0.5, lmax + 0.5))
                if tick_interval[0] is not None:
                    axes.set(yticks=degree_ticks)
                if minor_tick_interval[0] is not None:
                    axes.set_yticks(degree_minor_ticks, minor=True)
        elif (yscale == 'log'):
            if origin in ('left', 'right'):
                axes.set(yscale='symlog', ylim=(-lmax - 0.5, lmax + 0.5))
            else:
                axes.set(yscale='log', ylim=(1., lmax + 0.5))
        else:
            raise ValueError(
                "yscale must be 'lin' or 'log'. "
                "Input value is {:s}.".format(repr(yscale)))
        axes.set_aspect('auto')
        # Degree labels the x axis for horizontal origins, y axis otherwise.
        if origin in ('left', 'right'):
            axes.set_xlabel(degree_label, fontsize=axes_labelsize)
            axes.set_ylabel(order_label, fontsize=axes_labelsize)
        else:
            axes.set_xlabel(order_label, fontsize=axes_labelsize)
            axes.set_ylabel(degree_label, fontsize=axes_labelsize)
        if labeltop:
            axes.xaxis.set_label_position('top')
        if labelright:
            axes.yaxis.set_label_position('right')
        axes.tick_params(bottom=bottom, top=top, right=right, left=left,
                         labelbottom=labelbottom, labeltop=labeltop,
                         labelleft=labelleft, labelright=labelright,
                         which='both')
        axes.tick_params(labelsize=tick_labelsize)
        axes.minorticks_on()
        axes.grid(grid, which='major')
        if title is not None:
            axes.set_title(title, fontsize=titlesize)
        # Invert an axis when degree 0 should sit at the right or the top.
        if origin == 'right':
            axes.invert_xaxis()
        if origin == 'top':
            axes.invert_yaxis()
        # plot colorbar
        if colorbar is not None:
            if cb_label is None:
                if function == 'geoid':
                    cb_label = 'Power, m$^2$'
                elif function == 'potential':
                    cb_label = 'Power, m$^4$ s$^{-4}$'
                elif function == 'radial':
                    cb_label = 'Power, mGal$^2$'
                elif function == 'total':
                    cb_label = 'Power, mGal$^2$'
            if cb_offset is None:
                # Estimate the gap between the map edge and colorbar from the
                # tick, tick-label, and axis-label sizes on the shared side.
                offset = 1.3 * _mpl.rcParams['font.size']
                if (colorbar == 'left' and left) or \
                        (colorbar == 'right' and right) or \
                        (colorbar == 'bottom' and bottom) or \
                        (colorbar == 'top' and top):
                    offset += _mpl.rcParams['xtick.major.size']
                if (colorbar == 'left' and labelleft) or \
                        (colorbar == 'right' and labelright) or \
                        (colorbar == 'bottom' and labelbottom) or \
                        (colorbar == 'top' and labeltop):
                    offset += _mpl.rcParams['xtick.major.pad']
                    offset += tick_labelsize
                if origin in ('left', 'right') and colorbar == 'left' and \
                        order_label != '' and order_label is not None \
                        and labelleft:
                    offset += 1.9 * axes_labelsize
                if origin in ('left', 'right') and colorbar == 'right' \
                        and order_label != '' and order_label is not None \
                        and labelright:
                    offset += 1.9 * axes_labelsize
                if origin in ('bottom', 'top') and colorbar == 'left' \
                        and degree_label != '' \
                        and degree_label is not None and labelleft:
                    offset += 1.9 * axes_labelsize
                if origin in ('bottom', 'top') and colorbar == 'right' \
                        and degree_label != '' \
                        and degree_label is not None and labelright:
                    offset += 1.9 * axes_labelsize
                if origin in ('left', 'right') and colorbar == 'bottom' \
                        and degree_label != '' \
                        and degree_label is not None and labelbottom:
                    offset += axes_labelsize
                if origin in ('left', 'right') and colorbar == 'top' \
                        and degree_label != '' \
                        and degree_label is not None and labeltop:
                    offset += axes_labelsize
                if origin in ('bottom', 'top') and colorbar == 'bottom' \
                        and order_label != '' \
                        and order_label is not None and labelbottom:
                    offset += axes_labelsize
                if origin in ('bottom', 'top') and colorbar == 'top' \
                        and order_label != '' \
                        and order_label is not None and labeltop:
                    offset += axes_labelsize
            else:
                offset = cb_offset
            offset /= 72.  # convert to inches
            divider = _make_axes_locatable(axes)
            # NOTE(review): the docstring says vertical colorbars default to
            # a width of 2.5%, but both branches below use 5% — confirm.
            if colorbar in set(['left', 'right']):
                orientation = 'vertical'
                extendfrac = 0.025
                if cb_width is None:
                    size = '5%'
                else:
                    size = '{:f}%'.format(cb_width)
            else:
                orientation = 'horizontal'
                extendfrac = 0.025
                if cb_width is None:
                    size = '5%'
                else:
                    size = '{:f}%'.format(cb_width)
            cax = divider.append_axes(colorbar, size=size, pad=offset)
            cbar = _plt.colorbar(cmesh, cax=cax, orientation=orientation,
                                 extend=cb_triangles, extendfrac=extendfrac)
            if colorbar == 'left':
                cbar.ax.yaxis.set_ticks_position('left')
                cbar.ax.yaxis.set_label_position('left')
            if colorbar == 'top':
                cbar.ax.xaxis.set_ticks_position('top')
                cbar.ax.xaxis.set_label_position('top')
            cbar.set_label(cb_label, fontsize=axes_labelsize)
            cbar.ax.tick_params(labelsize=tick_labelsize)
        # Finalize only when this method owns the figure (ax was not given).
        if ax is None:
            fig.tight_layout(pad=0.5)
            if show:
                fig.show()
            if fname is not None:
                fig.savefig(fname)
            return fig, axes
    def plot_admitcorr(self, hlm, errors=True, function='radial',
                       style='separate', lmax=None, grid=True, legend=None,
                       legend_loc='best', axes_labelsize=None,
                       tick_labelsize=None, elinewidth=0.75, ax=None, ax2=None,
                       show=True, fname=None, **kwargs):
        """
        Plot the admittance and/or correlation with another function.
        Usage
        -----
        x.plot_admitcorr(hlm, [errors, function, style, lmax, grid, legend,
                               legend_loc, axes_labelsize, tick_labelsize,
                               elinewidth, ax, ax2, show, fname, **kwargs])
        Parameters
        ----------
        hlm : SHCoeffs class instance.
            The second function used in computing the admittance and
            correlation.
        errors : bool, optional, default = True
            Plot the uncertainty of the admittance.
        function : str, optional, default = 'radial'
            The type of admittance to return: 'geoid' for using the geoid, in
            units of m/km, or 'radial' for using the radial gravity in units
            of mGal/km.
        style : str, optional, default = 'separate'
            Style of the plot. 'separate' to plot the admittance and
            correlation in separate plots, 'combined' to plot the admittance
            and correlation in a single plot, 'admit' to plot only the
            admittance, or 'corr' to plot only the correlation.
        lmax : int, optional, default = self.lmax
            The maximum spherical harmonic degree to plot.
        grid : bool, optional, default = True
            If True, plot grid lines. grid is set to False when style is
            'combined'.
        legend : str, optional, default = None
            Text to use for the legend. If style is 'combined' or 'separate',
            provide a list of two strings for the admittance and correlation,
            respectively.
        legend_loc : str, optional, default = 'best'
            Location of the legend, such as 'upper right' or 'lower center'
            (see pyplot.legend for all options). If style is 'separate',
            provide a list of two strings for the admittance and correlation,
            respectively.
        axes_labelsize : int, optional, default = None
            The font size for the x and y axes labels.
        tick_labelsize : int, optional, default = None
            The font size for the x and y tick labels.
        elinewidth : float, optional, default = 0.75
            Line width of the error bars when errors is True.
        ax : matplotlib axes object, optional, default = None
            A single matplotlib axes object where the plot will appear.
        ax2 : matplotlib axes object, optional, default = None
            A single matplotlib axes object where the second plot will appear
            when style is 'separate'.
        show : bool, optional, default = True
            If True, plot to the screen.
        fname : str, optional, default = None
            If present, and if ax is not specified, save the image to the
            specified file.
        **kwargs : keyword arguments, optional
            Keyword arguments for pyplot.plot() and pyplot.errorbar().
        Notes
        -----
        If gravity g and topography h are related by the equation
        glm = Z(l) hlm + nlm
        where nlm is a zero-mean random variable, the admittance and spectral
        correlation gamma(l) can be estimated using
        Z(l) = Sgh(l) / Shh(l)
        gamma(l) = Sgh(l) / sqrt( Sgg(l) Shh(l) )
        where Sgh, Shh and Sgg are the cross-power and power spectra of g
        (self) and h (input).
        """
        if lmax is None:
            lmax = min(self.lmax, hlm.lmax)
        # Compute only the quantities the chosen style actually plots.
        if style in ('combined', 'separate'):
            admit, corr = self.admitcorr(hlm, errors=errors, function=function,
                                         lmax=lmax)
        elif style == 'corr':
            corr = self.correlation(hlm, lmax=lmax)
        elif style == 'admit':
            admit = self.admittance(hlm, errors=errors, function=function,
                                    lmax=lmax)
        else:
            raise ValueError("style must be 'combined', 'separate', 'admit' "
                             "or 'corr'. Input value is {:s}"
                             .format(repr(style)))
        ls = _np.arange(lmax + 1)
        # Set up the axes: two panels for 'separate', a twinned y axis for
        # 'combined', and a single panel otherwise.
        if style == 'separate':
            if ax is None:
                scale = 0.4
                figsize = (_mpl.rcParams['figure.figsize'][0],
                           _mpl.rcParams['figure.figsize'][0]*scale)
                fig, (axes, axes2) = _plt.subplots(1, 2, figsize=figsize)
            else:
                axes = ax
                axes2 = ax2
        elif style == 'combined':
            if ax is None:
                fig, axes = _plt.subplots(1, 1)
                axes2 = axes.twinx()
            else:
                axes = ax
                axes2 = axes.twinx()
        else:
            if ax is None:
                fig, axes = _plt.subplots(1, 1)
            else:
                axes = ax
        if style in ('separate', 'combined'):
            admitax = axes
            corrax = axes2
        elif style == 'admit':
            admitax = axes
        elif style == 'corr':
            corrax = axes
        # Normalize legend/legend_loc to two-element lists in the order
        # [admittance, correlation].
        if legend is None:
            legend = [None, None]
        elif style == 'admit':
            legend = [legend, None]
            legend_loc = [legend_loc, None]
        elif style == 'corr':
            legend = [None, legend]
            legend_loc = [None, legend_loc]
        elif style == 'combined':
            # NOTE(review): for 'combined' the caller must already pass a
            # two-element legend list (see docstring); a bare string would be
            # indexed character-wise below — confirm.
            legend_loc = [legend_loc, legend_loc]
        else:
            if type(legend_loc) is str:
                legend_loc = [legend_loc, legend_loc]
        # Resolve named font sizes (e.g. 'medium') to numeric point values.
        if axes_labelsize is None:
            axes_labelsize = _mpl.rcParams['axes.labelsize']
        if type(axes_labelsize) == str:
            axes_labelsize = _mpl.font_manager \
                .FontProperties(size=axes_labelsize) \
                .get_size_in_points()
        if tick_labelsize is None:
            tick_labelsize = _mpl.rcParams['xtick.labelsize']
        if type(tick_labelsize) == str:
            tick_labelsize = _mpl.font_manager \
                .FontProperties(size=tick_labelsize) \
                .get_size_in_points()
        if style in ('admit', 'separate', 'combined'):
            # With errors, admit has shape (lmax+1, 2): value and uncertainty.
            if errors:
                admitax.errorbar(ls, admit[:, 0], yerr=admit[:, 1],
                                 label=legend[0], elinewidth=elinewidth,
                                 **kwargs)
            else:
                admitax.plot(ls, admit, label=legend[0], **kwargs)
            if ax is None:
                admitax.set(xlim=(0, lmax))
            else:
                # Never shrink the x range of a caller-supplied axes.
                admitax.set(xlim=(0, max(lmax, ax.get_xbound()[1])))
            admitax.set_xlabel('Spherical harmonic degree',
                               fontsize=axes_labelsize)
            if function == 'radial':
                admitax.set_ylabel('Admittance, mGal/km',
                                   fontsize=axes_labelsize)
            else:
                admitax.set_ylabel('Admittance, m/km',
                                   fontsize=axes_labelsize)
            admitax.minorticks_on()
            admitax.tick_params(labelsize=tick_labelsize)
            if legend[0] is not None:
                # For 'combined', a single merged legend is drawn later.
                if style != 'combined':
                    admitax.legend(loc=legend_loc[0])
            if style != 'combined':
                admitax.grid(grid, which='major')
        if style in ('corr', 'separate', 'combined'):
            if style == 'combined':
                # plot with next color
                # NOTE(review): _get_lines.prop_cycler is private matplotlib
                # API (removed in 3.8) — confirm against the supported
                # matplotlib versions.
                next(corrax._get_lines.prop_cycler)['color']
            corrax.plot(ls, corr, label=legend[1], **kwargs)
            if ax is None:
                corrax.set(xlim=(0, lmax))
                # Correlation is bounded by definition to [-1, 1].
                corrax.set(ylim=(-1, 1))
            else:
                corrax.set(xlim=(0, max(lmax, ax.get_xbound()[1])))
            corrax.set_xlabel('Spherical harmonic degree',
                              fontsize=axes_labelsize)
            corrax.set_ylabel('Correlation', fontsize=axes_labelsize)
            corrax.minorticks_on()
            corrax.tick_params(labelsize=tick_labelsize)
            if legend[1] is not None:
                if style == 'combined':
                    # Merge handles from both twinned axes into one legend.
                    lines, labels = admitax.get_legend_handles_labels()
                    lines2, labels2 = corrax.get_legend_handles_labels()
                    corrax.legend(lines + lines2, labels + labels2,
                                  loc=legend_loc[1])
                else:
                    corrax.legend(loc=legend_loc[1])
            if style != 'combined':
                corrax.grid(grid, which='major')
        # Finalize only when this method owns the figure (ax was not given).
        if ax is None:
            fig.tight_layout(pad=0.5)
            if show:
                fig.show()
            if fname is not None:
                fig.savefig(fname)
            if style in ('separate', 'combined'):
                return fig, (axes, axes2)
            else:
                return fig, axes
def plot_admittance(self, hlm, errors=True, function='radial',
lmax=None, grid=True, legend=None,
legend_loc='best', axes_labelsize=None,
tick_labelsize=None, elinewidth=0.75, ax=None,
show=True, fname=None, **kwargs):
"""
Plot the admittance with another function.
Usage
-----
x.plot_admittance(hlm, [errors, function, lmax, grid, legend,
legend_loc, axes_labelsize, tick_labelsize,
elinewidth, ax, show, fname, **kwargs])
Parameters
----------
hlm : SHCoeffs class instance.
The second function used in computing the admittance.
errors : bool, optional, default = True
Plot the uncertainty of the admittance.
function : str, optional, default = 'radial'
The type of admittance to return: 'geoid' for using the geoid, in
units of m/km, or 'radial' for using the radial gravity in units
of mGal/km.
lmax : int, optional, default = self.lmax
The maximum spherical harmonic degree to plot.
grid : bool, optional, default = True
If True, plot grid lines.
legend : str, optional, default = None
Text to use for the legend.
legend_loc : str, optional, default = 'best'
Location of the legend, such as 'upper right' or 'lower center'
(see pyplot.legend for all options).
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
elinewidth : float, optional, default = 0.75
Line width of the error bars when errors is True.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot to the screen.
fname : str, optional, default = None
If present, and if ax is not specified, save the image to the
specified file.
**kwargs : keyword arguments, optional
Keyword arguments for pyplot.plot() and pyplot.errorbar().
Notes
-----
If gravity g and topography h are related by the equation
glm = Z(l) hlm + nlm
where nlm is a zero-mean random variable, the admittance can be
estimated using
Z(l) = Sgh(l) / Shh(l)
where Sgh and Shh are the cross-power and power spectra of the
g (self) and h (input).
"""
return self.plot_admitcorr(hlm, errors=errors, function=function,
style='admit', lmax=lmax, grid=grid,
legend=legend, legend_loc=legend_loc,
axes_labelsize=axes_labelsize,
tick_labelsize=tick_labelsize,
elinewidth=elinewidth, show=True,
fname=fname, ax=ax, **kwargs)
def plot_correlation(self, hlm, lmax=None, grid=True, legend=None,
legend_loc='best', axes_labelsize=None,
tick_labelsize=None, elinewidth=0.75, ax=None,
show=True, fname=None, **kwargs):
"""
Plot the correlation with another function.
Usage
-----
x.plot_correlation(hlm, [lmax, grid, legend, legend_loc,
axes_labelsize, tick_labelsize, elinewidth,
ax, show, fname, **kwargs])
Parameters
----------
hlm : SHCoeffs class instance.
The second function used in computing the correlation.
lmax : int, optional, default = self.lmax
The maximum spherical harmonic degree to plot.
grid : bool, optional, default = True
If True, plot grid lines.
legend : str, optional, default = None
Text to use for the legend.
legend_loc : str, optional, default = 'best'
Location of the legend, such as 'upper right' or 'lower center'
(see pyplot.legend for all options).
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
elinewidth : float, optional, default = 0.75
Line width of the error bars when errors is True.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot to the screen.
fname : str, optional, default = None
If present, and if ax is not specified, save the image to the
specified file.
**kwargs : keyword arguments, optional
Keyword arguments for pyplot.plot() and pyplot.errorbar().
Notes
-----
The spectral correlation is defined as
gamma(l) = Sgh(l) / sqrt( Sgg(l) Shh(l) )
where Sgh, Shh and Sgg are the cross-power and power spectra of the
functions g (self) and h (input).
"""
return self.plot_admitcorr(hlm, style='corr', lmax=lmax, grid=grid,
legend=legend, legend_loc=legend_loc,
axes_labelsize=axes_labelsize,
tick_labelsize=tick_labelsize,
show=True, fname=fname, ax=ax, **kwargs)
class SHGravRealCoeffs(SHGravCoeffs):
    """
    Real spherical harmonic coefficient class for the gravitational potential.
    """
    def __init__(self, coeffs, gm=None, r0=None, omega=None, errors=None,
                 error_kind=None, normalization='4pi', csphase=1, copy=True,
                 header=None, header2=None, name=None, epoch=None):
        """Initialize real gravitational potential coefficients class."""
        # lmax is inferred from the array shape: coeffs is (2, lmax+1, lmax+1).
        lmax = coeffs.shape[1] - 1
        # ---- create mask to filter out m<=l ----
        mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=bool)
        mask[0, 0, 0] = True
        for l in _np.arange(lmax + 1):
            mask[:, l, :l + 1] = True
        # m = 0 has no sine (imaginary) coefficient, so exclude it.
        mask[1, :, 0] = False
        self.mask = mask
        self.lmax = lmax
        self.kind = 'real'
        self.normalization = normalization
        self.csphase = csphase
        self.header = header
        self.header2 = header2
        self.gm = gm
        self.r0 = r0
        self.omega = omega
        self.name = name
        self.epoch = epoch
        self.error_kind = error_kind
        if copy:
            # Copy and zero the (meaningless) entries with m > l.
            self.coeffs = _np.copy(coeffs)
            self.coeffs[~mask] = 0.
        else:
            # Caller retains ownership; masked entries are NOT zeroed here.
            self.coeffs = coeffs
        if errors is not None:
            if copy:
                self.errors = _np.copy(errors)
                self.errors[~mask] = 0.
            else:
                self.errors = errors
        else:
            self.errors = None
    def __repr__(self):
        """Return a multi-line summary of the coefficient metadata."""
        return ('kind = {:s}\n'
                'normalization = {:s}\n'
                'csphase = {:d}\n'
                'lmax = {:d}\n'
                'GM (m3 / s2) = {:s}\n'
                'r0 (m) = {:s}\n'
                'Omega (rad / s) = {:s}\n'
                'error_kind = {:s}\n'
                'header = {:s}\n'
                'header2 = {:s}\n'
                'name = {:s}\n'
                'epoch = {:s}'
                .format(repr(self.kind), repr(self.normalization),
                        self.csphase, self.lmax, repr(self.gm), repr(self.r0),
                        repr(self.omega), repr(self.error_kind),
                        repr(self.header), repr(self.header2),
                        repr(self.name), repr(self.epoch)))
    def _rotate(self, angles, dj_matrix, gm=None, r0=None, omega=None,
                backend=None, nthreads=None):
        """Rotate the coefficients by the Euler angles alpha, beta, gamma."""
        # The shtools rotation routine loses accuracy above degree ~1200.
        if self.lmax > 1200 and backend.lower() == "shtools":
            _warnings.warn("The rotate() method is accurate only to about" +
                           " spherical harmonic degree 1200 when using the" +
                           " shtools backend. " +
                           "lmax = {:d}".format(self.lmax),
                           category=RuntimeWarning)
        if backend == "shtools" and dj_matrix is None:
            dj_matrix = _djpi2(self.lmax + 1)
        # The coefficients need to be 4pi normalized with csphase = 1
        coeffs = backend_module(
            backend=backend, nthreads=nthreads).SHRotateRealCoef(
                self.to_array(normalization='4pi', csphase=1, errors=False),
                angles, dj_matrix)
        # Convert 4pi normalized coefficients to the same normalization
        # as the unrotated coefficients.
        if self.normalization != '4pi' or self.csphase != 1:
            temp = _convert(coeffs, normalization_in='4pi', csphase_in=1,
                            normalization_out=self.normalization,
                            csphase_out=self.csphase)
            # NOTE(review): self.errors are attached to the rotated
            # coefficients without being rotated themselves — confirm this
            # is intended.
            return SHGravCoeffs.from_array(
                temp, errors=self.errors, normalization=self.normalization,
                csphase=self.csphase, copy=False, gm=gm, r0=r0, omega=omega,
                epoch=self.epoch)
        else:
            return SHGravCoeffs.from_array(coeffs, errors=self.errors,
                                           gm=gm, r0=r0, omega=omega,
                                           epoch=self.epoch, copy=False)
    def _expand_coord(self, a, f, lat, lon, degrees, lmax_calc, omega):
        """Evaluate the gravity at the coordinates lat and lon."""
        # MakeGravGridPoint requires 4pi-normalized coefficients, csphase = 1.
        coeffs = self.to_array(normalization='4pi', csphase=1, errors=False)
        if degrees is True:
            latin = lat
            lonin = lon
        else:
            latin = _np.rad2deg(lat)
            lonin = _np.rad2deg(lon)
        if type(lat) is not type(lon):
            raise ValueError('lat and lon must be of the same type. ' +
                             'Input types are {:s} and {:s}.'
                             .format(repr(type(lat)), repr(type(lon))))
        # NOTE(review): _np.float_ was removed in NumPy 2.0; this type check
        # may need updating for newer NumPy versions.
        if type(lat) is int or type(lat) is float or type(lat) is _np.float_:
            if f == 0.:
                r = a
            else:
                # Geocentric radius of an ellipsoid with semi-major axis a
                # and flattening f at this latitude.
                r = _np.cos(_np.deg2rad(latin))**2 + \
                    _np.sin(_np.deg2rad(latin))**2 / (1.0 - f)**2
                r = a * _np.sqrt(1. / r)
            return _MakeGravGridPoint(coeffs, gm=self.gm, r0=self.r0,
                                      r=r, lat=latin, lon=lonin,
                                      lmax=lmax_calc, omega=self.omega)
        elif type(lat) is _np.ndarray:
            # One (gravity vector) row of 3 components per coordinate pair.
            values = _np.empty((len(lat), 3), dtype=_np.float64)
            for i, (latitude, longitude) in enumerate(zip(latin, lonin)):
                if f == 0.:
                    r = a
                else:
                    r = _np.cos(_np.deg2rad(latitude))**2 + \
                        _np.sin(_np.deg2rad(latitude))**2 / (1.0 - f)**2
                    r = a * _np.sqrt(1. / r)
                values[i, :] = _MakeGravGridPoint(coeffs, gm=self.gm,
                                                  r0=self.r0, r=r,
                                                  lat=latitude, lon=longitude,
                                                  lmax=lmax_calc,
                                                  omega=self.omega)
            return values
        elif type(lat) is list:
            values = []
            for latitude, longitude in zip(latin, lonin):
                if f == 0.:
                    r = a
                else:
                    r = _np.cos(_np.deg2rad(latitude))**2 + \
                        _np.sin(_np.deg2rad(latitude))**2 / (1.0 - f)**2
                    r = a * _np.sqrt(1. / r)
                values.append(
                    _MakeGravGridPoint(coeffs, gm=self.gm, r0=self.r0,
                                       r=r, lat=latitude, lon=longitude,
                                       lmax=lmax_calc, omega=self.omega))
            return values
        else:
            raise ValueError('lat and lon must be either an int, float, '
                             'ndarray, or list. Input types are {:s} and {:s}.'
                             .format(repr(type(lat)), repr(type(lon))))
| {
"content_hash": "73a9aa2f84e178dbd937f53722c4c7fb",
"timestamp": "",
"source": "github",
"line_count": 3893,
"max_line_length": 79,
"avg_line_length": 44.69303878756743,
"alnum_prop": 0.5383585263520893,
"repo_name": "SHTOOLS/SHTOOLS",
"id": "72d07e082369fab83b56e38febb7582c7a719233",
"size": "173992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyshtools/shclasses/shgravcoeffs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "47054"
},
{
"name": "Fortran",
"bytes": "1568027"
},
{
"name": "Makefile",
"bytes": "21146"
},
{
"name": "Python",
"bytes": "1584095"
}
],
"symlink_target": ""
} |
"""Base class for linear operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperator"]
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
@tf_export("linalg.LinearOperator")
class LinearOperator(object):
  """Base class defining a [batch of] linear operator[s].

  Subclasses of `LinearOperator` provide access to common methods on a
  (batch) matrix, without the need to materialize the matrix.  This allows:

  * Matrix free computations
  * Operators that take advantage of special structure, while providing a
    consistent API to users.

  #### Subclassing

  To enable a public method, subclasses should implement the leading-underscore
  version of the method.  The argument signature should be identical except for
  the omission of `name="..."`.  For example, to enable
  `matmul(x, adjoint=False, name="matmul")` a subclass should implement
  `_matmul(x, adjoint=False)`.

  #### Performance contract

  Subclasses should only implement the assert methods
  (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`
  time.

  Class docstrings should contain an explanation of computational complexity.
  Since this is a high-performance library, attention should be paid to detail,
  and explanations can include constants as well as Big-O notation.

  #### Shape compatibility

  `LinearOperator` sub classes should operate on a [batch] matrix with
  compatible shape.  Class docstrings should define what is meant by compatible
  shape.  Some sub-classes may not support batching.

  An example is:

  `x` is a batch matrix with compatible shape for `matmul` if

  ```
  operator.shape = [B1,...,Bb] + [M, N],  b >= 0,
  x.shape =   [B1,...,Bb] + [N, R]
  ```

  `rhs` is a batch matrix with compatible shape for `solve` if

  ```
  operator.shape = [B1,...,Bb] + [M, N],  b >= 0,
  rhs.shape =   [B1,...,Bb] + [M, R]
  ```

  #### Example docstring for subclasses.

  This operator acts like a (batch) matrix `A` with shape
  `[B1,...,Bb, M, N]` for some `b >= 0`.  The first `b` indices index a
  batch member.  For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `m x n` matrix.  Again, this matrix `A` may not be materialized, but for
  purposes of identifying and working with compatible arguments the shape is
  relevant.

  Examples:

  ```python
  some_tensor = ... shape = ????
  operator = MyLinOp(some_tensor)

  operator.shape()
  ==> [2, 4, 4]

  operator.log_abs_determinant()
  ==> Shape [2] Tensor

  x = ... Shape [2, 4, 5] Tensor

  operator.matmul(x)
  ==> Shape [2, 4, 5] Tensor
  ```

  #### Shape compatibility

  This operator acts on batch matrices with compatible shape.
  FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE

  #### Performance

  FILL THIS IN

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  # Python 2 style metaclass declaration; makes _shape/_shape_tensor/_matmul
  # abstract so this base class cannot be instantiated directly.
  __metaclass__ = abc.ABCMeta

  def __init__(self,
               dtype,
               graph_parents=None,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name=None):
    r"""Initialize the `LinearOperator`.

    **This is a private method for subclass use.**
    **Subclasses should copy-paste this `__init__` documentation.**

    Args:
      dtype: The type of the this `LinearOperator`.  Arguments to `matmul` and
        `solve` will have to be this type.
      graph_parents: Python list of graph prerequisites of this `LinearOperator`
        Typically tensors that are passed during initialization.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.  If `dtype` is real, this is equivalent to being symmetric.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix\
            #Extension_for_non_symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError:  If any member of graph_parents is `None` or not a `Tensor`.
      ValueError:  If hints are set incorrectly.
    """
    # Check and auto-set flags.  Positive-definite implies non-singular, and
    # non-singular / self-adjoint each imply square; contradictory user hints
    # are rejected eagerly here rather than surfacing as silent wrong answers.
    if is_positive_definite:
      if is_non_singular is False:
        raise ValueError("A positive definite matrix is always non-singular.")
      is_non_singular = True

    if is_non_singular:
      if is_square is False:
        raise ValueError("A non-singular matrix is always square.")
      is_square = True

    if is_self_adjoint:
      if is_square is False:
        raise ValueError("A self-adjoint matrix is always square.")
      is_square = True

    self._is_square_set_or_implied_by_hints = is_square

    graph_parents = [] if graph_parents is None else graph_parents
    for i, t in enumerate(graph_parents):
      if t is None or not tensor_util.is_tensor(t):
        raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))

    self._dtype = dtype
    self._graph_parents = graph_parents
    self._is_non_singular = is_non_singular
    self._is_self_adjoint = is_self_adjoint
    self._is_positive_definite = is_positive_definite
    self._name = name or type(self).__name__

  @contextlib.contextmanager
  def _name_scope(self, name=None, values=None):
    """Helper function to standardize op scope."""
    with ops.name_scope(self.name):
      with ops.name_scope(
          name, values=((values or []) + self._graph_parents)) as scope:
        yield scope

  @property
  def dtype(self):
    """The `DType` of `Tensor`s handled by this `LinearOperator`."""
    return self._dtype

  @property
  def name(self):
    """Name prepended to all ops created by this `LinearOperator`."""
    return self._name

  @property
  def graph_parents(self):
    """List of graph dependencies of this `LinearOperator`."""
    return self._graph_parents

  @property
  def is_non_singular(self):
    return self._is_non_singular

  @property
  def is_self_adjoint(self):
    return self._is_self_adjoint

  @property
  def is_positive_definite(self):
    return self._is_positive_definite

  @property
  def is_square(self):
    """Return `True/False` depending on if this operator is square."""
    # Static checks done after __init__.  Why?  Because domain/range dimension
    # sometimes requires lots of work done in the derived class after init.
    auto_square_check = self.domain_dimension == self.range_dimension
    if self._is_square_set_or_implied_by_hints is False and auto_square_check:
      raise ValueError(
          "User set is_square hint to False, but the operator was square.")
    if self._is_square_set_or_implied_by_hints is None:
      return auto_square_check

    return self._is_square_set_or_implied_by_hints

  @abc.abstractmethod
  def _shape(self):
    # Write this in derived class to enable all static shape methods.
    raise NotImplementedError("_shape is not implemented.")

  @property
  def shape(self):
    """`TensorShape` of this `LinearOperator`.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns
    `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`.

    Returns:
      `TensorShape`, statically determined, may be undefined.
    """
    return self._shape()

  @abc.abstractmethod
  def _shape_tensor(self):
    raise NotImplementedError("_shape_tensor is not implemented.")

  def shape_tensor(self, name="shape_tensor"):
    """Shape of this `LinearOperator`, determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
    `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.

    Args:
      name:  A name for this `Op.

    Returns:
      `int32` `Tensor`
    """
    with self._name_scope(name):
      # Prefer to use statically defined shape if available.
      if self.shape.is_fully_defined():
        return linear_operator_util.shape_tensor(self.shape.as_list())
      else:
        return self._shape_tensor()

  @property
  def batch_shape(self):
    """`TensorShape` of batch dimensions of this `LinearOperator`.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns
    `TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]`

    Returns:
      `TensorShape`, statically determined, may be undefined.
    """
    # Derived classes get this "for free" once .shape is implemented.
    return self.shape[:-2]

  def batch_shape_tensor(self, name="batch_shape_tensor"):
    """Shape of batch dimensions of this operator, determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
    `[B1,...,Bb]`.

    Args:
      name:  A name for this `Op.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      # Prefer to use statically defined shape if available.
      if self.batch_shape.is_fully_defined():
        return linear_operator_util.shape_tensor(
            self.batch_shape.as_list(), name="batch_shape")
      else:
        return self.shape_tensor()[:-2]

  @property
  def tensor_rank(self):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.

    Returns:
      Python integer, or None if the tensor rank is undefined.
    """
    # Fix: this is a @property, so the previous `name="tensor_rank"` parameter
    # could never be supplied by callers; the name is now inlined.
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope("tensor_rank"):
      return self.shape.ndims

  def tensor_rank_tensor(self, name="tensor_rank_tensor"):
    """Rank (in the sense of tensors) of matrix corresponding to this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.

    Args:
      name:  A name for this `Op.

    Returns:
      `int32` `Tensor`, determined at runtime.
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      # Prefer to use statically defined shape if available.
      if self.tensor_rank is not None:
        return ops.convert_to_tensor(self.tensor_rank)
      else:
        return array_ops.size(self.shape_tensor())

  @property
  def domain_dimension(self):
    """Dimension (in the sense of vector spaces) of the domain of this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.

    Returns:
      `Dimension` object.
    """
    # Derived classes get this "for free" once .shape is implemented.
    return self.shape[-1]

  def domain_dimension_tensor(self, name="domain_dimension_tensor"):
    """Dimension (in the sense of vector spaces) of the domain of this operator.

    Determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `N`.

    Args:
      name:  A name for this `Op`.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      # Prefer to use statically defined shape if available.
      if self.domain_dimension.value is not None:
        return ops.convert_to_tensor(self.domain_dimension.value)
      else:
        return self.shape_tensor()[-1]

  @property
  def range_dimension(self):
    """Dimension (in the sense of vector spaces) of the range of this operator.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.

    Returns:
      `Dimension` object.
    """
    # Derived classes get this "for free" once .shape is implemented.
    return self.shape[-2]

  def range_dimension_tensor(self, name="range_dimension_tensor"):
    """Dimension (in the sense of vector spaces) of the range of this operator.

    Determined at runtime.

    If this operator acts like the batch matrix `A` with
    `A.shape = [B1,...,Bb, M, N]`, then this returns `M`.

    Args:
      name:  A name for this `Op`.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with self._name_scope(name):
      # Prefer to use statically defined shape if available.
      if self.range_dimension.value is not None:
        return ops.convert_to_tensor(self.range_dimension.value)
      else:
        return self.shape_tensor()[-2]

  def _assert_non_singular(self):
    """Private default implementation of _assert_non_singular."""
    logging.warn(
        "Using (possibly slow) default implementation of assert_non_singular."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      return self.assert_positive_definite()
    else:
      singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
      # TODO(langmore) Add .eig and .cond as methods.
      cond = (math_ops.reduce_max(singular_values, axis=-1) /
              math_ops.reduce_min(singular_values, axis=-1))
      return check_ops.assert_less(
          cond,
          self._max_condition_number_to_be_non_singular(),
          message="Singular matrix up to precision epsilon.")

  def _max_condition_number_to_be_non_singular(self):
    """Return the maximum condition number that we consider nonsingular."""
    with ops.name_scope("max_nonsingular_condition_number"):
      dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
      eps = math_ops.cast(
          math_ops.reduce_max([
              100.,
              math_ops.cast(self.range_dimension_tensor(), self.dtype),
              math_ops.cast(self.domain_dimension_tensor(), self.dtype)
          ]), self.dtype) * dtype_eps
      return 1. / eps

  def assert_non_singular(self, name="assert_non_singular"):
    """Returns an `Op` that asserts this operator is non singular.

    This operator is considered non-singular if

    ```
    ConditionNumber < max{100, range_dimension, domain_dimension} * eps,
    eps := np.finfo(self.dtype.as_numpy_dtype).eps
    ```

    Args:
      name:  A string name to prepend to created ops.

    Returns:
      An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
        the operator is singular.
    """
    with self._name_scope(name):
      return self._assert_non_singular()

  def _assert_positive_definite(self):
    """Default implementation of _assert_positive_definite."""
    logging.warn(
        "Using (possibly slow) default implementation of "
        "assert_positive_definite."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    # If the operator is self-adjoint, then checking that
    # Cholesky decomposition succeeds + results in positive diag is necessary
    # and sufficient.
    if self.is_self_adjoint:
      return check_ops.assert_positive(
          array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),
          message="Matrix was not positive definite.")
    # We have no generic check for positive definite.
    raise NotImplementedError("assert_positive_definite is not implemented.")

  def assert_positive_definite(self, name="assert_positive_definite"):
    """Returns an `Op` that asserts this operator is positive definite.

    Here, positive definite means that the quadratic form `x^H A x` has positive
    real part for all nonzero `x`.  Note that we do not require the operator to
    be self-adjoint to be positive definite.

    Args:
      name:  A name to give this `Op`.

    Returns:
      An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
        the operator is not positive definite.
    """
    with self._name_scope(name):
      return self._assert_positive_definite()

  def _assert_self_adjoint(self):
    dense = self.to_dense()
    logging.warn(
        "Using (possibly slow) default implementation of assert_self_adjoint."
        "  Requires conversion to a dense matrix.")
    return check_ops.assert_equal(
        dense,
        linalg.adjoint(dense),
        message="Matrix was not equal to its adjoint.")

  def assert_self_adjoint(self, name="assert_self_adjoint"):
    """Returns an `Op` that asserts this operator is self-adjoint.

    Here we check that this operator is *exactly* equal to its hermitian
    transpose.

    Args:
      name:  A string name to prepend to created ops.

    Returns:
      An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
        the operator is not self-adjoint.
    """
    with self._name_scope(name):
      return self._assert_self_adjoint()

  def _check_input_dtype(self, arg):
    """Check that arg.dtype == self.dtype."""
    if arg.dtype != self.dtype:
      raise TypeError(
          "Expected argument to have dtype %s.  Found: %s in tensor %s" %
          (self.dtype, arg.dtype, arg))

  @abc.abstractmethod
  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    raise NotImplementedError("_matmul is not implemented.")

  def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
    """Transform [batch] matrix `x` with left multiplication:  `x --> Ax`.

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    X = ... # shape [..., N, R], batch matrix, R > 0.

    Y = operator.matmul(X)
    Y.shape
    ==> [..., M, R]

    Y[..., :, r] = sum_j A[..., :, j] X[j, r]
    ```

    Args:
      x: `Tensor` with compatible shape and same `dtype` as `self`.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, left multiply by the adjoint: `A^H x`.
      adjoint_arg:  Python `bool`.  If `True`, compute `A x^H` where `x^H` is
        the hermitian transpose (transposition and complex conjugation).
      name:  A name for this `Op.

    Returns:
      A `Tensor` with shape `[..., M, R]` and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      self._check_input_dtype(x)

      self_dim = -2 if adjoint else -1
      arg_dim = -1 if adjoint_arg else -2
      self.shape[self_dim].assert_is_compatible_with(x.get_shape()[arg_dim])

      return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)

  def _matvec(self, x, adjoint=False):
    x_mat = array_ops.expand_dims(x, axis=-1)
    y_mat = self.matmul(x_mat, adjoint=adjoint)
    return array_ops.squeeze(y_mat, axis=-1)

  def matvec(self, x, adjoint=False, name="matvec"):
    """Transform [batch] vector `x` with left multiplication:  `x --> Ax`.

    ```python
    # Make an operator acting like batch matric A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)

    X = ... # shape [..., N], batch vector

    Y = operator.matvec(X)
    Y.shape
    ==> [..., M]

    Y[..., :] = sum_j A[..., :, j] X[..., j]
    ```

    Args:
      x: `Tensor` with compatible shape and same `dtype` as `self`.
        `x` is treated as a [batch] vector meaning for every set of leading
        dimensions, the last dimension defines a vector.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, left multiply by the adjoint: `A^H x`.
      name:  A name for this `Op.

    Returns:
      A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      self._check_input_dtype(x)
      self_dim = -2 if adjoint else -1
      self.shape[self_dim].assert_is_compatible_with(x.get_shape()[-1])
      return self._matvec(x, adjoint=adjoint)

  def _determinant(self):
    logging.warn(
        "Using (possibly slow) default implementation of determinant."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      return math_ops.exp(self.log_abs_determinant())
    # Fix: the base class never defines `self._matrix`; materialize the
    # operator with to_dense() like every other default implementation.
    return linalg_ops.matrix_determinant(self.to_dense())

  def determinant(self, name="det"):
    """Determinant for every batch member.

    Args:
      name:  A name for this `Op.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError:  If `self.is_square` is `False`.
    """
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):
      return self._determinant()

  def _log_abs_determinant(self):
    # Fix: warn message previously said "determinant" (copy-paste).
    logging.warn(
        "Using (possibly slow) default implementation of log_abs_determinant."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    if self._can_use_cholesky():
      diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
      return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
    # Fix: the base class never defines `self._matrix`; use to_dense().
    _, log_abs_det = linalg.slogdet(self.to_dense())
    return log_abs_det

  def log_abs_determinant(self, name="log_abs_det"):
    """Log absolute value of determinant for every batch member.

    Args:
      name:  A name for this `Op.

    Returns:
      `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.

    Raises:
      NotImplementedError:  If `self.is_square` is `False`.
    """
    if self.is_square is False:
      raise NotImplementedError(
          "Determinant not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name):
      return self._log_abs_determinant()

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    """Default implementation of _solve."""
    if self.is_square is False:
      raise NotImplementedError(
          "Solve is not yet implemented for non-square operators.")
    logging.warn(
        "Using (possibly slow) default implementation of solve."
        "  Requires conversion to a dense matrix and O(N^3) operations.")
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    if self._can_use_cholesky():
      return linalg_ops.cholesky_solve(
          linalg_ops.cholesky(self.to_dense()), rhs)
    return linalg_ops.matrix_solve(self.to_dense(), rhs, adjoint=adjoint)

  def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
    """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve R > 0 linear systems for every member of the batch.
    RHS = ... # shape [..., M, R]

    X = operator.solve(RHS)
    # X[..., :, r] is the solution to the r'th linear system
    # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

    operator.matmul(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator and compatible shape.
        `rhs` is treated like a [batch] matrix meaning for every set of leading
        dimensions, the last two dimensions defines a matrix.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, solve the system involving the adjoint
        of this `LinearOperator`:  `A^H X = rhs`.
      adjoint_arg:  Python `bool`.  If `True`, solve `A X = rhs^H` where `rhs^H`
        is the hermitian transpose (transposition and complex conjugation).
      name:  A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError:  If `self.is_non_singular` or `is_square` is False.
    """
    if self.is_non_singular is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "be singular.")
    if self.is_square is False:
      raise NotImplementedError(
          "Exact solve not implemented for an operator that is expected to "
          "not be square.")
    with self._name_scope(name, values=[rhs]):
      rhs = ops.convert_to_tensor(rhs, name="rhs")
      self._check_input_dtype(rhs)

      self_dim = -1 if adjoint else -2
      arg_dim = -1 if adjoint_arg else -2
      self.shape[self_dim].assert_is_compatible_with(rhs.get_shape()[arg_dim])

      return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)

  def _solvevec(self, rhs, adjoint=False):
    """Default implementation of _solvevec."""
    rhs_mat = array_ops.expand_dims(rhs, axis=-1)
    solution_mat = self.solve(rhs_mat, adjoint=adjoint)
    return array_ops.squeeze(solution_mat, axis=-1)

  def solvevec(self, rhs, adjoint=False, name="solve"):
    """Solve single equation with best effort: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve one linear system for every member of the batch.
    RHS = ... # shape [..., M]

    X = operator.solvevec(RHS)
    # X is the solution to the linear system
    # sum_j A[..., :, j] X[..., j] = RHS[..., :]

    operator.matvec(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator.
        `rhs` is treated like a [batch] vector meaning for every set of leading
        dimensions, the last dimension defines a vector.  See class docstring
        for definition of compatibility regarding batch dimensions.
      adjoint: Python `bool`.  If `True`, solve the system involving the adjoint
        of this `LinearOperator`:  `A^H X = rhs`.
      name:  A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[...,N]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError:  If `self.is_non_singular` or `is_square` is False.
    """
    with self._name_scope(name, values=[rhs]):
      rhs = ops.convert_to_tensor(rhs, name="rhs")
      self._check_input_dtype(rhs)
      self_dim = -1 if adjoint else -2
      self.shape[self_dim].assert_is_compatible_with(rhs.get_shape()[-1])

      return self._solvevec(rhs, adjoint=adjoint)

  def _to_dense(self):
    """Generic and often inefficient implementation.  Override often."""
    logging.warn("Using (possibly slow) default implementation of to_dense."
                 "  Converts by self.matmul(identity).")
    if self.batch_shape.is_fully_defined():
      batch_shape = self.batch_shape
    else:
      batch_shape = self.batch_shape_tensor()

    if self.domain_dimension.value is not None:
      n = self.domain_dimension.value
    else:
      n = self.domain_dimension_tensor()

    eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
    return self.matmul(eye)

  def to_dense(self, name="to_dense"):
    """Return a dense (batch) matrix representing this operator."""
    with self._name_scope(name):
      return self._to_dense()

  def _diag_part(self):
    """Generic and often inefficient implementation.  Override often."""
    return array_ops.matrix_diag_part(self.to_dense())

  def diag_part(self, name="diag_part"):
    """Efficiently get the [batch] diagonal part of this operator.

    If this operator has shape `[B1,...,Bb, M, N]`, this returns a
    `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
    `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.

    ```
    my_operator = LinearOperatorDiag([1., 2.])

    # Efficiently get the diagonal
    my_operator.diag_part()
    ==> [1., 2.]

    # Equivalent, but inefficient method
    tf.matrix_diag_part(my_operator.to_dense())
    ==> [1., 2.]
    ```

    Args:
      name:  A name for this `Op`.

    Returns:
      diag_part:  A `Tensor` of same `dtype` as self.
    """
    with self._name_scope(name):
      return self._diag_part()

  def _trace(self):
    return math_ops.reduce_sum(self.diag_part(), axis=-1)

  def trace(self, name="trace"):
    """Trace of the linear operator, equal to sum of `self.diag_part()`.

    If the operator is square, this is also the sum of the eigenvalues.

    Args:
      name:  A name for this `Op`.

    Returns:
      Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
    """
    with self._name_scope(name):
      return self._trace()

  def _add_to_tensor(self, x):
    # Override if a more efficient implementation is available.
    return self.to_dense() + x

  def add_to_tensor(self, x, name="add_to_tensor"):
    """Add matrix represented by this operator to `x`.  Equivalent to `A + x`.

    Args:
      x:  `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
      name:  A name to give this `Op`.

    Returns:
      A `Tensor` with broadcast shape and same `dtype` as `self`.
    """
    with self._name_scope(name, values=[x]):
      x = ops.convert_to_tensor(x, name="x")
      self._check_input_dtype(x)
      return self._add_to_tensor(x)

  def _can_use_cholesky(self):
    # Cholesky is valid here only for a real, self-adjoint, positive-definite
    # operator (all three hints must be affirmatively set).
    # TODO(langmore) Add complex types when tf.cholesky can use them.
    return (not self.dtype.is_complex and self.is_self_adjoint and
            self.is_positive_definite)
| {
"content_hash": "f04e271e27290909fdbc38a2df6a67cc",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 80,
"avg_line_length": 34.724944320712694,
"alnum_prop": 0.6460250777667319,
"repo_name": "Xeralux/tensorflow",
"id": "c7513d5b40c5a4bb11501c90e08a9dc3a38c2e09",
"size": "31872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/linalg/linear_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340972"
},
{
"name": "C++",
"bytes": "39479562"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33675501"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425916"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from item import Item
from types import METHOD_TO_TYPE_MAPPING
from url import URL
## Represents a request for API data.
#
# The Request object provides a standard interface for creating requests for API
# data. An instance of this class is never directly initialized, but instead
# returned from a method in either the API or Site class.
class Request:
# A list of all non-top-level methods including network and site-specific ones
# This list is needed because __getattr__ needs to differentiate between methods and parameters
# Note: 'create' is omitted here because it duplicates functionality found in the Filter class
_methods = ['add',
'advanced',
'answers',
'associated',
'badges',
'comments',
'de-authenticate',
'delete',
'edit',
'elected',
'faq',
'favorites',
'featured',
'full',
'inbox',
'info',
'invalidate',
'linked',
'mentioned',
'merges',
'moderator-only',
'moderators',
'name',
'no-answers',
'notifications',
'privileges',
'questions',
'recipients',
'related',
'reputation',
'reputation-history',
'required',
'revisions',
'suggested-edits',
'synonyms',
'tags',
'timeline',
'top-answer-tags',
'top-answerers',
'top-answers',
'top-askers',
'top-question-tags',
'top-questions',
'unaccepted',
'unanswered',
'unread',
'wikis',
'write-permissions',]
# The presence of any of these methods will force all parameters to be
# passed as POST parameters instead of with GET.
_post_methods = ['add',
'delete',
'edit',]
## Creates a request object.
# @param url the domain name to initialize the URL to or a URL instance
# @param method a method name to append to the URL
# @param response_type an optional type to use for returning the response
def __init__(self, url=None, method=None, response_type=Item):
self._url = URL(url) if isinstance(url, basestring) else url
if not method is None:
self._url.add_method(method)
self._response_type = response_type
self._data = None
## Provides a way to specify IDs.
# @param items either a single item or a list/tuple of items
def __call__(self, items):
self._url.add_method(self._string_list(items), True)
return self
## Appends the specified item to the appropriate part of the URL.
# @param raw_item the item to be added
#
# Note: any underscores in the item name are converted to dashes.
def __getattr__(self, raw_item):
# access_token is a singular exception to this rule
item = raw_item if raw_item == 'access_token' else raw_item.replace('_', '-')
# No matter what, we're going to be modifying the URL, so make
# a deep copy of it
url = deepcopy(self._url)
if item in self._methods:
if item in self._post_methods:
url.switch_to_post()
return Request(url, item)
else:
# This is a neat trick - we return a local function that will
# finish setting the parameter in the URL once the user provides
# the value for the specified parameter (which may be a list).
def set_parameter(value):
url.add_parameter(item, self._string_list(value))
return Request(url)
return set_parameter
## Retrieves the item or data at the specified index and returns it.
# @param index the index to retrieve the item / data from
# @return the item / data at the specified index
#
# This method serves a dual purpose - if supplied with an integer value it
# will return the item at such an index. If however, supplied with a string,
# it will return the appropriate value from the response. For example, given
# the value 'total', it will return the total number of items in the set.
def __getitem__(self, index):
return self._fetch()['items'][index] if type(index) == int else self._fetch()[index]
## Provides a means of iterating through the response.
# @return an iterator for the response
def __iter__(self):
return iter(self._fetch()['items'])
## Returns the total number of items in the response.
# @return the number of items in the response
def __len__(self):
return len(self._fetch()['items'])
## Returns an internal representation of the current instance.
# @return the internal representation
def __repr__(self):
return "<Request '%s'>" % self._url
## Either fetches the data for the request or returns the data.
# @return the data for the request
    def _fetch(self):
        """Fetch and cache the response data for this request.

        The URL is hit only once; subsequent calls return the cached data.
        Each entry in the response's 'items' list is wrapped in the
        configured response type before caching.
        """
        if self._data is None:
            # Fetch the data and replace the 'items' entry with initialized response objects
            self._data = self._url.fetch()
            if self._url.base_method() in METHOD_TO_TYPE_MAPPING:
                item_type = METHOD_TO_TYPE_MAPPING[self._url.base_method()]
            else:
                # Fall back to the response's own 'type' field (or '') when
                # the base method has no explicit mapping.
                item_type = self._data['type'] if 'type' in self._data else ''
            self._data['items'] = [self._response_type(i, item_type) for i in self._data['items']]
        return self._data
## Converts the provided item or list of items into a string.
# @param items the list of items to join
# @return a string with the items joined together
def _string_list(self, items):
# Ensure that items is iterable - if not, put it in a list
try:
# Trigger the TypeError exception if this object is a string
# so that it isn't treated like a list
if isinstance(items, basestring):
raise TypeError
iter(items)
except (KeyError, TypeError):
items = [items,]
return ';'.join([str(i.id() if issubclass(i.__class__, Item) else i) for i in items])
| {
"content_hash": "15b476c0a48672046704cde0e4a6494f",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 99,
"avg_line_length": 41.13939393939394,
"alnum_prop": 0.5490571596935769,
"repo_name": "josephdunn/stackpy",
"id": "44e80b48115bc3c68e63e34c9a5e13796467bc18",
"size": "6788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stackpy/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47598"
}
],
"symlink_target": ""
} |
import operator
from urllib import urlencode
from django.http import HttpResponseRedirect
from django.template import Context, loader, RequestContext
from django.template.response import TemplateResponse
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.views.generic import ListView, DetailView
from django.template.response import TemplateResponse
from django.contrib import messages
from django.db import transaction
from django.db.models import Q
from django.contrib.comments.views.moderation import delete
from oscar.core.loading import import_module
from oscar.views.generic import PostActionMixin
import_module('order.models', ['Order', 'Line', 'ShippingEvent', 'ShippingEventQuantity',
'ShippingEventType', 'PaymentEvent', 'PaymentEventType', 'OrderNote'], locals())
import_module('order_management.forms', ['SimpleSearch'], locals())
class OrderListView(ListView):
    u"""A browsable, searchable list of orders."""
    context_object_name = "orders"
    template_name = 'order_management/browse.html'
    paginate_by = 20

    def get_queryset(self):
        u"""Return all orders, narrowed by the optional search query.

        The order number is always matched; additional fields are matched
        according to the 'search_by' choices submitted with the form.
        """
        if 'search_query' in self.request.GET and self.request.GET['search_query'].strip():
            q = self.request.GET['search_query'].strip()
            # The order number is always part of the search.
            q_list = [Q(number__icontains=q)]
            search_by = self.request.GET.getlist('search_by')
            if search_by:
                if 'billing_address' in search_by:
                    q_list.append(Q(billing_address__search_text__icontains=q))
                if 'shipping_address' in search_by:
                    q_list.append(Q(shipping_address__search_text__icontains=q))
                if 'customer' in search_by:
                    # The number predicate is already in q_list; only the
                    # customer name/email predicates are added here.
                    q_list.append(Q(user__first_name__icontains=q))
                    q_list.append(Q(user__last_name__icontains=q))
                    q_list.append(Q(user__email__icontains=q))
            # OR all predicates together into a single filter.
            return Order._default_manager.filter(reduce(operator.or_, q_list))
        return Order._default_manager.all()

    def get_context_data(self, **kwargs):
        u"""Add the search form and the query string (minus 'page') to the context."""
        context = super(OrderListView, self).get_context_data(**kwargs)
        # Preserve the search parameters across pagination links, stripping
        # the page number itself.
        search_params = self.request.GET.copy()
        if 'page' in search_params:
            del search_params['page']
        context['search_params'] = '&' + search_params.urlencode()
        context['order_simple_search_form'] = SimpleSearch(self.request.GET)
        return context
class OrderDetailView(DetailView, PostActionMixin):
    u"""A detail view of an order, with POST actions for creating shipping
    and payment events and for adding notes."""
    template_name = "order_management/order.html"
    context_object_name = 'order'

    def get_object(self):
        u"""Return the order matching the URL's order number, or raise a 404."""
        return get_object_or_404(Order, number=self.kwargs['order_number'])

    def get_context_data(self, **kwargs):
        u"""Add the available shipping and payment event types to the context."""
        context = super(OrderDetailView, self).get_context_data(**kwargs)
        context['shipping_options'] = ShippingEventType._default_manager.all()
        context['payment_options'] = PaymentEventType._default_manager.all()
        return context

    def post(self, request, *args, **kwargs):
        u"""Dispatch POST actions, redirecting back to this order afterwards."""
        order = self.get_object()
        self.response = HttpResponseRedirect(reverse('oscar-order-management-order', kwargs={'order_number': order.number}))
        return super(OrderDetailView, self).post(request, *args, **kwargs)

    def do_create_order_event(self, order):
        u"""Create a shipping event covering every line of the order."""
        self.create_shipping_event(order, order.lines.all())

    def do_create_line_event(self, order):
        u"""Create an event for the lines selected in the POST data."""
        line_ids = self.request.POST.getlist('order_line')
        lines = order.lines.in_bulk(line_ids)
        if not lines:
            messages.info(self.request, "Please select some lines")
            return
        try:
            if self.request.POST['shipping_event']:
                self.create_shipping_event(order, lines.values())
        except (AttributeError, ValueError) as e:
            messages.error(self.request, str(e))

    def create_shipping_event(self, order, lines):
        u"""Create a shipping event for an order, atomically over all lines."""
        with transaction.commit_on_success():
            event_type = ShippingEventType._default_manager.get(code=self.request.POST['shipping_event'])
            event = ShippingEvent._default_manager.create(order=order, event_type=event_type)
            for line in lines:
                try:
                    event_quantity = int(self.request.POST['order_line_quantity_%d' % line.id])
                except KeyError:
                    # No explicit quantity posted for this line - use it all.
                    event_quantity = line.quantity
                ShippingEventQuantity._default_manager.create(event=event, line=line,
                                                              quantity=event_quantity)

    def create_payment_event(self, order, lines, type_code):
        u"""Create a payment event for each line of an order."""
        event_type = PaymentEventType._default_manager.get(code=type_code)
        for line in lines.values():
            # PaymentEvent is imported into this module's namespace via
            # import_module(); there is no 'order_models' alias here.
            PaymentEvent._default_manager.create(order=order, line=line,
                                                 quantity=line.quantity, event_type=event_type)

    def do_add_note(self, order):
        u"""Save a note against an order for the authenticated user."""
        if self.request.user.is_authenticated():
            message = self.request.POST['message'].strip()
            if message:
                messages.info(self.request, "Message added")
                # Save the stripped message so the stored note matches the
                # value that was validated above.
                OrderNote._default_manager.create(order=order, message=message,
                                                  user=self.request.user)
            else:
                messages.info(self.request, "Please enter a message")
| {
"content_hash": "b39ecc1f5ecd03f6f02b1f5f56d7eaac",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 124,
"avg_line_length": 47.07751937984496,
"alnum_prop": 0.6199571875514572,
"repo_name": "aykut/django-oscar",
"id": "0ed18fd2ad9e3467172bb5e3d3cf0632dfe70317",
"size": "6073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/apps/order_management/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import json
from lib.base import BaseRedisAction
__all__ = [
'MultiSRemAction'
]
class MultiSRemAction(BaseRedisAction):
    """Remove multiple members, given as a JSON array, from a Redis set."""

    def run(self, key, values):
        """Decode *values* and SREM each member from the set at *key*."""
        members = json.loads(values)
        for member in members:
            self._client.srem(key, member)
        return True
| {
"content_hash": "0d6a078739029c5ab52706c6ca663b88",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 41,
"avg_line_length": 18.0625,
"alnum_prop": 0.6332179930795848,
"repo_name": "ryandub/st2redis",
"id": "f3b52dde186ee59385d12eeb03d963b8f3b5b3d3",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions/multi_srem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1872"
}
],
"symlink_target": ""
} |
from south.db import db
from django.db import models
from mypage.pages.models import *
class Migration:
    """South schema migration: adds the temporary Page.layout_migrated flag."""

    def forwards(self, orm):
        # Adding field 'Page.layout_migrated'
        db.add_column('pages_page', 'layout_migrated', models.BooleanField(default=False))

    def backwards(self, orm):
        # Deleting field 'Page.layout_migrated'
        db.delete_column('pages_page', 'layout_migrated')

    # Frozen ORM snapshot used by South at migration time; generated
    # automatically and not meant to be edited by hand.
    models = {
        'auth.user': {
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'pages.page': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'layout_json': ('models.TextField', [], {}),
            'layout_migrated': ('models.BooleanField', [], {'default': 'False'}),
            'site': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID'}),
            'skin': ('models.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
            'template': ('models.CharField', [], {'default': "'page.html'", 'max_length': '100'})
        },
        'pages.userpage': {
            'Meta': {'unique_together': "(('site_copy','user',),)", '_bases': ['mypage.pages.models.Page']},
            'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
            'site_copy': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID'}),
            'user': ('models.ForeignKey', ["orm['auth.User']"], {'db_index': 'True'})
        },
        'pages.sessionpage': {
            'Meta': {'unique_together': "(('site_copy','session_key',),)", '_bases': ['mypage.pages.models.Page']},
            'page_ptr': ('models.OneToOneField', ["orm['pages.Page']"], {}),
            'session_key': ('models.CharField', ["_('session key')"], {'max_length': '40', 'db_index': 'True'}),
            'site_copy': ('models.ForeignKey', ["orm['sites.Site']"], {'default': ' lambda :settings.SITE_ID'}),
            'updated': ('models.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'False'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'db_table': "'django_site'"},
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['pages']
| {
"content_hash": "4ff667bf8374b10168a3d706aa465810",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 115,
"avg_line_length": 44.51851851851852,
"alnum_prop": 0.5029118136439268,
"repo_name": "ella/mypage",
"id": "5dcd0d5a49af7eb4cf7cc71e0530122373b53f1c",
"size": "2405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypage/pages/migrations/0010_add_tmp_field_layout_migrated.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "232497"
},
{
"name": "Shell",
"bytes": "3912"
}
],
"symlink_target": ""
} |
from .EntityFactory import EntityFactory | {
"content_hash": "dd1aa5f6c6d99b7736763f303a856923",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 40,
"avg_line_length": 40,
"alnum_prop": 0.9,
"repo_name": "GMadorell/larv",
"id": "1691eedf490f30ae26609570defc6bf1a9f240f5",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "larv/Examples/barebones_project/Pieces/EntityFactories/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41481"
}
],
"symlink_target": ""
} |
import glob
import pkg_resources
def data_file(basename):
    """Resolve *basename* against the kevlar test data directory."""
    datadir = pkg_resources.resource_filename('kevlar', 'tests/data')
    return '{0}/{1}'.format(datadir, basename)
def data_glob(globstr):
    """Return the sorted paths in the kevlar test data dir matching *globstr*."""
    datadir = pkg_resources.resource_filename('kevlar', 'tests/data')
    pattern = datadir + '/' + globstr
    return sorted(glob.glob(pattern))
| {
"content_hash": "55f42bb21ba220708c15d9de510cf6ff",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6962025316455697,
"repo_name": "dib-lab/kevlar",
"id": "c160d42875b63de0285cdba981fb781b30ebc825",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kevlar/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3342"
},
{
"name": "C++",
"bytes": "16738"
},
{
"name": "Dockerfile",
"bytes": "1538"
},
{
"name": "Makefile",
"bytes": "2648"
},
{
"name": "Python",
"bytes": "488299"
},
{
"name": "Shell",
"bytes": "4576"
}
],
"symlink_target": ""
} |
from functools import wraps
from pyramid.request import call_app_with_subpath_as_path_info
def wsgiapp(wrapped):
    """ Decorator to turn a WSGI application into a :app:`Pyramid`
    :term:`view callable`.  This decorator differs from the
    :func:`pyramid.wsgi.wsgiapp2` decorator inasmuch as fixups of
    ``PATH_INFO`` and ``SCRIPT_NAME`` within the WSGI environment *are
    not* performed before the application is invoked.

    E.g., the following in a ``views.py`` module::

      @wsgiapp
      def hello_world(environ, start_response):
          body = 'Hello world'
          start_response('200 OK', [ ('Content-Type', 'text/plain'),
                                     ('Content-Length', len(body)) ] )
          return [body]

    Allows the following call to
    :meth:`pyramid.config.Configurator.add_view`::

       from views import hello_world
       config.add_view(hello_world, name='hello_world.txt')

    The ``wsgiapp`` decorator will convert the result of the WSGI
    application to a :term:`Response` and return it to
    :app:`Pyramid` as if the WSGI app were a :app:`Pyramid`
    view.
    """
    def decorator(context, request):
        # Pyramid calls this as a view; delegate to the wrapped WSGI app
        # with the request environment left untouched.
        return decorator
        return request.get_response(wrapped)

    # Objects without a __name__ (e.g. callable instances) only get their
    # __module__ and __doc__ copied onto the returned view.
    if getattr(wrapped, '__name__', None):
        wrapper = wraps(wrapped)
    else:
        wrapper = wraps(wrapped, ('__module__', '__doc__'))
    return wrapper(decorator)
def wsgiapp2(wrapped):
    """ Decorator to turn a WSGI application into a :app:`Pyramid`
    view callable.  This decorator differs from the
    :func:`pyramid.wsgi.wsgiapp` decorator inasmuch as fixups of
    ``PATH_INFO`` and ``SCRIPT_NAME`` within the WSGI environment
    *are* performed before the application is invoked.

    E.g. the following in a ``views.py`` module::

      @wsgiapp2
      def hello_world(environ, start_response):
          body = 'Hello world'
          start_response('200 OK', [ ('Content-Type', 'text/plain'),
                                     ('Content-Length', len(body)) ] )
          return [body]

    Allows the following call to
    :meth:`pyramid.config.Configurator.add_view`::

       from views import hello_world
       config.add_view(hello_world, name='hello_world.txt')

    The ``wsgiapp2`` decorator will convert the result of the WSGI
    application to a Response and return it to :app:`Pyramid` as if the WSGI
    app were a :app:`Pyramid` view.  The ``SCRIPT_NAME`` and ``PATH_INFO``
    values present in the WSGI environment are fixed up before the
    application is invoked.  In particular, a new WSGI environment is
    generated, and the :term:`subpath` of the request passed to ``wsgiapp2``
    is used as the new request's ``PATH_INFO`` and everything preceding the
    subpath is used as the ``SCRIPT_NAME``.  The new environment is passed to
    the downstream WSGI application."""
    def decorator(context, request):
        # Pyramid calls this as a view; the helper rewrites SCRIPT_NAME /
        # PATH_INFO from the request subpath before invoking the app.
        return call_app_with_subpath_as_path_info(request, wrapped)

    # Objects without a __name__ (e.g. callable instances) only get their
    # __module__ and __doc__ copied onto the returned view.
    if getattr(wrapped, '__name__', None):
        wrapper = wraps(wrapped)
    else:
        wrapper = wraps(wrapped, ('__module__', '__doc__'))
    return wrapper(decorator)
| {
"content_hash": "d231c9058db4cd2fd8bb1d23ebefb345",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 40.94871794871795,
"alnum_prop": 0.6552911709455228,
"repo_name": "danielpronych/pyramid-doxygen",
"id": "d176e4ce5d265789e231f61eece0c08b577ca3d3",
"size": "3194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyramid/wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "29212"
},
{
"name": "HTML",
"bytes": "39"
},
{
"name": "Python",
"bytes": "1837059"
}
],
"symlink_target": ""
} |
from apysignature import __version__
from setuptools import setup, find_packages
# Distribution metadata for the apysignature package.  The version is read
# from the package itself so it is defined in a single place.
setup(
    name='apysignature',
    version=__version__,
    # Runtime dependency: OrderedDict backport for Python versions that
    # lack collections.OrderedDict.
    install_requires=[
        'ordereddict>=1.1'
    ],
    url='https://github.com/erickponce/apysignature',
    description='Python implementation of the Ruby Signature library (https://github.com/mloughran/signature)',
    # README.rst is read at build time and used as the PyPI long description.
    long_description=open("README.rst").read(),
    packages=find_packages(),
    include_package_data=True,
    license='Apache 2.0',
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Software Development :: Libraries',
    ],
    author='Erick Ponce Leão',
    author_email='erickponceleao@gmail.com'
)
| {
"content_hash": "7cc2e37f8c041b0c03768268642217ac",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 111,
"avg_line_length": 33.42857142857143,
"alnum_prop": 0.6527777777777778,
"repo_name": "erickponce/apysignature",
"id": "b5d61d51ff58ce089d0d53932d4f82b5215ebc3b",
"size": "962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14396"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Removes Thread.content, adds Post.original and Thread.locked."""

    dependencies = [
        ('forum', '0005_auto_20161009_1758'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='thread',
            name='content',
        ),
        migrations.AddField(
            model_name='post',
            name='original',
            # Existing rows are backfilled with '' and the default is then
            # dropped from the schema (preserve_default=False).
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='thread',
            name='locked',
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "dc548bbaa7f9848a28c009ec730cf2ea",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 53,
"avg_line_length": 23.75,
"alnum_prop": 0.5413533834586466,
"repo_name": "WarwickAnimeSoc/aniMango",
"id": "15e8eff20fd35b7274215f5161d84bc99aa25ee4",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forum/migrations/0006_auto_20170920_2026.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14554"
},
{
"name": "HTML",
"bytes": "145725"
},
{
"name": "JavaScript",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "197412"
}
],
"symlink_target": ""
} |
from fabric.api import *
from fabric.contrib.files import exists
# Deployment settings for the Vagrant target host.
env.user = 'vagrant'
env.hosts = '192.168.33.10'
# Extract the private key path from `vagrant ssh-config` so Fabric can
# authenticate as the vagrant user.
vagrant_ssh = local('vagrant ssh-config | grep IdentityFile', capture=True)
env.key_filename = vagrant_ssh.split()[1].strip('"')
# Project layout and site configuration used by the tasks below.
env.project_dir = 'duoclub'
env.project_name = 'duoclub'
env.project_config = 'duoclub'
env.project_root = '/vagrant'
env.project_domain = 'duo.mittya.com'
env.project_port = '8080'
env.nginx_log = '/home/%s/logs' % env.user
def venv():
    """
    Create the virtualenv inside the project directory and install
    production dependencies into it.
    """
    with cd(env.project_root):
        if exists('venv'):
            run('rm -rf venv')
        run('virtualenv venv --python=python3.6')
        # If Celery is running, stop it first; otherwise installing the
        # dependencies will fail.
        pid = sudo('sudo supervisorctl pid')
        if 'no such' not in pid:
            sudo('kill %s' % pid)
        with prefix('source venv/bin/activate'):
            run('pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements/prod.txt')
            run('pip list --format=columns')
            run('python -V')
            run('python %s/manage.py collectstatic --settings=%s.settings.prod --no-input' % (env.project_name, env.project_config))
            run('deactivate')
def gunicorn():
    """
    Configure Gunicorn: render the launch-script template and install it
    under ~/run/.
    """
    with cd('%s/deploy' % env.project_root):
        # Create a temporary copy of the template and substitute variables.
        run('cp gunicorn gunicorn_tmp')
        run('sed -i "s/{# user #}/%s/g" gunicorn_tmp' % env.user)
        run('sed -i "s/{# project_dir #}/%s/g" gunicorn_tmp' % env.project_dir)
        run('sed -i "s/{# project_name #}/%s/g" gunicorn_tmp' % env.project_name)
        run('sed -i "s/{# project_port #}/%s/g" gunicorn_tmp' % env.project_port)
        run('sed -i "s/{# project_config #}/%s/g" gunicorn_tmp' % env.project_config)
        # Install the new site's launch script.
        if not exists('/home/%s/run' % env.user):
            run('mkdir /home/%s/run' % env.user)
        if exists('/home/%s/run/gunicorn_%s' % (env.user, env.project_name)):
            run('rm /home/%s/run/gunicorn_%s' % (env.user, env.project_name))
        run('cp gunicorn_tmp /home/%s/run/gunicorn_%s' % (env.user, env.project_name))
        # Remove the temporary file.
        run('rm gunicorn_tmp')
        # Make the script executable.
        run('chmod +x /home/%s/run/gunicorn_%s' % (env.user, env.project_name))
def supervisor():
    """
    Configure Supervisor: install the daemon config, this project's program
    config and the Celery worker config, then (re)start everything and
    enable start on boot.
    """
    with cd('%s/deploy' % env.project_root):
        # Supervisor starts automatically after installation, so stop any
        # running instance first.
        pid = sudo('sudo supervisorctl pid')
        if 'no such' not in pid:
            sudo('supervisorctl stop all')
            sudo('kill %s' % pid)
        # Create a temporary copy of the template and substitute variables.
        run('cp supervisor.conf supervisor_tmp.conf')
        run('sed -i "s/{# user #}/%s/g" supervisor_tmp.conf' % env.user)
        run('sed -i "s/{# project_dir #}/%s/g" supervisor_tmp.conf' % env.project_dir)
        run('sed -i "s/{# project_name #}/%s/g" supervisor_tmp.conf' % env.project_name)
        # Install the main Supervisor configuration file.
        if not exists('/etc/supervisor/conf.d'):
            sudo('mkdir -p /etc/supervisor/conf.d')
        if exists('/etc/supervisor/supervisord.conf'):
            sudo('rm /etc/supervisor/supervisord.conf')
        sudo('cp supervisord.conf /etc/supervisor/supervisord.conf')
        # Install this project's program configuration.
        if exists('/etc/supervisor/conf.d/%s.conf' % env.project_name):
            sudo('rm /etc/supervisor/conf.d/%s.conf' % env.project_name)
        sudo('cp supervisor_tmp.conf /etc/supervisor/conf.d/%s.conf' % env.project_name)
        # Remove the temporary file.
        run('rm supervisor_tmp.conf')
        # Install the Celery worker configuration.
        run('cp celeryd.conf celeryd_tmp.conf')
        run('sed -i "s/{# project_name #}/%s/g" celeryd_tmp.conf' % env.project_name)
        if exists('/etc/supervisor/conf.d/celeryd.conf'):
            sudo('rm /etc/supervisor/conf.d/celeryd.conf')
        sudo('cp celeryd_tmp.conf /etc/supervisor/conf.d/celeryd.conf')
        run('rm celeryd_tmp.conf')
        if not exists('/var/log/celery'):
            run('mkdir -p /var/log/celery')
        # Start with the new configuration.
        sudo('supervisord -c /etc/supervisor/supervisord.conf')
        sudo('supervisorctl reread')
        sudo('supervisorctl update')
        with settings(warn_only=True):
            sudo('supervisorctl status')
            sudo('supervisorctl status %s' % env.project_name)
        # Enable start on boot.
        if not exists('/etc/init.d/supervisord'):
            sudo('cp supervisord /etc/init.d/supervisord')
            sudo('chmod +x /etc/init.d/supervisord')
            sudo('mkdir /var/run/supervisor')
            sudo('chmod 777 /run')
            sudo('chmod 777 /var/log')
            sudo('update-rc.d supervisord defaults')
def nginx():
    """
    Configure nginx: render the site template, install/enable the site,
    create the log directory and restart the server.
    """
    with cd('%s/deploy' % env.project_root):
        # Create a temporary copy of the template and substitute variables.
        run('cp nginx.conf nginx_tmp.conf')
        run('sed -i "s/{# user #}/%s/g" nginx_tmp.conf' % env.user)
        run('sed -i "s/{# domain #}/%s/g" nginx_tmp.conf' % env.project_domain)
        run('sed -i "s/{# project_dir #}/%s/g" nginx_tmp.conf' % env.project_dir)
        run('sed -i "s/{# project_name #}/%s/g" nginx_tmp.conf' % env.project_name)
        run('sed -i "s/{# project_port #}/%s/g" nginx_tmp.conf' % env.project_port)
        # Install and enable the new site.
        if exists('/etc/nginx/sites-available/%s' % env.project_name):
            sudo('rm /etc/nginx/sites-available/%s' % env.project_name)
            sudo('rm /etc/nginx/sites-enabled/%s' % env.project_name)
        sudo('cp nginx_tmp.conf /etc/nginx/sites-available/%s' % env.project_name)
        sudo('ln -s /etc/nginx/sites-available/%s /etc/nginx/sites-enabled/%s' % (env.project_name, env.project_name))
        # Remove the temporary file.
        run('rm nginx_tmp.conf')
        # Log directory.
        if not exists(env.nginx_log):
            run('mkdir %s' % env.nginx_log)
        # Test the configuration and restart.
        sudo('service nginx configtest')
        sudo('service nginx restart')
def restart():
    """Restart all Supervisor-managed processes and nginx."""
    with settings(warn_only=True):
        sudo('supervisorctl restart all')
        sudo('supervisorctl status')
        sudo('service nginx restart')
def deploy():
    """Run the full deployment: virtualenv, Gunicorn, Supervisor, nginx."""
    sudo('echo "Deploy..."')
    venv()
    gunicorn()
    supervisor()
    nginx()
| {
"content_hash": "0d61ff0e72d074b5cc12cde60182653d",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 132,
"avg_line_length": 32.462765957446805,
"alnum_prop": 0.5739800098312305,
"repo_name": "mittya/duoclub",
"id": "ca0f45cdf757e90be04795f26dae25b454f6f282",
"size": "6412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4334"
},
{
"name": "HTML",
"bytes": "59173"
},
{
"name": "JavaScript",
"bytes": "1915"
},
{
"name": "Python",
"bytes": "104074"
},
{
"name": "Shell",
"bytes": "5158"
}
],
"symlink_target": ""
} |
"""Benchmarks on Keras layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.keras.benchmarks.layer_benchmarks import layer_benchmarks_test_base
from tensorflow.python.platform import benchmark
def _get_benchmark_name(name):
return name.split("__")[-1].split("_")
def _get_metadata(name):
return {
"model_name": "ideal_layers",
"parameters": name[1] + "_shape",
}
def _get_input_data(inputs):
if "input_shape" in inputs:
return tf.ones(inputs["input_shape"])
elif "input" in inputs:
return inputs["input"]
else:
raise ValueError("Please specificy either `input_shape` or `input`"
"for the benchmark test")
def _generate_benchmark_params(*params_list):
benchmark_params = []
for params in params_list:
benchmark_params.extend(
[((param[0] + "_CPU",) + param[1:]) for param in params])
benchmark_params.extend(
[((param[0] + "_GPU",) + param[1:]) for param in params])
return benchmark_params
def _layer_call_backward(layer, x):
  """Run one forward + backward pass of `layer` on input `x`.

  Uses a scalar mean-square loss; the computed gradients are discarded,
  since only the timing of the pass matters for the benchmark.
  """
  with tf.GradientTape() as tape:
    y = layer(x)
    loss = tf.reduce_mean(y**2)
  _ = tape.gradient(loss, layer.trainable_variables)
class KerasLayerBenchmarks(six.with_metaclass(
    benchmark.ParameterizedBenchmark,
    layer_benchmarks_test_base.LayerBenchmarksBase)):
  """Forward/backward call benchmarks for a selection of Keras layers.

  Each benchmark method is fanned out by `ParameterizedBenchmark` over
  `_benchmark_parameters` (layer class, constructor kwargs, input spec,
  iteration count), once for CPU and once for GPU.
  """

  # The parameter of each layer benchmark is a tuple, and the first one is
  # the benchmark name. It must follow the convention of
  # "{layer_name}_{small|normal|large}_shape" to make it compatible with
  # `self.report_benchmark()` method.
  _benchmark_parameters = _generate_benchmark_params([
      ("Conv2D_small_shape", tf.keras.layers.Conv2D,
       {"filters": 1, "kernel_size": 1, "activation": "relu"},
       {"input_shape": (1, 1, 1, 1)}, 10),
      ("Conv2D_normal_shape", tf.keras.layers.Conv2D,
       {"filters": 1, "kernel_size": 1, "activation": "relu"},
       {"input_shape": (64, 28, 28, 3)}, 10),
      ("LSTM_small_shape", tf.keras.layers.LSTM,
       {"units": 1}, {"input_shape": (1, 1, 1)}, 10),
      ("LSTM_normal_shape", tf.keras.layers.LSTM,
       {"units": 4}, {"input_shape": (32, 10, 8)}, 10),
      ("Embedding_small_shape", tf.keras.layers.Embedding,
       {"input_dim": 1, "output_dim": 1, "input_length": 1},
       {"input": np.random.randint(1, size=(1, 1))}, 10),
      ("Embedding_normal_shape", tf.keras.layers.Embedding,
       {"input_dim": 1000, "output_dim": 64, "input_length": 10},
       {"input": np.random.randint(1000, size=(32, 10))}, 10),
  ])

  def benchmark_layer_call(self, layer_cls, layer_args, inputs, num_iters):
    # Eager forward call.
    layer = layer_cls(**layer_args)
    x = _get_input_data(inputs)
    fn = functools.partial(layer, x)
    name = _get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_with_function(
      self, layer_cls, layer_args, inputs, num_iters):
    # Forward call with `layer.call` wrapped in a tf.function.
    layer = layer_cls(**layer_args)
    x = _get_input_data(inputs)
    layer.call = tf.function(layer.call)
    fn = functools.partial(layer, x)
    name = _get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.function"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_with_xla(
      self, layer_cls, layer_args, inputs, num_iters):
    name = _get_benchmark_name(self._get_name())
    # TODO(b/173461426)
    if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
      return
    # Forward call with XLA-compiled `layer.call`.
    layer = layer_cls(**layer_args)
    x = _get_input_data(inputs)
    layer.call = tf.function(
        layer.call, jit_compile=True)
    fn = functools.partial(layer, x)
    metadata = {"implementation": name[0] + ".layer.call.xla"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward(
      self, layer_cls, layer_args, inputs, num_iters):
    # Eager forward + backward pass.
    layer = layer_cls(**layer_args)
    x = _get_input_data(inputs)
    fn = functools.partial(_layer_call_backward, layer, x)
    name = _get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.backward"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)

  def benchmark_layer_call_backward_with_function(
      self, layer_cls, layer_args, inputs, num_iters):
    # Forward + backward pass with `layer.call` wrapped in a tf.function.
    layer = layer_cls(**layer_args)
    x = _get_input_data(inputs)
    layer.call = tf.function(layer.call)
    fn = functools.partial(_layer_call_backward, layer, x)
    name = _get_benchmark_name(self._get_name())
    metadata = {"implementation": name[0] + ".layer.call.backward.function"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)
class KerasLayerBenchmarksBackwardXLA(six.with_metaclass(
    benchmark.ParameterizedBenchmark,
    layer_benchmarks_test_base.LayerBenchmarksBase)):
  """Backward-pass benchmarks with XLA compilation, parameterized per layer."""

  _benchmark_parameters = _generate_benchmark_params([
      ("Conv2D_small_shape", tf.keras.layers.Conv2D,
       {"filters": 1, "kernel_size": 1, "activation": "relu"},
       (1, 1, 1, 1), 10000),
      ("Conv2D_normal_shape", tf.keras.layers.Conv2D,
       {"filters": 1, "kernel_size": 1, "activation": "relu"},
       (64, 28, 28, 3), 10000),
      # TODO(b/153480400)
      # ("LSTM_small_shape", tf.keras.layers.LSTM,
      #  {"units": 1}, (1, 1, 1), 10000),
      # ("LSTM_normal_shape", tf.keras.layers.LSTM,
      #  {"units": 4}, (32, 10, 8), 10000),
      ("Embedding_small_shape", tf.keras.layers.Embedding,
       {"input_dim": 1, "output_dim": 1, "input_length": 1},
       {"input": np.random.randint(1, size=(1, 1))}, 10),
      ("Embedding_normal_shape", tf.keras.layers.Embedding,
       {"input_dim": 1000, "output_dim": 64, "input_length": 10},
       {"input": np.random.randint(1000, size=(32, 10))}, 10),
  ])

  def benchmark_layer_call_backward_with_xla(
      self, layer_cls, layer_args, inputs, num_iters):
    name = _get_benchmark_name(self._get_name())
    # TODO(b/173461426)
    if layer_cls is tf.keras.layers.Embedding and name[-1] == "GPU":
      return
    # Forward + backward pass with XLA-compiled `layer.call`.
    layer = layer_cls(**layer_args)
    x = _get_input_data(inputs)
    layer.call = tf.function(
        layer.call, jit_compile=True)
    fn = functools.partial(_layer_call_backward, layer, x)
    metadata = {"implementation": name[0] + ".layer.call.backward.xla"}
    metadata.update(_get_metadata(name))
    self.run_report(fn, num_iters, metadata)
if __name__ == "__main__":
  # Delegate to the TF test runner, which discovers and runs the benchmarks.
  tf.test.main()
| {
"content_hash": "500e3038be34b1c784882fd53acbf836",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 90,
"avg_line_length": 36.134408602150536,
"alnum_prop": 0.6384466597232554,
"repo_name": "freedomtan/tensorflow",
"id": "a8a0031046583ed13a6e8d4a5c1e07f6f416d38c",
"size": "7410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/benchmarks/layer_benchmarks/layer_benchmarks_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import json
import os
import re
import sys
"""Simple superficial API doc generator for .cmake files"""
def crawl_for_cmake(path, excluded_files=None):
    """Recursively search *path* for ``*.cmake`` files.

    :param path: root directory to walk
    :param excluded_files: optional collection of bare file names to skip
    :returns: list of ``(fullpath, relpath)`` tuples, relpath relative to *path*
    """
    found = []
    for dirpath, _, filenames in os.walk(path):
        for name in filenames:
            # Only CMake sources, minus any explicitly excluded names.
            if not name.endswith('.cmake'):
                continue
            if excluded_files and name in excluded_files:
                continue
            abspath = os.path.join(dirpath, name)
            found.append((abspath, os.path.relpath(abspath, path)))
    return found
def generate_rst(files, skip_private=False, skip_undocumented=False):
    """
    Each of the CMake files is traversed line by line, looking for lines like function(...) or macro(...).
    For each of these, multiple lines of reStructured text are added documenting the function.

    :param files: iterable of (fullpath, relpath) tuples, e.g. from crawl_for_cmake()
    :param skip_private: if True, omit the "Non-public" section from the output
    :param skip_undocumented: if True, omit the "Not documented" section
    :returns: list of reStructuredText lines
    :raises RuntimeError: if the same function/macro name appears twice
    """
    # Buckets keyed by function/macro name; values are lists of rst lines.
    public = {}
    documented = {}
    undocumented = {}
    for (fullpath, relpath) in files:
        # last_block accumulates the '#' comment lines immediately preceding a
        # declaration; a lone '@public' comment line marks the next item public.
        last_block = []
        last_block_public = False
        with open(fullpath, 'r') as f:
            lines = f.readlines()
            for line in lines:
                if line.startswith('#'):
                    line = line.lstrip('#')
                    if line.strip() == '@public':
                        last_block_public = True
                    else:
                        last_block.append(line.rstrip('\n'))
                else:
                    declaration = re.match(r'[a-zA-Z]+ *\([a-zA-Z0-9_ ]+\)', line)
                    if declaration is None:
                        # Any non-comment, non-declaration line breaks the
                        # association between the comment block and what follows.
                        last_block = []
                        last_block_public = False
                    else:
                        tokens = line.split('(')
                        dec_type = tokens[0].strip()
                        dec_args = tokens[1].strip().rstrip(')').split(' ')
                        if dec_type == 'function' or dec_type == 'macro':
                            rst = []
                            # directives defined in catkin-sphinx
                            dec_line = '.. _`%s_ref`:\n\n`%s`\n%s\n\n.. cmake:macro:: %s(%s)' % (dec_args[0], dec_args[0], '~' * (len(dec_args[0]) + 2), dec_args[0], ', '.join(dec_args[1:]))
                            rst.append(dec_line)
                            rst.append('')
                            rst.append(' *[%s defined in %s]*' % (dec_type, relpath))
                            if last_block:
                                rst.append('')
                                rst.extend(last_block)
                            # NOTE(review): this duplicate check does not consult
                            # `public`, so a name documented as @public twice would
                            # silently be overwritten — confirm if intentional.
                            if dec_args[0] in documented or dec_args[0] in undocumented:
                                raise RuntimeError('Function/macro with same name "%s" exists multiple times' % dec_args[0])
                            if last_block_public:
                                public[dec_args[0]] = rst
                            elif last_block:
                                documented[dec_args[0]] = rst
                            else:
                                undocumented[dec_args[0]] = rst
                            last_block = []
                            last_block_public = False
    # Assemble the final document: header, then the three (optional) sections.
    rst = ['Extracted CMake API reference',
           '=============================']
    rst.append('This page was auto-generated from cmake source files using %s\n' % os.path.basename(__file__))
    rst.append('.. ' + '!' * 70)
    rst.append('.. !!!!!! Auto-generated file, do not modify')
    rst.append('.. ' + '!' * 70)
    rst.append('')
    rst.append('.. contents::')
    rst.append(' :local:')
    rst.append('')
    rst.append('')
    rst.append('Public CMake functions / macros')
    rst.append('-------------------------------')
    rst.append('')
    # First a bullet index of names, then the full entries, both sorted.
    for name in sorted(public.keys()):
        rst.append(' * :cmake:macro:`%s`' % name)
    for name in sorted(public.keys()):
        rst.append('')
        rst.extend(public[name])
    rst.append('')
    if not skip_private:
        rst.append('Non-public CMake functions / macros')
        rst.append('-----------------------------------')
        rst.append('')
        for name in sorted(documented.keys()):
            rst.append(' * :cmake:macro:`%s`' % name)
        for name in sorted(documented.keys()):
            rst.append('')
            rst.extend(documented[name])
        rst.append('')
    if not skip_undocumented:
        rst.append('Not documented CMake functions / macros')
        rst.append('---------------------------------------')
        for name in sorted(undocumented.keys()):
            rst.append('')
            rst.extend(undocumented[name])
        rst.append('')
    return rst
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Crawls a path for .cmake files and extract documentation of functions and macros into reStructured text.')
    parser.add_argument('path', nargs='?', default='.', help='The path to be crawled')
    parser.add_argument('-o', '--output', help='The name of the generated rst file')
    parser.add_argument('--skip_private', action='store_true', help='Skip documented items not marked with @public')
    parser.add_argument('--skip_undocumented', action='store_true', help='Skip items without documentation.')
    args = parser.parse_args()
    # Optional per-project exclusion list: a JSON array of bare file names in
    # <path>/.sphinx_exclusions.json that should not be crawled.
    exclusions = '{}/.sphinx_exclusions.json'.format(args.path)
    excluded_files = []
    if os.path.exists(exclusions):
        try:
            with open(exclusions, 'r') as f:
                excluded_files = json.load(f)
        except (TypeError, ValueError) as err:
            # Malformed exclusion file is a hard error: bail with a hint.
            print('unable to load exclusions\nerr={}\n'
                  'make sure the file <{}> is valid json or remove it'.
                  format(err, exclusions), file=sys.stderr)
            sys.exit(-1)
    cmake_files = crawl_for_cmake(args.path, excluded_files)
    lines = generate_rst(cmake_files, args.skip_private, args.skip_undocumented)
    if args.output:
        with open(args.output, 'w') as f:
            f.write('\n'.join(lines))
    else:
        # No output file requested: dump the generated rst to stdout.
        for line in lines:
            print(line)
    sys.exit(0)
| {
"content_hash": "f1fc43c2129eaceca72cccc05037dccf",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 186,
"avg_line_length": 40.026315789473685,
"alnum_prop": 0.5208744247205785,
"repo_name": "ros/catkin",
"id": "12ef0335c68845e13acefb388c7e1e18a0c11fe5",
"size": "7713",
"binary": false,
"copies": "1",
"ref": "refs/heads/noetic-devel",
"path": "doc/generate_cmake_rst.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2915"
},
{
"name": "CMake",
"bytes": "195414"
},
{
"name": "EmberScript",
"bytes": "3429"
},
{
"name": "Makefile",
"bytes": "331"
},
{
"name": "Python",
"bytes": "228932"
},
{
"name": "Shell",
"bytes": "23212"
}
],
"symlink_target": ""
} |
from distutils.core import setup

# Package metadata for PyBIND.
# NOTE(review): `install_requires` is a setuptools keyword; plain distutils
# ignores it with a warning. `license='LICENSE'` looks like a file name rather
# than a license identifier — confirm intent before changing.
setup(name='PyBIND',
      version='0.1.0',
      description='Python package for writing ISC BIND files',
      author='Brian L. Brush',
      author_email='rhubarbsin@gmail.com',
      classifiers=['Development Status :: 4 - Beta',
                   'Intended Audience :: Information Technology',
                   'License :: OSI Approved :: MIT License',
                   'Topic :: Internet :: Name Service (DNS)'],  # comma was missing here: SyntaxError
      license='LICENSE',
      url='https://github.com/RhubarbSin/PyBIND',
      packages=['pybind'],
      install_requires=['ipaddr >= 2.1.7'])
| {
"content_hash": "1d212962d87a216192d3c378e33c7e04",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 65,
"avg_line_length": 40.2,
"alnum_prop": 0.5887230514096186,
"repo_name": "RhubarbSin/PyBIND",
"id": "b8404f8860ba3aa472eaf3d72cd8cc3ed29b1030",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35549"
}
],
"symlink_target": ""
} |
from pysmt.shortcuts import Symbol, LE, GE, Int, And, Equals, Plus, Solver
from pysmt.typing import INT

# Puzzle: assign an integer in [1, 10] to every distinct letter of
# "hello" and "world" so that both words sum to the same total, 36.
hello = [Symbol(ch, INT) for ch in "hello"]
world = [Symbol(ch, INT) for ch in "world"]
letters = set(hello + world)

# Domain constraint: 1 <= letter <= 10 for every distinct letter.
bounds = [And(LE(Int(1), letter), GE(Int(10), letter)) for letter in letters]
domains = And(bounds)

sum_hello = Plus(hello)
sum_world = Plus(world)

# The two word sums must agree and equal 36.
problem = And(Equals(sum_hello, sum_world),
              Equals(sum_hello, Int(36)))
formula = And(domains, problem)

print("Serialization of the formula:")
print(formula)

# Solve in quantifier-free linear integer arithmetic.
with Solver(logic="QF_LIA") as solver:
    solver.add_assertion(domains)
    if not solver.solve():
        print("Domain is not SAT!!!")
        exit()
    solver.add_assertion(problem)
    if solver.solve():
        for letter in letters:
            print("%s = %s" % (letter, solver.get_value(letter)))
    else:
        print("No solution found")
| {
"content_hash": "b0c86df4955c0a1967d6194b3ae0b427",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 26.181818181818183,
"alnum_prop": 0.6180555555555556,
"repo_name": "agriggio/pysmt",
"id": "e2eded27723be122f5b40f8e5f9d5c4155bac6ab",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/code_snippets/hello_world_qf_lia.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "1019481"
},
{
"name": "Shell",
"bytes": "6094"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.encoding import python_2_unicode_compatible
from django.db.models.signals import post_save
from django.dispatch import receiver
@python_2_unicode_compatible
class SecuredpiProfile(models.Model):
    """Per-user profile holding optional contact details."""
    # One profile per Django auth user; deleting the user cascades here.
    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        related_name='profile',
        on_delete=models.CASCADE
    )
    # Optional contact fields; blank=True makes them optional in forms.
    first_name = models.CharField(max_length=50, blank=True)
    last_name = models.CharField(max_length=50, blank=True)
    address = models.CharField(max_length=200, blank=True)
    phone_number = models.CharField(max_length=50, blank=True)
    def __str__(self):
        return 'Profile for {}'.format(self.user)
    @property
    def is_active(self):
        # Mirrors the linked auth user's active flag.
        return self.user.is_active
    @classmethod
    def active(cls):
        # QuerySet of all profiles whose underlying user is active.
        return SecuredpiProfile.objects.filter(user__is_active=True)
@receiver(post_save, sender=User)
def create_profile(sender, **kwargs):
    # post_save hook: auto-create a profile the first time a User is saved.
    # The filter guard keeps subsequent saves from creating duplicates.
    if not SecuredpiProfile.objects.filter(user=kwargs['instance']):
        SecuredpiProfile(
            user=kwargs['instance']
        ).save()
| {
"content_hash": "328857b82844c7cc48e434d1d09fcb7c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.7004909983633388,
"repo_name": "Secured-Pi/Secured-Pi",
"id": "665d85a9409258751668f0cadeef3da6d6506078",
"size": "1222",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "securedpi_profile/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95"
},
{
"name": "HTML",
"bytes": "25485"
},
{
"name": "Python",
"bytes": "79801"
}
],
"symlink_target": ""
} |
import logging
import struct
import zlib
from kafka.codec import (
gzip_encode, gzip_decode, snappy_encode, snappy_decode
)
from kafka.common import (
BrokerMetadata, PartitionMetadata, Message, OffsetAndMessage,
ProduceResponse, FetchResponse, OffsetResponse,
OffsetCommitResponse, OffsetFetchResponse
)
from kafka.util import (
read_short_string, read_int_string, relative_unpack,
write_short_string, write_int_string, group_by_topic_and_partition,
BufferUnderflowError, ChecksumError, ConsumerFetchSizeTooSmall
)
log = logging.getLogger("kafka")
class KafkaProtocol(object):
    """
    Class to encapsulate all of the protocol encoding/decoding.
    This class does not have any state associated with it, it is purely
    for organization.
    """
    # Kafka API keys (request type identifiers) from the wire protocol.
    PRODUCE_KEY = 0
    FETCH_KEY = 1
    OFFSET_KEY = 2
    METADATA_KEY = 3
    OFFSET_COMMIT_KEY = 6
    OFFSET_FETCH_KEY = 7
    # The low two bits of a message's attributes byte select the codec.
    ATTRIBUTE_CODEC_MASK = 0x03
    CODEC_NONE = 0x00
    CODEC_GZIP = 0x01
    CODEC_SNAPPY = 0x02
    ###################
    #   Private API   #
    ###################
@classmethod
def _encode_message_header(cls, client_id, correlation_id, request_key):
"""
Encode the common request envelope
"""
return struct.pack('>hhih%ds' % len(client_id),
request_key, # ApiKey
0, # ApiVersion
correlation_id, # CorrelationId
len(client_id),
client_id) # ClientId
@classmethod
def _encode_message_set(cls, messages):
"""
Encode a MessageSet. Unlike other arrays in the protocol,
MessageSets are not length-prefixed
Format
======
MessageSet => [Offset MessageSize Message]
Offset => int64
MessageSize => int32
"""
message_set = ""
for message in messages:
encoded_message = KafkaProtocol._encode_message(message)
message_set += struct.pack('>qi%ds' % len(encoded_message), 0,
len(encoded_message), encoded_message)
return message_set
    @classmethod
    def _encode_message(cls, message):
        """
        Encode a single message.
        The magic number of a message is a format version number.
        The only supported magic number right now is zero
        Format
        ======
        Message => Crc MagicByte Attributes Key Value
          Crc => int32
          MagicByte => int8
          Attributes => int8
          Key => bytes
          Value => bytes
        """
        if message.magic == 0:
            msg = struct.pack('>BB', message.magic, message.attributes)
            msg += write_int_string(message.key)
            msg += write_int_string(message.value)
            # CRC32 covers everything that follows the crc field itself.
            crc = zlib.crc32(msg)
            msg = struct.pack('>i%ds' % len(msg), crc, msg)
        else:
            raise Exception("Unexpected magic number: %d" % message.magic)
        return msg
    @classmethod
    def _decode_message_set_iter(cls, data):
        """
        Iteratively decode a MessageSet
        Reads repeated elements of (offset, message), calling decode_message
        to decode a single message. Since compressed messages contain further
        MessageSets, these two methods have been decoupled so that they may
        recurse easily.

        Yields OffsetAndMessage tuples.
        """
        cur = 0
        read_message = False
        while cur < len(data):
            try:
                ((offset, ), cur) = relative_unpack('>q', data, cur)
                (msg, cur) = read_int_string(data, cur)
                for (offset, message) in KafkaProtocol._decode_message(msg, offset):
                    read_message = True
                    yield OffsetAndMessage(offset, message)
            except BufferUnderflowError:
                # A truncated trailing message is expected when the fetch size
                # cuts the last message short -- but only if at least one full
                # message was already produced.
                if read_message is False:
                    # If we get a partial read of a message, but haven't yielded anyhting
                    # there's a problem
                    raise ConsumerFetchSizeTooSmall()
                else:
                    raise StopIteration()
    @classmethod
    def _decode_message(cls, data, offset):
        """
        Decode a single Message
        The only caller of this method is decode_message_set_iter.
        They are decoupled to support nested messages (compressed MessageSets).
        The offset is actually read from decode_message_set_iter (it is part
        of the MessageSet payload).

        Yields (offset, Message) pairs; a compressed message expands into the
        pairs of its inner MessageSet.
        """
        ((crc, magic, att), cur) = relative_unpack('>iBB', data, 0)
        # Verify integrity before trusting any field.
        if crc != zlib.crc32(data[4:]):
            raise ChecksumError("Message checksum failed")
        (key, cur) = read_int_string(data, cur)
        (value, cur) = read_int_string(data, cur)
        # Low bits of the attributes byte select the compression codec.
        codec = att & KafkaProtocol.ATTRIBUTE_CODEC_MASK
        if codec == KafkaProtocol.CODEC_NONE:
            yield (offset, Message(magic, att, key, value))
        elif codec == KafkaProtocol.CODEC_GZIP:
            gz = gzip_decode(value)
            for (offset, msg) in KafkaProtocol._decode_message_set_iter(gz):
                yield (offset, msg)
        elif codec == KafkaProtocol.CODEC_SNAPPY:
            snp = snappy_decode(value)
            for (offset, msg) in KafkaProtocol._decode_message_set_iter(snp):
                yield (offset, msg)
##################
# Public API #
##################
    @classmethod
    def encode_produce_request(cls, client_id, correlation_id,
                               payloads=None, acks=1, timeout=1000):
        """
        Encode some ProduceRequest structs
        Params
        ======
        client_id: string
        correlation_id: string
        payloads: list of ProduceRequest
        acks: How "acky" you want the request to be
            0: immediate response
            1: written to disk by the leader
            2+: waits for this many number of replicas to sync
            -1: waits for all replicas to be in sync
        timeout: Maximum time the server will wait for acks from replicas.
                 This is _not_ a socket timeout
        """
        payloads = [] if payloads is None else payloads
        # Group by topic, then partition, to match the wire layout.
        grouped_payloads = group_by_topic_and_partition(payloads)
        message = cls._encode_message_header(client_id, correlation_id,
                                             KafkaProtocol.PRODUCE_KEY)
        message += struct.pack('>hii', acks, timeout, len(grouped_payloads))
        for topic, topic_payloads in grouped_payloads.items():
            message += struct.pack('>h%dsi' % len(topic),
                                   len(topic), topic, len(topic_payloads))
            for partition, payload in topic_payloads.items():
                msg_set = KafkaProtocol._encode_message_set(payload.messages)
                message += struct.pack('>ii%ds' % len(msg_set), partition,
                                       len(msg_set), msg_set)
        # Requests are framed on the wire with a leading int32 size.
        return struct.pack('>i%ds' % len(message), len(message), message)
@classmethod
def decode_produce_response(cls, data):
"""
Decode bytes to a ProduceResponse
Params
======
data: bytes to decode
"""
((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
for i in range(num_topics):
((strlen,), cur) = relative_unpack('>h', data, cur)
topic = data[cur:cur + strlen]
cur += strlen
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in range(num_partitions):
((partition, error, offset), cur) = relative_unpack('>ihq',
data, cur)
yield ProduceResponse(topic, partition, error, offset)
    @classmethod
    def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
                             max_wait_time=100, min_bytes=4096):
        """
        Encodes some FetchRequest structs
        Params
        ======
        client_id: string
        correlation_id: string
        payloads: list of FetchRequest
        max_wait_time: int, how long to block waiting on min_bytes of data
        min_bytes: int, the minimum number of bytes to accumulate before
                   returning the response
        """
        payloads = [] if payloads is None else payloads
        grouped_payloads = group_by_topic_and_partition(payloads)
        message = cls._encode_message_header(client_id, correlation_id,
                                             KafkaProtocol.FETCH_KEY)
        # -1 is the replica id
        message += struct.pack('>iiii', -1, max_wait_time, min_bytes,
                               len(grouped_payloads))
        for topic, topic_payloads in grouped_payloads.items():
            message += write_short_string(topic)
            message += struct.pack('>i', len(topic_payloads))
            for partition, payload in topic_payloads.items():
                message += struct.pack('>iqi', partition, payload.offset,
                                       payload.max_bytes)
        # Requests are framed on the wire with a leading int32 size.
        return struct.pack('>i%ds' % len(message), len(message), message)
    @classmethod
    def decode_fetch_response(cls, data):
        """
        Decode bytes to a FetchResponse
        Params
        ======
        data: bytes to decode

        Yields one FetchResponse per (topic, partition); its last field is a
        lazy iterator over the partition's MessageSet.
        """
        ((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
        for i in range(num_topics):
            (topic, cur) = read_short_string(data, cur)
            ((num_partitions,), cur) = relative_unpack('>i', data, cur)
            for i in range(num_partitions):
                ((partition, error, highwater_mark_offset), cur) = \
                    relative_unpack('>ihq', data, cur)
                (message_set, cur) = read_int_string(data, cur)
                yield FetchResponse(
                    topic, partition, error,
                    highwater_mark_offset,
                    KafkaProtocol._decode_message_set_iter(message_set))
    @classmethod
    def encode_offset_request(cls, client_id, correlation_id, payloads=None):
        """
        Encode some OffsetRequest structs.
        Params
        ======
        client_id: string
        correlation_id: string
        payloads: list of OffsetRequest
        """
        payloads = [] if payloads is None else payloads
        grouped_payloads = group_by_topic_and_partition(payloads)
        message = cls._encode_message_header(client_id, correlation_id,
                                             KafkaProtocol.OFFSET_KEY)
        # -1 is the replica id
        message += struct.pack('>ii', -1, len(grouped_payloads))
        for topic, topic_payloads in grouped_payloads.items():
            message += write_short_string(topic)
            message += struct.pack('>i', len(topic_payloads))
            for partition, payload in topic_payloads.items():
                message += struct.pack('>iqi', partition, payload.time,
                                       payload.max_offsets)
        # Requests are framed on the wire with a leading int32 size.
        return struct.pack('>i%ds' % len(message), len(message), message)
    @classmethod
    def decode_offset_response(cls, data):
        """
        Decode bytes to an OffsetResponse
        Params
        ======
        data: bytes to decode

        Yields one OffsetResponse per (topic, partition) with a tuple of
        offsets.
        """
        ((correlation_id, num_topics), cur) = relative_unpack('>ii', data, 0)
        for i in range(num_topics):
            (topic, cur) = read_short_string(data, cur)
            ((num_partitions,), cur) = relative_unpack('>i', data, cur)
            for i in range(num_partitions):
                ((partition, error, num_offsets,), cur) = \
                    relative_unpack('>ihi', data, cur)
                offsets = []
                for j in range(num_offsets):
                    ((offset,), cur) = relative_unpack('>q', data, cur)
                    offsets.append(offset)
                yield OffsetResponse(topic, partition, error, tuple(offsets))
    @classmethod
    def encode_metadata_request(cls, client_id, correlation_id, topics=None):
        """
        Encode a MetadataRequest
        Params
        ======
        client_id: string
        correlation_id: string
        topics: list of strings
        """
        # An empty topic list asks the broker for metadata on all topics.
        topics = [] if topics is None else topics
        message = cls._encode_message_header(client_id, correlation_id,
                                             KafkaProtocol.METADATA_KEY)
        message += struct.pack('>i', len(topics))
        for topic in topics:
            message += struct.pack('>h%ds' % len(topic), len(topic), topic)
        return write_int_string(message)
    @classmethod
    def decode_metadata_response(cls, data):
        """
        Decode bytes to a MetadataResponse
        Params
        ======
        data: bytes to decode

        Returns (brokers, topicMetadata) where brokers maps
        nodeId -> BrokerMetadata and topicMetadata maps
        topic name -> {partition: PartitionMetadata}.
        """
        ((correlation_id, numBrokers), cur) = relative_unpack('>ii', data, 0)
        # Broker info
        brokers = {}
        for i in range(numBrokers):
            ((nodeId, ), cur) = relative_unpack('>i', data, cur)
            (host, cur) = read_short_string(data, cur)
            ((port,), cur) = relative_unpack('>i', data, cur)
            brokers[nodeId] = BrokerMetadata(nodeId, host, port)
        # Topic info
        ((num_topics,), cur) = relative_unpack('>i', data, cur)
        topicMetadata = {}
        for i in range(num_topics):
            ((topicError,), cur) = relative_unpack('>h', data, cur)
            (topicName, cur) = read_short_string(data, cur)
            ((num_partitions,), cur) = relative_unpack('>i', data, cur)
            partitionMetadata = {}
            for j in range(num_partitions):
                ((partitionErrorCode, partition, leader, numReplicas), cur) = \
                    relative_unpack('>hiii', data, cur)
                # Replica and in-sync-replica lists are arrays of int32 ids.
                (replicas, cur) = relative_unpack('>%di' % numReplicas,
                                                  data, cur)
                ((numIsr,), cur) = relative_unpack('>i', data, cur)
                (isr, cur) = relative_unpack('>%di' % numIsr, data, cur)
                partitionMetadata[partition] = \
                    PartitionMetadata(topicName, partition, leader,
                                      replicas, isr)
            topicMetadata[topicName] = partitionMetadata
        return (brokers, topicMetadata)
    @classmethod
    def encode_offset_commit_request(cls, client_id, correlation_id,
                                     group, payloads):
        """
        Encode some OffsetCommitRequest structs
        Params
        ======
        client_id: string
        correlation_id: string
        group: string, the consumer group you are committing offsets for
        payloads: list of OffsetCommitRequest
        """
        grouped_payloads = group_by_topic_and_partition(payloads)
        message = cls._encode_message_header(client_id, correlation_id,
                                             KafkaProtocol.OFFSET_COMMIT_KEY)
        message += write_short_string(group)
        message += struct.pack('>i', len(grouped_payloads))
        for topic, topic_payloads in grouped_payloads.items():
            message += write_short_string(topic)
            message += struct.pack('>i', len(topic_payloads))
            for partition, payload in topic_payloads.items():
                message += struct.pack('>iq', partition, payload.offset)
                message += write_short_string(payload.metadata)
        # Requests are framed on the wire with a leading int32 size.
        return struct.pack('>i%ds' % len(message), len(message), message)
@classmethod
def decode_offset_commit_response(cls, data):
"""
Decode bytes to an OffsetCommitResponse
Params
======
data: bytes to decode
"""
((correlation_id,), cur) = relative_unpack('>i', data, 0)
(client_id, cur) = read_short_string(data, cur)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for i in xrange(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for i in xrange(num_partitions):
((partition, error), cur) = relative_unpack('>ih', data, cur)
yield OffsetCommitResponse(topic, partition, error)
    @classmethod
    def encode_offset_fetch_request(cls, client_id, correlation_id,
                                    group, payloads):
        """
        Encode some OffsetFetchRequest structs
        Params
        ======
        client_id: string
        correlation_id: string
        group: string, the consumer group you are fetching offsets for
        payloads: list of OffsetFetchRequest
        """
        grouped_payloads = group_by_topic_and_partition(payloads)
        message = cls._encode_message_header(client_id, correlation_id,
                                             KafkaProtocol.OFFSET_FETCH_KEY)
        message += write_short_string(group)
        message += struct.pack('>i', len(grouped_payloads))
        for topic, topic_payloads in grouped_payloads.items():
            message += write_short_string(topic)
            message += struct.pack('>i', len(topic_payloads))
            for partition, payload in topic_payloads.items():
                # Only the partition id is sent; offsets are what we fetch.
                message += struct.pack('>i', partition)
        # Requests are framed on the wire with a leading int32 size.
        return struct.pack('>i%ds' % len(message), len(message), message)
    @classmethod
    def decode_offset_fetch_response(cls, data):
        """
        Decode bytes to an OffsetFetchResponse
        Params
        ======
        data: bytes to decode

        Yields one OffsetFetchResponse per (topic, partition).
        """
        ((correlation_id,), cur) = relative_unpack('>i', data, 0)
        (client_id, cur) = read_short_string(data, cur)
        ((num_topics,), cur) = relative_unpack('>i', data, cur)
        for i in range(num_topics):
            (topic, cur) = read_short_string(data, cur)
            ((num_partitions,), cur) = relative_unpack('>i', data, cur)
            for i in range(num_partitions):
                ((partition, offset), cur) = relative_unpack('>iq', data, cur)
                (metadata, cur) = read_short_string(data, cur)
                ((error,), cur) = relative_unpack('>h', data, cur)
                yield OffsetFetchResponse(topic, partition, offset,
                                          metadata, error)
def create_message(payload, key=None):
    """
    Construct a Message
    Params
    ======
    payload: bytes, the payload to send to Kafka
    key: bytes, a key used for partition routing (optional)
    """
    # magic=0 (protocol version 0) and attributes=0 (no compression).
    return Message(0, 0, key, payload)
def create_gzip_message(payloads, key=None):
    """
    Construct a Gzipped Message containing multiple Messages
    The given payloads will be encoded, compressed, and sent as a single atomic
    message to Kafka.
    Params
    ======
    payloads: list(bytes), a list of payload to send be sent to Kafka
    key: bytes, a key used for partition routing (optional)
    """
    # Wrap every payload in an inner MessageSet, then compress that set and
    # ship it as the value of one outer message with the gzip codec bit set.
    message_set = KafkaProtocol._encode_message_set(
        [create_message(payload) for payload in payloads])
    gzipped = gzip_encode(message_set)
    codec = KafkaProtocol.ATTRIBUTE_CODEC_MASK & KafkaProtocol.CODEC_GZIP
    return Message(0, 0x00 | codec, key, gzipped)
def create_snappy_message(payloads, key=None):
    """
    Construct a Snappy Message containing multiple Messages
    The given payloads will be encoded, compressed, and sent as a single atomic
    message to Kafka.
    Params
    ======
    payloads: list(bytes), a list of payload to send be sent to Kafka
    key: bytes, a key used for partition routing (optional)
    """
    # Same scheme as create_gzip_message, with the snappy codec bit instead.
    message_set = KafkaProtocol._encode_message_set(
        [create_message(payload) for payload in payloads])
    snapped = snappy_encode(message_set)
    codec = KafkaProtocol.ATTRIBUTE_CODEC_MASK & KafkaProtocol.CODEC_SNAPPY
    return Message(0, 0x00 | codec, key, snapped)
| {
"content_hash": "aee892b39af079af79f3f153a9dd29f2",
"timestamp": "",
"source": "github",
"line_count": 560,
"max_line_length": 89,
"avg_line_length": 35.683928571428574,
"alnum_prop": 0.5543712155332032,
"repo_name": "spicavigo/kafka-python",
"id": "f98547909e95c0c1aeddcce6236b302abda76511",
"size": "19983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kafka/protocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from collections import OrderedDict
from operator import attrgetter
from django.db import connections, transaction, IntegrityError
from django.db.models import signals, sql
from django.utils import six
class ProtectedError(IntegrityError):
    """Raised when deleting would cascade into a PROTECT-ed relation."""
    def __init__(self, msg, protected_objects):
        # Keep the offending objects so callers can report or inspect them.
        self.protected_objects = protected_objects
        super(ProtectedError, self).__init__(msg, protected_objects)
def CASCADE(collector, field, sub_objs, using):
    # on_delete handler: follow the relation and collect related objects too.
    collector.collect(sub_objs, source=field.rel.to,
                      source_attr=field.name, nullable=field.null)
    # Nullable FKs are nulled first so rows can be deleted in any order on
    # backends that cannot defer constraint checks.
    if field.null and not connections[using].features.can_defer_constraint_checks:
        collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
    # on_delete handler: refuse deletion when protected related rows exist.
    raise ProtectedError("Cannot delete some instances of model '%s' because "
        "they are referenced through a protected foreign key: '%s.%s'" % (
            field.rel.to.__name__, sub_objs[0].__class__.__name__, field.name
        ),
        sub_objs
    )
def SET(value):
    """Return an on_delete handler that re-points the FK to *value*.

    If *value* is callable it is evaluated at deletion time; the
    ``deconstruct`` attribute lets migrations serialize the handler.
    """
    if callable(value):
        def set_on_delete(collector, field, sub_objs, using):
            collector.add_field_update(field, value(), sub_objs)
    else:
        def set_on_delete(collector, field, sub_objs, using):
            collector.add_field_update(field, value, sub_objs)
    set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {})
    return set_on_delete
def SET_NULL(collector, field, sub_objs, using):
    # on_delete handler: null out the FK instead of deleting the referrer.
    collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
    # on_delete handler: reset the FK to the field's declared default.
    collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
    # on_delete handler: leave the rows alone; the database enforces (or not).
    pass
class Collector(object):
    def __init__(self, using):
        """Create a collector that deletes using the database alias *using*."""
        self.using = using
        # Initially, {model: set([instances])}, later values become lists.
        self.data = {}
        self.field_updates = {} # {model: {(field, value): set([instances])}}
        # fast_deletes is a list of queryset-likes that can be deleted without
        # fetching the objects into memory.
        self.fast_deletes = []
        # Tracks deletion-order dependency for databases without transactions
        # or ability to defer constraint checks. Only concrete model classes
        # should be included, as the dependencies exist only between actual
        # database tables; proxy models are represented here by their concrete
        # parent.
        self.dependencies = {} # {model: set([models])}
    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted. If the call is
        the result of a cascade, 'source' should be the model that caused it,
        and 'nullable' should be set to True if the relation can be null.
        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        if source is not None:
            source = source._meta.concrete_model
        # Group the incoming objects by concrete model, then by actual class,
        # so proxy instances land in the right buckets.
        concrete_model_objs = {}
        for obj in objs:
            model = obj.__class__
            concrete_model = model._meta.concrete_model
            concrete_model_objs.setdefault(concrete_model, {})
            concrete_model_objs[concrete_model].setdefault(model, [])
            concrete_model_objs[concrete_model][model].append(obj)
        for concrete_model, model_objs in concrete_model_objs.iteritems():
            for model, objs in model_objs.iteritems():
                instances = self.data.setdefault(model, set())
                for obj in objs:
                    if obj not in instances:
                        new_objs.append(obj)
                instances.update(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if source is not None and not nullable:
            # NOTE(review): `concrete_model` here is the loop variable left over
            # from the grouping loop above, so only the *last* concrete model
            # seen is recorded as a dependency. Looks wrong for heterogeneous
            # `objs`; confirm callers only pass homogeneous iterables.
            if reverse_dependency:
                source_, concrete_model_ = concrete_model, source
            else:
                concrete_model_, source_ = concrete_model, source
            self.dependencies.setdefault(
                source_, set()).add(concrete_model_)
        return new_objs
    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogeneous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        # Group by concrete model / class before recording the update so the
        # (field, value) pair is filed under each instance's own model.
        concrete_model_objs = {}
        for obj in objs:
            model = obj.__class__
            concrete_model = model._meta.concrete_model
            concrete_model_objs.setdefault(concrete_model, {})
            concrete_model_objs[concrete_model].setdefault(model, [])
            concrete_model_objs[concrete_model][model].append(obj)
        for concrete_model, model_objs in concrete_model_objs.iteritems():
            for model, objs in model_objs.iteritems():
                self.field_updates.setdefault(
                    model, {}).setdefault(
                    (field, value), set()).update(objs)
    def can_fast_delete(self, objs, from_field=None):
        """
        Determines if the objects in the given queryset-like can be
        fast-deleted. This can be done if there are no cascades, no
        parents and no signal listeners for the object class.
        The 'from_field' tells where we are coming from - we need this to
        determine if the objects are in fact to be deleted. Allows also
        skipping parent -> child -> parent chain preventing fast delete of
        the child.
        """
        if from_field and from_field.rel.on_delete is not CASCADE:
            return False
        # Only queryset-likes that expose _raw_delete qualify.
        if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
            return False
        model = objs.model
        # Signal listeners require per-instance dispatch, so no fast path.
        if (signals.pre_delete.has_listeners(model)
                or signals.post_delete.has_listeners(model)
                or signals.m2m_changed.has_listeners(model)):
            return False
        # The use of from_field comes from the need to avoid cascade back to
        # parent when parent delete is cascading to child.
        opts = model._meta
        if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
            return False
        # Foreign keys pointing to this model, both from m2m and other
        # models.
        for related in opts.get_all_related_objects(
                include_hidden=True, include_proxy_eq=True):
            if related.field.rel.on_delete is not DO_NOTHING:
                return False
        for field in model._meta.virtual_fields:
            if hasattr(field, 'bulk_related_objects'):
                # It's something like generic foreign key.
                return False
        return True
def collect(self, objs, source=None, nullable=False, collect_related=True,
            source_attr=None, reverse_dependency=False):
    """
    Adds 'objs' to the collection of objects to be deleted as well as all
    parent instances. 'objs' must be a homogeneous iterable collection of
    model instances (e.g. a QuerySet). If 'collect_related' is True,
    related objects will be handled by their respective on_delete handler.

    If the call is the result of a cascade, 'source' should be the model
    that caused it and 'nullable' should be set to True, if the relation
    can be null.

    If 'reverse_dependency' is True, 'source' will be deleted before the
    current model, rather than after. (Needed for cascading to parent
    models, the one case in which the cascade follows the forwards
    direction of an FK rather than the reverse direction.)
    """
    if self.can_fast_delete(objs):
        self.fast_deletes.append(objs)
        return
    new_objs = self.add(objs, source, nullable,
                        reverse_dependency=reverse_dependency)
    if not new_objs:
        return
    # Group the newly collected instances by concrete model, then by
    # their actual (possibly proxy) model.
    concrete_model_objs = {}
    for obj in new_objs:
        model = obj.__class__
        concrete_model = model._meta.concrete_model
        concrete_model_objs.setdefault(concrete_model, {})
        concrete_model_objs[concrete_model].setdefault(model, [])
        concrete_model_objs[concrete_model][model].append(obj)
    # Bug fix: plain dicts have no iteritems() on Python 3; the rest of
    # this file uses six for 2/3 compatibility. items() works on both.
    for concrete_model, model_objs in concrete_model_objs.items():
        parent_objs = []
        for model, new_objs in model_objs.items():
            # Recursively collect concrete model's parent models, but not their
            # related objects. These will be found by meta.get_all_related_objects()
            for ptr in six.itervalues(concrete_model._meta.parents):
                if ptr:
                    # FIXME: This seems to be buggy and execute a query for each
                    # parent object fetch. We have the parent data in the obj,
                    # but we don't have a nice way to turn that data into parent
                    # object instance.
                    parent_objs += [getattr(obj, ptr.name) for obj in new_objs]
        # NOTE(review): 'model' and 'ptr' below are whatever the last loop
        # iterations left bound, so when several models/parent links are
        # involved all parents are collected with that last source and
        # source_attr -- confirm this is the intended behavior.
        if parent_objs:
            self.collect(parent_objs, source=model,
                         source_attr=ptr.rel.related_name,
                         collect_related=False,
                         reverse_dependency=True)
        if collect_related:
            for model, new_objs in model_objs.items():
                for related in model._meta.get_all_related_objects(
                        include_hidden=True, include_proxy_eq=True):
                    field = related.field
                    if field.rel.on_delete == DO_NOTHING:
                        continue
                    sub_objs = self.related_objects(related, new_objs)
                    if self.can_fast_delete(sub_objs, from_field=field):
                        self.fast_deletes.append(sub_objs)
                    elif sub_objs:
                        # Delegate to the field's on_delete handler
                        # (CASCADE, SET_NULL, PROTECT, ...).
                        field.rel.on_delete(self, field, sub_objs, self.using)
                for field in model._meta.virtual_fields:
                    if hasattr(field, 'bulk_related_objects'):
                        # It's something like generic foreign key.
                        sub_objs = field.bulk_related_objects(new_objs, self.using)
                        self.collect(sub_objs,
                                     source=model,
                                     source_attr=field.rel.related_name,
                                     nullable=True)
def related_objects(self, related, objs):
    """
    Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
    """
    # Build the "<fk_name>__in" lookup first, then chain the manager calls.
    lookup = {"%s__in" % related.field.name: objs}
    manager = related.model._base_manager.using(self.using)
    return manager.filter(**lookup)
def instances_with_model(self):
    """
    Yields a (model, instance) pair for every collected instance.
    """
    for model, model_instances in six.iteritems(self.data):
        for instance in model_instances:
            yield model, instance
def sort(self):
    """
    Reorders self.data so that a model appears only after every model it
    depends on (per self.dependencies, keyed by concrete model). If a
    dependency cycle prevents a complete ordering, self.data is left
    untouched.
    """
    ordered = []
    satisfied = set()
    remaining = list(self.data)
    while len(ordered) < len(remaining):
        progressed = False
        for candidate in remaining:
            if candidate in ordered:
                continue
            deps = self.dependencies.get(candidate._meta.concrete_model)
            # Skip while some dependency has not been placed yet.
            if deps and deps.difference(satisfied):
                continue
            ordered.append(candidate)
            satisfied.add(candidate._meta.concrete_model)
            progressed = True
        if not progressed:
            # Cycle detected: bail out without reordering.
            return
    self.data = OrderedDict((model, self.data[model]) for model in ordered)
def delete(self):
    """
    Deletes all collected instances: sends pre_delete signals, performs
    scheduled field updates, raw fast deletes and batched deletes inside
    a single atomic block, sends post_delete signals, and finally resets
    the in-memory instances (updated field values, pk set to None).
    """
    # sort instance collections
    for model, instances in self.data.items():
        self.data[model] = sorted(instances, key=attrgetter("pk"))
    # if possible, bring the models in an order suitable for databases that
    # don't support transactions or cannot defer constraint checks until the
    # end of a transaction.
    self.sort()
    with transaction.atomic(using=self.using, savepoint=False):
        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(
                    sender=model, instance=obj, using=self.using
                )
        # fast deletes
        for qs in self.fast_deletes:
            qs._raw_delete(using=self.using)
        # update fields
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            query = sql.UpdateQuery(model)
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                query.update_batch([obj.pk for obj in instances],
                                   {field.name: value}, self.using)
        # reverse instance collections (so dependents are deleted first
        # within each model's list)
        for instances in six.itervalues(self.data):
            instances.reverse()
        # delete instances
        for model, instances in six.iteritems(self.data):
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            query.delete_batch(pk_list, self.using)
            if not model._meta.auto_created:
                for obj in instances:
                    signals.post_delete.send(
                        sender=model, instance=obj, using=self.using
                    )
    # update collected instances (reflect the database state on the
    # in-memory objects after the transaction has committed)
    for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
        for (field, value), instances in six.iteritems(instances_for_fieldvalues):
            for obj in instances:
                setattr(obj, field.attname, value)
    for model, instances in six.iteritems(self.data):
        for instance in instances:
            setattr(instance, model._meta.pk.attname, None)
#from operator import attrgetter
#
#from django.db import connections, transaction, IntegrityError
#from django.db.models import signals, sql
#from django.utils.datastructures import SortedDict
#from django.utils import six
#
#
#class ProtectedError(IntegrityError):
# def __init__(self, msg, protected_objects):
# self.protected_objects = protected_objects
# super(ProtectedError, self).__init__(msg, protected_objects)
#
#
#def CASCADE(collector, field, sub_objs, using):
# collector.collect(sub_objs, source=field.rel.to,
# source_attr=field.name, nullable=field.null)
# if field.null and not connections[using].features.can_defer_constraint_checks:
# collector.add_field_update(field, None, sub_objs)
#
#
#def PROTECT(collector, field, sub_objs, using):
# raise ProtectedError("Cannot delete some instances of model '%s' because "
# "they are referenced through a protected foreign key: '%s.%s'" % (
# field.rel.to.__name__, sub_objs[0].__class__.__name__, field.name
# ),
# sub_objs
# )
#
#
#def SET(value):
# if callable(value):
# def set_on_delete(collector, field, sub_objs, using):
# collector.add_field_update(field, value(), sub_objs)
# else:
# def set_on_delete(collector, field, sub_objs, using):
# collector.add_field_update(field, value, sub_objs)
# return set_on_delete
#
#
#SET_NULL = SET(None)
#
#
#def SET_DEFAULT(collector, field, sub_objs, using):
# collector.add_field_update(field, field.get_default(), sub_objs)
#
#
#def DO_NOTHING(collector, field, sub_objs, using):
# pass
#
#
#class Collector(object):
# def __init__(self, using):
# self.using = using
# # Initially, {model: set([instances])}, later values become lists.
# self.data = {}
# self.field_updates = {} # {model: {(field, value): set([instances])}}
# # fast_deletes is a list of queryset-likes that can be deleted without
# # fetching the objects into memory.
# self.fast_deletes = []
#
# # Tracks deletion-order dependency for databases without transactions
# # or ability to defer constraint checks. Only concrete model classes
# # should be included, as the dependencies exist only between actual
# # database tables; proxy models are represented here by their concrete
# # parent.
# self.dependencies = {} # {model: set([models])}
#
# def add(self, objs, source=None, nullable=False, reverse_dependency=False):
# """
# Adds 'objs' to the collection of objects to be deleted. If the call is
# the result of a cascade, 'source' should be the model that caused it,
# and 'nullable' should be set to True if the relation can be null.
#
# Returns a list of all objects that were not already collected.
# """
# if not objs:
# return []
# new_objs = []
# model = objs[0].__class__
# instances = self.data.setdefault(model, set())
# for obj in objs:
# if obj not in instances:
# new_objs.append(obj)
# instances.update(new_objs)
# # Nullable relationships can be ignored -- they are nulled out before
# # deleting, and therefore do not affect the order in which objects have
# # to be deleted.
# if source is not None and not nullable:
# if reverse_dependency:
# source, model = model, source
# self.dependencies.setdefault(
# source._meta.concrete_model, set()).add(model._meta.concrete_model)
# return new_objs
#
# def add_field_update(self, field, value, objs):
# """
# Schedules a field update. 'objs' must be a homogenous iterable
# collection of model instances (e.g. a QuerySet).
# """
# if not objs:
# return
# model = objs[0].__class__
# self.field_updates.setdefault(
# model, {}).setdefault(
# (field, value), set()).update(objs)
#
# def can_fast_delete(self, objs, from_field=None):
# """
# Determines if the objects in the given queryset-like can be
# fast-deleted. This can be done if there are no cascades, no
# parents and no signal listeners for the object class.
#
# The 'from_field' tells where we are coming from - we need this to
# determine if the objects are in fact to be deleted. Allows also
# skipping parent -> child -> parent chain preventing fast delete of
# the child.
# """
# if from_field and from_field.rel.on_delete is not CASCADE:
# return False
# if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
# return False
# model = objs.model
# if (signals.pre_delete.has_listeners(model)
# or signals.post_delete.has_listeners(model)
# or signals.m2m_changed.has_listeners(model)):
# return False
# # The use of from_field comes from the need to avoid cascade back to
# # parent when parent delete is cascading to child.
# opts = model._meta
# if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
# return False
# # Foreign keys pointing to this model, both from m2m and other
# # models.
# for related in opts.get_all_related_objects(
# include_hidden=True, include_proxy_eq=True):
# if related.field.rel.on_delete is not DO_NOTHING:
# return False
# # GFK deletes
# for relation in opts.many_to_many:
# if not relation.rel.through:
# return False
# return True
#
# def collect(self, objs, source=None, nullable=False, collect_related=True,
# source_attr=None, reverse_dependency=False):
# """
# Adds 'objs' to the collection of objects to be deleted as well as all
# parent instances. 'objs' must be a homogenous iterable collection of
# model instances (e.g. a QuerySet). If 'collect_related' is True,
# related objects will be handled by their respective on_delete handler.
#
# If the call is the result of a cascade, 'source' should be the model
# that caused it and 'nullable' should be set to True, if the relation
# can be null.
#
# If 'reverse_dependency' is True, 'source' will be deleted before the
# current model, rather than after. (Needed for cascading to parent
# models, the one case in which the cascade follows the forwards
# direction of an FK rather than the reverse direction.)
# """
# if self.can_fast_delete(objs):
# self.fast_deletes.append(objs)
# return
# new_objs = self.add(objs, source, nullable,
# reverse_dependency=reverse_dependency)
# if not new_objs:
# return
#
# model = new_objs[0].__class__
#
# # Recursively collect concrete model's parent models, but not their
# # related objects. These will be found by meta.get_all_related_objects()
# concrete_model = model._meta.concrete_model
# for ptr in six.itervalues(concrete_model._meta.parents):
# if ptr:
# # FIXME: This seems to be buggy and execute a query for each
# # parent object fetch. We have the parent data in the obj,
# # but we don't have a nice way to turn that data into parent
# # object instance.
# parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
# self.collect(parent_objs, source=model,
# source_attr=ptr.rel.related_name,
# collect_related=False,
# reverse_dependency=True)
#
# if collect_related:
# for related in model._meta.get_all_related_objects(
# include_hidden=True, include_proxy_eq=True):
# field = related.field
# if field.rel.on_delete == DO_NOTHING:
# continue
# sub_objs = self.related_objects(related, new_objs)
# if self.can_fast_delete(sub_objs, from_field=field):
# self.fast_deletes.append(sub_objs)
# elif sub_objs:
# field.rel.on_delete(self, field, sub_objs, self.using)
# for field in model._meta.virtual_fields:
# if hasattr(field, 'bulk_related_objects'):
# # Its something like generic foreign key.
# sub_objs = field.bulk_related_objects(new_objs, self.using)
# self.collect(sub_objs,
# source=model,
# source_attr=field.rel.related_name,
# nullable=True)
#
# def related_objects(self, related, objs):
# """
# Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
#
# """
# return related.model._base_manager.using(self.using).filter(
# **{"%s__in" % related.field.name: objs}
# )
#
# def instances_with_model(self):
# for model, instances in six.iteritems(self.data):
# for obj in instances:
# yield model, obj
#
# def sort(self):
# sorted_models = []
# concrete_models = set()
# models = list(self.data)
# while len(sorted_models) < len(models):
# found = False
# for model in models:
# if model in sorted_models:
# continue
# dependencies = self.dependencies.get(model._meta.concrete_model)
# if not (dependencies and dependencies.difference(concrete_models)):
# sorted_models.append(model)
# concrete_models.add(model._meta.concrete_model)
# found = True
# if not found:
# return
# self.data = SortedDict([(model, self.data[model])
# for model in sorted_models])
#
# def delete(self):
# # sort instance collections
# for model, instances in self.data.items():
# self.data[model] = sorted(instances, key=attrgetter("pk"))
#
# # if possible, bring the models in an order suitable for databases that
# # don't support transactions or cannot defer constraint checks until the
# # end of a transaction.
# self.sort()
#
# with transaction.commit_on_success_unless_managed(using=self.using):
# # send pre_delete signals
# for model, obj in self.instances_with_model():
# if not model._meta.auto_created:
# signals.pre_delete.send(
# sender=model, instance=obj, using=self.using
# )
#
# # fast deletes
# for qs in self.fast_deletes:
# qs._raw_delete(using=self.using)
#
# # update fields
# for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
# query = sql.UpdateQuery(model)
# for (field, value), instances in six.iteritems(instances_for_fieldvalues):
# query.update_batch([obj.pk for obj in instances],
# {field.name: value}, self.using)
#
# # reverse instance collections
# for instances in six.itervalues(self.data):
# instances.reverse()
#
# # delete instances
# for model, instances in six.iteritems(self.data):
# query = sql.DeleteQuery(model)
# pk_list = [obj.pk for obj in instances]
# query.delete_batch(pk_list, self.using)
#
# if not model._meta.auto_created:
# for obj in instances:
# signals.post_delete.send(
# sender=model, instance=obj, using=self.using
# )
#
# # update collected instances
# for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
# for (field, value), instances in six.iteritems(instances_for_fieldvalues):
# for obj in instances:
# setattr(obj, field.attname, value)
# for model, instances in six.iteritems(self.data):
# for instance in instances:
# setattr(instance, model._meta.pk.attname, None)
# | {
"content_hash": "cfb34114536100ebf9faf5e06d702294",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 91,
"avg_line_length": 43.04936305732484,
"alnum_prop": 0.577510634362863,
"repo_name": "atruberg/django-custom",
"id": "9d08786cf3a75f91004b19f3667ca938352bb53b",
"size": "27035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/models/deletion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51013"
},
{
"name": "JavaScript",
"bytes": "98272"
},
{
"name": "Python",
"bytes": "8636914"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
# MicroPython boot script: runs on every boot before main.py.
import gc
import webrepl
import my_network

# Bring the network up first (my_network is a project module -- presumably
# it configures and connects WiFi; see MyLib) so the WebREPL server
# started next is reachable.
my_network.connect()
webrepl.start()
# Reclaim the memory used during startup.
gc.collect()
| {
"content_hash": "fb8bba418d5bc70abb185f996a26d580",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 20,
"avg_line_length": 10.666666666666666,
"alnum_prop": 0.7604166666666666,
"repo_name": "NSE-labs/ESP8266-wifi-devices",
"id": "f2f8f84999db031ba424b29abef32ee4ff1c9520",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MyLib/boot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26240"
}
],
"symlink_target": ""
} |
"""
py2exe conversion script
"""
from distutils.core import setup
import py2exe
setup(windows=[{"script":"main.py"}], options={"py2exe":{"includes":["sip"]}}) | {
"content_hash": "918cc51e97ca3d4efe23a99fa00004d1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 78,
"avg_line_length": 20,
"alnum_prop": 0.6875,
"repo_name": "AeroNotix/pdftotif",
"id": "2ec2437802b5f0fa1c206616bcb670c87dced382",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65865"
}
],
"symlink_target": ""
} |
"""A module that implements the "theanolm decode" command.
"""
import gc
import sys
import os
import logging
import numpy
import theano
from theanolm import Network
from theanolm.backend import TextFileType
from theanolm.backend import get_default_device, log_free_mem
from theanolm.scoring import LatticeBatch, LatticeDecoder, RescoredLattice
def add_arguments(parser):
    """Specifies the command line arguments supported by the "theanolm decode"
    command.

    :type parser: argparse.ArgumentParser
    :param parser: a command line argument parser
    """

    # Input/output files and batch splitting.
    argument_group = parser.add_argument_group("files")
    argument_group.add_argument(
        'model_path', metavar='MODEL-FILE', type=str,
        help='the model file that will be used to compute new word scores')
    argument_group.add_argument(
        '--lattices', metavar='FILE', type=str, nargs='*', default=[],
        help='word lattices to be decoded (default stdin, assumed to be '
             'compressed if the name ends in ".gz")')
    argument_group.add_argument(
        '--lattice-list', metavar='FILE', type=TextFileType('r'),
        help='text file containing a list of word lattices to be decoded (one '
             'path per line, the list and the lattice files are assumed to be '
             'compressed if the name ends in ".gz")')
    argument_group.add_argument(
        '--lattice-format', metavar='FORMAT', type=str, default='slf',
        choices=['slf', 'kaldi'],
        # Bug fix: the parenthetical in the help text was missing its
        # closing parenthesis.
        help='format of the lattice files, either "slf" (HTK format, default) '
             'or "kaldi" (a Kaldi lattice archive containing text '
             'CompactLattices)')
    argument_group.add_argument(
        '--kaldi-vocabulary', metavar='FILE', type=TextFileType('r'),
        default=None,
        help='mapping of words to word IDs in Kaldi lattices (usually '
             'named words.txt)')
    argument_group.add_argument(
        '--output-file', metavar='FILE', type=TextFileType('w'), default='-',
        help='where to write the best paths through the lattices or the '
             'rescored lattice (default stdout, will be compressed if the name '
             'ends in ".gz")')
    argument_group.add_argument(
        '--num-jobs', metavar='J', type=int, default=1,
        help='divide the set of lattice files into J distinct batches, and '
             'process only batch I')
    argument_group.add_argument(
        '--job', metavar='I', type=int, default=0,
        help='the index of the batch that this job should process, between 0 '
             'and J-1')

    # How the decoder combines and outputs scores.
    argument_group = parser.add_argument_group("decoding")
    argument_group.add_argument(
        '--output', metavar='FORMAT', type=str, default='ref',
        choices=['ref', 'trn', 'full', 'slf', 'kaldi'],
        help='format of the output, one of "ref" (default, utterance ID '
             'followed by words), "trn" (words followed by utterance ID in '
             'parentheses), "full" (utterance ID, acoustic score, language '
             'score, and number of words, followed by words), "slf" (rescored '
             'lattice in HTK format), "kaldi" (rescored lattice in Kaldi '
             'format)')
    argument_group.add_argument(
        '--n-best', metavar='N', type=int, default=1,
        help='print N best paths of each lattice (default 1)')
    argument_group.add_argument(
        '--nnlm-weight', metavar='LAMBDA', type=float, default=1.0,
        help="language model probabilities given by the model read from "
             "MODEL-FILE will be weighted by LAMBDA, when interpolating with "
             "the language model probabilities in the lattice (default is 1.0, "
             "meaning that the LM probabilities in the lattice will be "
             "ignored)")
    argument_group.add_argument(
        '--lm-scale', metavar='LMSCALE', type=float, default=None,
        help="scale language model log probabilities by LMSCALE when computing "
             "the total probability of a path (default is to use the LM scale "
             "specified in the lattice file, or 1.0 if not specified)")
    argument_group.add_argument(
        '--wi-penalty', metavar='WIP', type=float, default=None,
        help="penalize word insertion by adding WIP to the total log "
             "probability as many times as there are words in the path "
             "(without scaling WIP by LMSCALE)")
    argument_group.add_argument(
        '--log-base', metavar='B', type=int, default=None,
        help="convert output log probabilities to base B and WIP from base B "
             "(default is natural logarithm; this does not affect reading "
             "lattices, since they specify their internal log base)")
    argument_group.add_argument(
        '--unk-penalty', metavar='LOGPROB', type=float, default=None,
        help="use constant LOGPROB as <unk> token score (default is to use the "
             "network to predict <unk> probability)")
    argument_group.add_argument(
        '--shortlist', action="store_true",
        help='distribute <unk> token probability among the out-of-shortlist '
             'words according to their unigram frequencies in the training '
             'data')
    argument_group.add_argument(
        '--unk-from-lattice', action="store_true",
        help='use only the probability from the lattice for <unk> tokens')
    argument_group.add_argument(
        '--linear-interpolation', action="store_true",
        help="use linear interpolation of language model probabilities, "
             "instead of (pseudo) log-linear")

    # Token pruning during lattice decoding.
    argument_group = parser.add_argument_group("pruning")
    argument_group.add_argument(
        '--max-tokens-per-node', metavar='T', type=int, default=None,
        help="keep only at most T tokens at each node when decoding a lattice "
             "(default is no limit)")
    argument_group.add_argument(
        '--beam', metavar='B', type=float, default=None,
        help="prune tokens whose log probability is at least B smaller than "
             "the log probability of the best token at any given time (default "
             "is no beam pruning)")
    argument_group.add_argument(
        '--recombination-order', metavar='O', type=int, default=None,
        help="keep only the best token, when at least O previous words are "
             "identical (default is to recombine tokens only if the entire "
             "word history matches)")
    argument_group.add_argument(
        '--prune-relative', metavar='R', type=int, default=None,
        help="if set, tighten the beam and the max-tokens-per-node pruning "
             "linearly in the number of tokens in a node; those parameters "
             "will be divided by the number of tokens and multiplied by R")
    argument_group.add_argument(
        '--abs-min-max-tokens', metavar='T', type=float, default=30,
        help="if prune-extra-limit is used, do not tighten max-tokens-per-node "
             "further than this (default is 30)")
    argument_group.add_argument(
        '--abs-min-beam', metavar='B', type=float, default=150,
        help="if prune-extra-limit is used, do not tighten the beam further "
             "than this (default is 150)")

    # Device selection.
    argument_group = parser.add_argument_group("configuration")
    argument_group.add_argument(
        '--default-device', metavar='DEVICE', type=str, default=None,
        help='when multiple GPUs are present, use DEVICE as default')

    # Logging and debugging switches.
    argument_group = parser.add_argument_group("logging and debugging")
    argument_group.add_argument(
        '--log-file', metavar='FILE', type=str, default='-',
        help='path where to write log file (default is standard output)')
    argument_group.add_argument(
        '--log-level', metavar='LEVEL', type=str, default='info',
        help='minimum level of events to log, one of "debug", "info", "warn" '
             '(default "info")')
    argument_group.add_argument(
        '--debug', action="store_true",
        help='enables debugging Theano errors')
    argument_group.add_argument(
        '--profile', action="store_true",
        help='enables profiling Theano functions')
def decode(args):
    """A function that performs the "theanolm decode" command.

    Configures logging and Theano, loads the network, then decodes each
    lattice in the selected batch and writes either the n-best paths or
    a rescored lattice to the output file.

    :type args: argparse.Namespace
    :param args: a collection of command line arguments
    """

    # Configure logging before anything else so that all later messages
    # are captured.
    log_file = args.log_file
    log_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(log_level, int):
        print("Invalid logging level requested:", args.log_level,
              file=sys.stderr)
        sys.exit(1)
    log_format = '%(asctime)s %(funcName)s: %(message)s'
    if args.log_file == '-':
        logging.basicConfig(stream=sys.stdout, format=log_format, level=log_level)
    else:
        logging.basicConfig(filename=log_file, format=log_format, level=log_level)

    # Theano debugging/profiling flags must be set before compiling any
    # functions.
    if args.debug:
        theano.config.compute_test_value = 'warn'
    else:
        theano.config.compute_test_value = 'off'
    theano.config.profile = args.profile
    theano.config.profile_memory = args.profile

    # Kaldi lattices (input or output) need a word <-> ID mapping.
    if (args.lattice_format == 'kaldi') or (args.output == 'kaldi'):
        if args.kaldi_vocabulary is None:
            print("Kaldi lattice vocabulary is not given.", file=sys.stderr)
            sys.exit(1)

    default_device = get_default_device(args.default_device)
    network = Network.from_file(args.model_path,
                                mode=Network.Mode(minibatch=False),
                                default_device=default_device)

    # log_scale converts between the requested log base and the natural
    # logarithm used internally.
    log_scale = 1.0 if args.log_base is None else numpy.log(args.log_base)
    if (args.log_base is not None) and (args.lattice_format == 'kaldi'):
        logging.info("Warning: Kaldi lattice reader doesn't support logarithm "
                     "base conversion.")

    if args.wi_penalty is None:
        wi_penalty = None
    else:
        # The penalty is given in the output log base; convert to nats.
        wi_penalty = args.wi_penalty * log_scale
    decoding_options = {
        'nnlm_weight': args.nnlm_weight,
        'lm_scale': args.lm_scale,
        'wi_penalty': wi_penalty,
        'unk_penalty': args.unk_penalty,
        'use_shortlist': args.shortlist,
        'unk_from_lattice': args.unk_from_lattice,
        'linear_interpolation': args.linear_interpolation,
        'max_tokens_per_node': args.max_tokens_per_node,
        'beam': args.beam,
        'recombination_order': args.recombination_order,
        'prune_relative': args.prune_relative,
        'abs_min_max_tokens': args.abs_min_max_tokens,
        'abs_min_beam': args.abs_min_beam
    }
    logging.debug("DECODING OPTIONS")
    for option_name, option_value in decoding_options.items():
        logging.debug("%s: %s", option_name, str(option_value))

    logging.info("Building word lattice decoder.")
    decoder = LatticeDecoder(network, decoding_options)

    # LatticeBatch selects the subset of lattices belonging to this job.
    batch = LatticeBatch(args.lattices, args.lattice_list, args.lattice_format,
                         args.kaldi_vocabulary, args.num_jobs, args.job)
    for lattice_number, lattice in enumerate(batch):
        if lattice.utterance_id is None:
            # Fall back to the position in the batch as an ID.
            lattice.utterance_id = str(lattice_number)
        logging.info("Utterance `%s´ -- %d of job %d",
                     lattice.utterance_id,
                     lattice_number + 1,
                     args.job)
        log_free_mem()
        final_tokens, recomb_tokens = decoder.decode(lattice)

        if (args.output == "slf") or (args.output == "kaldi"):
            # Write a full rescored lattice instead of n-best lists.
            rescored_lattice = RescoredLattice(lattice,
                                               final_tokens,
                                               recomb_tokens,
                                               network.vocabulary)
            rescored_lattice.lm_scale = args.lm_scale
            rescored_lattice.wi_penalty = args.wi_penalty
            if args.output == "slf":
                rescored_lattice.write_slf(args.output_file)
            else:
                assert args.output == "kaldi"
                rescored_lattice.write_kaldi(args.output_file,
                                             batch.kaldi_word_to_id)
        else:
            # Write the n best paths, one line per path.
            for token in final_tokens[:min(args.n_best, len(final_tokens))]:
                line = format_token(token,
                                    lattice.utterance_id,
                                    network.vocabulary,
                                    log_scale,
                                    args.output)
                args.output_file.write(line + "\n")
        # Decoding keeps large token structures alive; reclaim memory
        # between utterances.
        gc.collect()
def format_token(token, utterance_id, vocabulary, log_scale, output_format):
    """Formats an output line from a token and an utterance ID.

    Reads word IDs from the history list of ``token`` and converts them to words
    using ``vocabulary``. The history may contain also OOV words as text, so any
    ``str`` will be printed literally.

    :type token: Token
    :param token: a token whose history will be formatted

    :type utterance_id: str
    :param utterance_id: utterance ID for full output

    :type vocabulary: Vocabulary
    :param vocabulary: mapping from word IDs to words

    :type log_scale: float
    :param log_scale: divide log probabilities by this number to convert the log
                      base

    :type output_format: str
    :param output_format: which format to write, one of "ref" (utterance ID,
        words), "trn" (words, utterance ID in parentheses), "full" (utterance
        ID, acoustic and LM scores, number of words, words)

    :rtype: str
    :returns: the formatted output line
    """

    words = token.history_words(vocabulary)
    if output_format == 'ref':
        return "{} {}".format(utterance_id, ' '.join(words))
    elif output_format == 'trn':
        return "{} ({})".format(' '.join(words), utterance_id)
    elif output_format == 'full':
        return "{} {} {} {} {} {}".format(
            utterance_id,
            token.ac_logprob / log_scale,
            token.graph_logprob / log_scale,
            token.total_logprob / log_scale,
            len(words),
            ' '.join(words))
    else:
        # Bug fix: this branch referenced ``args.output``, but ``args`` is
        # not in scope here, so an invalid format raised a NameError
        # instead of being reported. Use the actual parameter and write to
        # stderr, consistent with the error reporting in decode().
        print("Invalid output format requested:", output_format,
              file=sys.stderr)
        sys.exit(1)
| {
"content_hash": "9caee970a425d5674306ea398d9018a4",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 82,
"avg_line_length": 45.60967741935484,
"alnum_prop": 0.6134097178018247,
"repo_name": "senarvi/theanolm",
"id": "b2a3ee1a654e8c7ec2e167366b4ab6414e3e73f5",
"size": "14187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theanolm/commands/decode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "615374"
},
{
"name": "Shell",
"bytes": "66403"
}
],
"symlink_target": ""
} |
import logging
import random
from google.appengine.ext import webapp
from common import const, framework
class Index(framework.BaseRequestHandler):
    """Renders the "about" landing page."""
    def get(self):
        # 'msg' and 'msg_type' come from the query string and reach the
        # template via locals(), so these local variable names are part
        # of the template contract -- do not rename them.
        msg = self.request.get('msg')
        msg_type = self.request.get('msg_type', 'info')
        self.render(['index', 'indexAbout'], locals())
class AboutProfile(framework.BaseRequestHandler):
    """Renders the "about profile" page."""
    def get(self):
        # Local names feed the template through locals(); keep them as-is.
        msg = self.request.get('msg')
        msg_type = self.request.get('msg_type', 'info')
        self.render(['about', 'aboutProfile'], locals())
class AboutWorld(framework.BaseRequestHandler):
    """Renders the "about world" page."""
    def get(self):
        # Local names feed the template through locals(); keep them as-is.
        msg = self.request.get('msg')
        msg_type = self.request.get('msg_type', 'info')
        self.render(['about', 'aboutWorld'], locals())
# Map URLs to our RequestHandler classes above.
# Each entry is (URL regex, handler class); patterns are anchored at the
# start with '^' and allow an optional trailing slash.
_URLS = [
    ('^/about/?', Index),
    ('^/about/profile/?', AboutProfile),
    ('^/about/world/?', AboutWorld),
]
def main():
    """Dispatches the request, profiling roughly 1 request in 26."""
    profile_this_request = random.randint(0, 25) == 0
    if profile_this_request:
        framework.profile_main(_URLS)
    else:
        framework.real_main(_URLS)


if __name__ == '__main__':
    main()
| {
"content_hash": "cfa35c598a0b21ba3dc5d7228f17ed67",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 50,
"avg_line_length": 24.4390243902439,
"alnum_prop": 0.6786427145708582,
"repo_name": "PatrickKennedy/Sybil",
"id": "544d34863e0252d57fc5e1140ed3bc996a0b0354",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/about.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "96879"
},
{
"name": "Python",
"bytes": "2126986"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from .validators import has_valid_date
@python_2_unicode_compatible
class MaintenanceMessage(models.Model):
    """A scheduled maintenance announcement with its date and reason."""
    # Must pass has_valid_date (defined in .validators).
    planned_date = models.DateTimeField(validators=[has_valid_date])
    reason = models.CharField(max_length=100)

    def __str__(self):
        # Two-line human-readable representation.
        return "Planned: %s\nReason: %s" % (
            self.planned_date, self.reason)
| {
"content_hash": "025a35782981e4c7327a2d68a0b1ed09",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 29.4375,
"alnum_prop": 0.721868365180467,
"repo_name": "MattBlack85/django-about",
"id": "68e9b9a9e6218259b8cb4a41af1bac4d47c6c05a",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djabout/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5714"
}
],
"symlink_target": ""
} |
from logging import debug
from typing import Dict, List
from gitlabform.gitlab import GitLab
from gitlabform.processors.abstract_processor import AbstractProcessor
class SchedulesProcessor(AbstractProcessor):
    """Reconciles a project's GitLab pipeline schedules with its configuration.

    For every schedule description in the "schedules" config section this
    deletes, updates-in-place (only when exactly one schedule with that
    description exists), replaces, or creates the schedule, and then
    re-applies its variables from scratch.
    """

    def __init__(self, gitlab: GitLab):
        super().__init__("schedules", gitlab)

    def _process_configuration(self, project_and_group: str, configuration: dict):
        """Apply the "schedules" config section to the given project."""
        existing_schedules = self.gitlab.get_all_pipeline_schedules(project_and_group)
        # Schedule descriptions are not unique in GitLab, so collect all
        # ids sharing a description.
        schedule_ids_by_description = self.__group_schedule_ids_by_description(
            existing_schedules
        )

        for schedule_description in sorted(configuration["schedules"]):
            schedule_ids = schedule_ids_by_description.get(schedule_description)

            # Config keys use the "schedules|<description>|<field>" flat form.
            if configuration.get("schedules|" + schedule_description + "|delete"):
                if schedule_ids:
                    debug("Deleting pipeline schedules '%s'", schedule_description)
                    for schedule_id in schedule_ids:
                        self.gitlab.delete_pipeline_schedule(
                            project_and_group, schedule_id
                        )
                else:
                    debug(
                        "Not deleting pipeline schedules '%s', because none exist",
                        schedule_description,
                    )
            else:
                if schedule_ids and len(schedule_ids) == 1:
                    # Exactly one match: update it in place and reclaim
                    # ownership so the schedule runs as the API user.
                    debug(
                        "Changing existing pipeline schedule '%s'", schedule_description
                    )
                    updated_schedule = self.gitlab.update_pipeline_schedule(
                        project_and_group,
                        schedule_ids[0],
                        configuration.get("schedules|" + schedule_description),
                    )
                    self.gitlab.take_ownership(
                        project_and_group, updated_schedule["id"]
                    )
                    self.__set_schedule_variables(
                        project_and_group,
                        updated_schedule.get("id"),
                        configuration.get(
                            "schedules|" + schedule_description + "|variables"
                        ),
                    )
                elif schedule_ids:
                    # Ambiguous (several schedules share the description):
                    # delete them all and recreate a single one from config.
                    debug(
                        "Replacing existing pipeline schedules '%s'",
                        schedule_description,
                    )
                    for schedule_id in schedule_ids:
                        self.gitlab.delete_pipeline_schedule(
                            project_and_group, schedule_id
                        )
                    self.create_schedule_with_variables(
                        configuration, project_and_group, schedule_description
                    )
                else:
                    debug("Creating pipeline schedule '%s'", schedule_description)
                    self.create_schedule_with_variables(
                        configuration, project_and_group, schedule_description
                    )

    def create_schedule_with_variables(
        self, configuration, project_and_group, schedule_description
    ):
        """Create a schedule from config and then set its variables."""
        data = configuration.get("schedules|" + schedule_description)
        created_schedule = self.gitlab.create_pipeline_schedule(
            project_and_group,
            schedule_description,
            data.get("ref"),
            data.get("cron"),
            optional_data=data,
        )
        self.__set_schedule_variables(
            project_and_group,
            created_schedule.get("id"),
            configuration.get("schedules|" + schedule_description + "|variables"),
        )

    def __set_schedule_variables(self, project_and_group, schedule_id, variables):
        """Replace the schedule's variables: delete all existing, then create
        the configured ones (if any)."""
        schedule = self.gitlab.get_pipeline_schedule(project_and_group, schedule_id)

        existing_variables = schedule.get("variables")
        if existing_variables:
            debug(
                "Deleting variables for pipeline schedule '%s'", schedule["description"]
            )

            for variable in existing_variables:
                self.gitlab.delete_pipeline_schedule_variable(
                    project_and_group, schedule_id, variable.get("key")
                )

        if variables:
            for variable_key, variable_data in variables.items():
                self.gitlab.create_pipeline_schedule_variable(
                    project_and_group,
                    schedule_id,
                    variable_key,
                    variable_data.get("value"),
                    variable_data,
                )

    @staticmethod
    def __group_schedule_ids_by_description(schedules) -> Dict[str, List[str]]:
        """Map description -> list of schedule ids carrying that description."""
        schedule_ids_by_description: Dict[str, List[str]] = {}

        for schedule in schedules:
            description = schedule["description"]
            schedule_ids_by_description.setdefault(description, []).append(
                schedule["id"]
            )

        return schedule_ids_by_description
| {
"content_hash": "9087f707e4f6f37b98868ba78dc8bd56",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 88,
"avg_line_length": 41.57377049180328,
"alnum_prop": 0.5264195583596214,
"repo_name": "egnyte/gitlabform",
"id": "cd3e5e6a23d8efa6700bd4461f740e5e339bea7c",
"size": "5072",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/coverage-6.2",
"path": "gitlabform/processors/project/schedules_processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "591"
},
{
"name": "Python",
"bytes": "137279"
},
{
"name": "Shell",
"bytes": "3171"
}
],
"symlink_target": ""
} |
from app.configuration import get_value
from app.helper import output_start, php, output_error
def execute():
    """Run phpmetrics over the project, writing XML and HTML reports."""
    output_start('phpmetrics')

    metric_dir = get_value('metric-dir')
    scan_dir = get_value('project-dir') + get_value('scan-dir')

    # Build the optional --excluded-dirs argument from the configured list.
    joined = '|'.join(get_value('exclude-dirs'))
    excludes = ''
    if joined:
        excludes = '--excluded-dirs="' + joined + '"'

    print('>>> Metric dir: ' + metric_dir)
    print('>>> Excludes: ' + excludes)

    report_args = ('--extensions=php --report-xml=' + metric_dir +
                   'phpmetrics.xml --report-html=' + metric_dir +
                   'phpmetrics.html ' + excludes + ' ' + scan_dir)
    code = php('phpmetrics.phar', report_args)
    if code != 0:
        output_error('There was a error/exception while executing phpmetrics.')
| {
"content_hash": "05751fe32639d55a4fd0bd1b8bcc1a94",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 162,
"avg_line_length": 34.095238095238095,
"alnum_prop": 0.6508379888268156,
"repo_name": "mi-schi/php-code-checker",
"id": "71b690ce2a57d0238e47aa8bf069c4890f702443",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/metric/phpmetrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16348"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
import os
from numpy import r_
from math import ceil
from viz_utils import saveCurrentPlot, colorForLabel, nameFromDir
from ..utils import sequence as seq
# from ..utils.arrays import meanNormalizeCols, stdNormalizeCols, zNormalizeCols
from ..transforms import *
from ..datasets import read_ucr as ucr
# Root directory holding one subdirectory per UCR time-series dataset
# (expanded with os.path.expanduser before use).
UCR_DATASETS_DIR = "~/Desktop/datasets/ucr_data"
# ================================================================ Utils
def getAllUCRDatasetDirs():
    """Return the dataset subdirectories of UCR_DATASETS_DIR as full paths."""
    root = os.path.expanduser(UCR_DATASETS_DIR)
    candidates = [os.path.join(root, entry) for entry in os.listdir(root)]
    return filter(os.path.isdir, candidates)
def ensureDirExists(dir):
dirPath = os.path.expanduser(dir)
if not os.path.exists(dirPath):
os.mkdir(dirPath)
def znormalize(x):
    """Z-score x; constant arrays come back unchanged (avoids divide-by-zero)."""
    sigma = np.std(x)
    if sigma == 0:
        return x
    return (x - np.mean(x)) / sigma
# ================================================================ Plotting
def plotExamples(X, Y, maxPerClass, normalize, transforms):
    """Plot rows of X as lines, color-coded by class label Y.

    At most maxPerClass examples per class are drawn so the figure stays
    legible; each row is optionally z-normalized and then run through the
    optional transform chain.
    """
    # Per-class plot counters. The original used np.arange() here, which
    # pre-filled the counters with 0, 1, 2, ... so higher-numbered classes
    # were allotted fewer plots; they must all start at zero.
    numPlottedForClasses = np.zeros(max(Y) + 1, dtype=int)

    plt.figure()
    for row in range(X.shape[0]):
        # only plot a fixed number of examples of each
        # class so that the plots come out legible
        lbl = Y[row]
        if (numPlottedForClasses[lbl] > maxPerClass):
            continue
        numPlottedForClasses[lbl] += 1

        # possibly z-normalize, then apply the transform chain
        data = X[row, :]
        if normalize:
            data = znormalize(data)
        if transforms:
            for transform in transforms:
                data = transform(data)
        plt.plot(data, color=colorForLabel(lbl))
def imgExamples(X, Y, maxPerClass=np.inf, transforms=None):
    """Render the dataset as an image: one row per example, grouped by class.

    Classes are separated by padding rows, and the image is split into
    multiple columns when there are many examples.
    NOTE(review): maxPerClass is accepted but never used in this function --
    confirm whether per-class subsampling was intended here.
    """
    MAX_EXAMPLES = 2000
    EXAMPLES_PER_COL = 400
    DEFAULT_INTER_CLASS_PADDING = 1
    FIG_HEIGHT = 10
    FIG_WIDTH = 12

    # if input too large, resample X
    # NOTE(review): np.int is removed in modern NumPy; use int directly.
    if len(X) > MAX_EXAMPLES:
        idxs = np.linspace(0,len(X) - 1, MAX_EXAMPLES, dtype=np.int)
        X = X[idxs, :]

    # apply transforms, if present
    Xnew = []
    if transforms and len(transforms):
        for row in range(len(X)):
            Xt = X[row, :]
            for transform in transforms:
                Xt = transform(Xt)
            Xnew.append(Xt)
        X = np.array(Xnew)

    # x stats (used for padding value and for pinning the color scale)
    minVal = np.min(X)
    maxVal = np.max(X)
    exampleLen = len(X[0])

    # y stats
    classes = seq.uniqueElements(Y)
    classes = sorted(classes)
    numClasses = len(classes)

    # figure out how many columns to have
    fractionalCols = len(X) / float(EXAMPLES_PER_COL)
    numCols = max(1, int(ceil(fractionalCols)))

    # create padding to visually separate clases--thicker if it
    # falls on column boundaries
    paddingWidth = DEFAULT_INTER_CLASS_PADDING
    paddingVal = minVal
    if (numCols >= numClasses) and (numCols % numClasses == 0):
        paddingWidth *= 5
    padding = np.zeros((paddingWidth, exampleLen)) + paddingVal

    # split rows of X by class label
    examplesForClasses = seq.splitElementsBy(lambda i, el: Y[i], X)

    # concatenate all the data for each class, separated by a
    # few empty rows so we can distinguish the classes
    examplesWithPadding = np.zeros((1, exampleLen))
    for lbl in sorted(examplesForClasses.keys()):
        examples = np.asarray(examplesForClasses[lbl])
        examplesWithPadding = np.r_[examplesWithPadding,
                                    examples, padding]
    # drop the trailing padding after the last class
    examplesWithPadding = examplesWithPadding[:(-paddingWidth), :]

    # split data into columns so we can see it better
    if numCols > 1:
        totalRows = len(examplesWithPadding)
        fig, axes = plt.subplots(1, numCols, figsize=(FIG_WIDTH, FIG_HEIGHT))
        for i, ax, in enumerate(axes):
            startIdx = int(totalRows / numCols * i)
            endIdx = int(totalRows / numCols * (i + 1))
            subset = examplesWithPadding[startIdx:endIdx, :]
            # hack for same scale everywhere
            subset[0, 0] = minVal
            subset[-1, -1] = maxVal
            ax.imshow(subset, aspect='auto')
            ax.set_yticklabels(())
    else:
        plt.figure(figsize=(FIG_WIDTH, FIG_HEIGHT))
        plt.imshow(examplesWithPadding, aspect='auto')

    plt.tight_layout()
    # plt.show()
def plotDataset(X, Y, name, img=False, normalizeCols=False,
                maxPerClass=np.inf, transforms=None, normalize=False):
    """Plot (or image) a labeled dataset and save the resulting figure.

    ``normalize`` is new; it is appended last so existing positional callers
    keep working. It is forwarded to plotExamples() on the line-plot path --
    the original referenced an undefined ``normalize`` name there, which
    raised NameError whenever img=False.
    """
    print("plotting dataset: " + name + "...")
    if normalizeCols:
        X = meanNormalizeCols(X)
    if img:
        imgExamples(X, Y, maxPerClass, transforms)
        plt.suptitle(name)
    else:
        plotExamples(X, Y, maxPerClass, normalize, transforms)
        plt.title(name)

    # Encode the rendering options into the save suffix / subdirectory.
    suffix = ""
    subdir = ""
    if img:
        suffix += "_img"
        subdir += "_img"
    if normalizeCols:
        suffix += "_colnormalized"
        subdir += "_colnormalized"
    if maxPerClass < np.inf:
        suffix += "_" + str(maxPerClass)
        subdir += "_" + str(maxPerClass)
    if transforms:
        for transform in transforms:
            # Use a separate variable for the transform tag -- the original
            # overwrote ``name`` here, corrupting the filename passed to
            # saveCurrentPlot() below.
            tag = '_' + str(transform.__name__)
            suffix += tag
            subdir += tag
    saveCurrentPlot(name, suffix=suffix, subdir=subdir)
def plotDatasetInDir(datasetDir, img=False, normalizeCols=False,
                     maxPerClass=np.inf, transforms=None):
    """Read the dataset stored in datasetDir and plot it via plotDataset()."""
    # superimpose all the data in one plot, color-coded by class
    # NOTE(review): readAllData is not defined in this module; presumably it
    # comes from one of the star imports above -- verify before relying on it.
    X, Y = readAllData(datasetDir)
    plotDataset(X, Y, nameFromDir(datasetDir), img, normalizeCols,
                maxPerClass, transforms)
# ================================================================ Main
def main():
    """Save an image-style plot for each UCR dataset, after resampling every
    series to length 64, z-normalizing it, and SAX-discretizing it."""
    for dataset in ucr.getAllUCRDatasets():
        X = np.vstack((dataset.Xtrain, dataset.Xtest))
        Y = np.hstack((dataset.Ytrain, dataset.Ytest))
        plotDataset(X, Y, dataset.name, img=True,
                    transforms=[resampleToLength64, znormalize, sax8])
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "b5440bf769c1504bc6268ebd928113bf",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 94,
"avg_line_length": 31.22421524663677,
"alnum_prop": 0.700847335918426,
"repo_name": "dblalock/flock",
"id": "a5ef4a9d5793cb179e1843e81fabf2f3f12c7ece",
"size": "6982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/viz/one_dimensional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "745112"
},
{
"name": "Python",
"bytes": "753394"
}
],
"symlink_target": ""
} |
import sys

args = sys.argv
arg_len = len(args)

# With fewer than two real arguments, print usage text instead of running.
if arg_len <3:
    print("\n**** Written by DJP, 23/11/15 in Python 3.4 ****\n")
    print("This program takes a wrapped fasta file as input.")
    print("It outputs an unwrapped fasta file.")
    print("\n**** USAGE **** \n")
    print("unwrap_fasta.py [name of fasta file] [name of output file]\n")
else:
    input_fasta = args[1]
    output_fasta = args[2]
    # NOTE(review): output_file and in_file are never closed explicitly;
    # consider 'with' blocks so the last buffered writes cannot be lost.
    output_file = open(output_fasta, "w")
    count = 0
    in_file = open(input_fasta)
    for line in in_file:
        count = count + 1
        line = line.rstrip("\n")
        if line.startswith(">") and count == 1:
            # First header line: write as-is.
            output_file.write(line + "\n")
        elif line.startswith(">") and count > 1:
            # Later headers: leading newline terminates the previous sequence.
            output_file.write("\n" + line + "\n")
        else:
            # Sequence line: append without newline, unwrapping the record.
            output_file.write(line) | {
"content_hash": "0f90bcbf71843e551720b220106fe9bc",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 24.774193548387096,
"alnum_prop": 0.6015625,
"repo_name": "DarrenJParker/fasta_tools",
"id": "7927404b3451f90cb40dd249595cdfc06cd976a3",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unwrap_fasta.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56307"
}
],
"symlink_target": ""
} |
'''Taken from URL:
https://gist.githubusercontent.com/xively-gists/5565335/raw/RaspberryPiTutorial.py
Referred by tutorial:
https://xively.com/dev/tutorials/pi/
'''
import os
import xively
import subprocess
import time
import datetime
import requests
# extract feed_id and api_key from environment variables (both required)
FEED_ID = os.environ["FEED_ID"]
API_KEY = os.environ["API_KEY"]
# DEBUG is optional: default to False. The original read
# ``os.environ["DEBUG"] or false``, which raised KeyError when DEBUG was
# unset and NameError (lowercase ``false``) when it was an empty string.
# Note: any non-empty value -- including the string "false" -- enables debug.
DEBUG = os.environ.get("DEBUG") or False

# initialize api client
api = xively.XivelyAPIClient(API_KEY)
# function to read 1 minute load average from system uptime command
def read_loadavg():
    """Return the 1-minute load average (first field of /proc/loadavg).

    NOTE(review): the value is the raw check_output() result, trailing
    newline included -- confirm the feed accepts it in that form.
    """
    if DEBUG:
        print "Reading load average"
    return subprocess.check_output(["awk '{print $1}' /proc/loadavg"], shell=True)
# function to return a datastream object. This either creates a new datastream,
# or returns an existing one
def get_datastream(feed):
try:
datastream = feed.datastreams.get("load_avg")
if DEBUG:
print "Found existing datastream"
return datastream
except:
if DEBUG:
print "Creating new datastream"
datastream = feed.datastreams.create("load_avg", tags="load_01")
return datastream
# main program entry point - runs continuously updating our datastream with the
# current 1 minute load average
def run():
    """Poll the 1-minute load average every 10 seconds and push it to Xively."""
    print "Starting Xively tutorial script"

    feed = api.feeds.get(FEED_ID)
    datastream = get_datastream(feed)
    # Reset any previously recorded extremes for this stream.
    datastream.max_value = None
    datastream.min_value = None

    while True:
        load_avg = read_loadavg()
        if DEBUG:
            print "Updating Xively feed with value: %s" % load_avg
        datastream.current_value = load_avg
        datastream.at = datetime.datetime.utcnow()
        try:
            datastream.update()
        except requests.HTTPError as e:
            # NOTE(review): errno/strerror are often None on
            # requests.HTTPError -- confirm this prints anything useful.
            print "HTTPError({0}): {1}".format(e.errno, e.strerror)
        time.sleep(10)

run()
| {
"content_hash": "d7afbd17b62709655aa4ad5dcf5ef344",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 85,
"avg_line_length": 25.884057971014492,
"alnum_prop": 0.7127659574468085,
"repo_name": "medovarsky/sms-tx",
"id": "7f318f888d3d3c43884ff80e42f2dffb3f242ca2",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xively/xively.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "1103"
},
{
"name": "Python",
"bytes": "2311"
}
],
"symlink_target": ""
} |
import unittest
import pydoop
pp = pydoop.import_version_specific_module('_pipes')
from pydoop.pipes import Factory, RecordReader
class test_record_reader(RecordReader):
    """Fake RecordReader that emits NUMBER_RECORDS fixed key/value pairs."""

    DEFAULT_VALUE = 'The quick red fox jumped on the lazy brown dog'
    KEY_FORMAT = 'key-%d'
    NUMBER_RECORDS = 10

    def __init__(self, ctx):
        RecordReader.__init__(self)
        self.ctx = ctx
        self.counter = 0

    def next(self):
        # Exhausted: signal end-of-input with an empty record.
        if self.counter >= self.NUMBER_RECORDS:
            return (False, '', '')
        self.counter += 1
        return (True, self.KEY_FORMAT % self.counter, self.DEFAULT_VALUE)

    def getProgress(self):
        # Fraction of records emitted so far, in [0, 1].
        return self.counter / float(self.NUMBER_RECORDS)
class record_reader_tc(unittest.TestCase):
    """Exercises the Python RecordReader through the C++ pipes bridge."""

    def setUp(self):
        pass

    def test_record_reader_from_cpluplus(self):
        """Drive test_record_reader via the C++ layer and check each record."""
        d = {'input_key' : 'inputkey',
             'input_value' : 'inputvalue',
             'input_split' : 'inputsplit',
             'input_key_class' : 'keyclass',
             'input_value_class' : 'valueclass',
             'job_conf' : {}}
        ctx = pp.get_MapContext_object(d)
        self.assertEqual(ctx.getInputKey(), d['input_key'])
        self.assertEqual(ctx.getInputValue(), d['input_value'])
        self.assertEqual(ctx.getInputSplit(), d['input_split'])
        self.assertEqual(ctx.getInputKeyClass(), d['input_key_class'])
        self.assertEqual(ctx.getInputValueClass(), d['input_value_class'])
        f = Factory(None, None, test_record_reader)
        rr = f.createRecordReader(ctx)
        # NOTE(review): ``f`` is rebound below from the Factory to the
        # has-record flag returned by the reader -- confusing but harmless.
        for i in range(test_record_reader.NUMBER_RECORDS):
            (f, k, v) = pp.get_record_from_record_reader(rr)
            self.assertTrue(f)
            self.assertEqual(k, test_record_reader.KEY_FORMAT % (i+1))
            self.assertEqual(v, test_record_reader.DEFAULT_VALUE)
            self.assertAlmostEqual(pp.get_progress_from_record_reader(rr),
                                   float(i+1)/test_record_reader.NUMBER_RECORDS)
        # One read past the end must report no more records.
        (f, k, v) = pp.get_record_from_record_reader(rr)
        self.assertFalse(f)
def suite():
    """Build the unittest suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(record_reader_tc('test_record_reader_from_cpluplus'))
    return tests
# Script entry point: verbose runner reports each test individually.
if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run((suite()))
| {
"content_hash": "2e4b7813b42765c7d67dfc4c267c2769",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 31.231884057971016,
"alnum_prop": 0.6556844547563805,
"repo_name": "jkahn/pydoop-code",
"id": "d629146feb94192dea53adc366e88296fcfc4b4a",
"size": "2771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_record_reader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "731584"
},
{
"name": "C++",
"bytes": "513848"
},
{
"name": "Java",
"bytes": "480901"
},
{
"name": "Python",
"bytes": "291616"
},
{
"name": "Ruby",
"bytes": "9955"
},
{
"name": "Shell",
"bytes": "1945"
}
],
"symlink_target": ""
} |
import collections
import contextlib
import mock
import netaddr
from oslo.config import cfg
import testtools
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.openstack.common import importutils
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
from neutron.tests.unit.ofagent import fake_oflib
# Dotted path of the ML2 agent notifier that gets patched out in these tests.
NOTIFIER = ('neutron.plugins.ml2.rpc.AgentNotifierApi')
# Kernel version string; presumably one predating OVS VXLAN support --
# not referenced in this chunk, verify against the rest of the module.
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
def _mock_port(is_neutron=True, normalized_name=None):
p = mock.Mock()
p.is_neutron_port.return_value = is_neutron
if normalized_name:
p.normalized_port_name.return_value = normalized_name
return p
class OFAAgentTestCase(base.BaseTestCase):
    """Common fixture: fakes the Ryu oflib, then imports the OFA agent module
    so that it binds to the fake implementations."""

    _AGENT_NAME = 'neutron.plugins.ofagent.agent.ofa_neutron_agent'

    def setUp(self):
        super(OFAAgentTestCase, self).setUp()
        # Patch ryu's ofproto libraries with fakes BEFORE importing the agent
        # module, so the import picks up the fakes.
        self.fake_oflib_of = fake_oflib.patch_fake_oflib_of().start()
        self.mod_agent = importutils.import_module(self._AGENT_NAME)
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        self.ryuapp = mock.Mock()
        cfg.CONF.register_cli_opts([
            cfg.StrOpt('ofp-listen-host', default='',
                       help='openflow listen host'),
            cfg.IntOpt('ofp-tcp-listen-port', default=6633,
                       help='openflow tcp listen port')
        ])
        cfg.CONF.set_override('root_helper', 'fake_helper', group='AGENT')
class CreateAgentConfigMap(OFAAgentTestCase):
    """Tests for create_agent_config_map() option validation."""

    def test_create_agent_config_map_succeeds(self):
        self.assertTrue(self.mod_agent.create_agent_config_map(cfg.CONF))

    def test_create_agent_config_map_fails_for_invalid_tunnel_config(self):
        # An ip address is required for tunneling but there is no default,
        # verify this for both gre and vxlan tunnels.
        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE],
                              group='AGENT')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)
        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN],
                              group='AGENT')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)

    def test_create_agent_config_map_enable_tunneling(self):
        # Verify setting only enable_tunneling will default tunnel_type to GRE
        cfg.CONF.set_override('tunnel_types', None, group='AGENT')
        cfg.CONF.set_override('enable_tunneling', True, group='OVS')
        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
        cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF)
        self.assertEqual(cfgmap['tunnel_types'], [p_const.TYPE_GRE])

    def test_create_agent_config_map_fails_no_local_ip(self):
        # An ip address is required for tunneling but there is no default
        cfg.CONF.set_override('enable_tunneling', True, group='OVS')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)

    def test_create_agent_config_map_fails_for_invalid_tunnel_type(self):
        cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT')
        with testtools.ExpectedException(ValueError):
            self.mod_agent.create_agent_config_map(cfg.CONF)

    def test_create_agent_config_map_multiple_tunnel_types(self):
        cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS')
        cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE,
                              p_const.TYPE_VXLAN], group='AGENT')
        cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF)
        self.assertEqual(cfgmap['tunnel_types'],
                         [p_const.TYPE_GRE, p_const.TYPE_VXLAN])
class TestOFANeutronAgentOVSBridge(OFAAgentTestCase):
    """Tests for the agent's OVSBridge wrapper (datapath discovery and
    OpenFlow controller setup)."""

    def setUp(self):
        super(TestOFANeutronAgentOVSBridge, self).setUp()
        self.br_name = 'bridge1'
        self.root_helper = 'fake_helper'
        self.ovs = self.mod_agent.OVSBridge(
            self.br_name, self.root_helper, self.ryuapp)

    def test_find_datapath_id(self):
        with mock.patch.object(self.ovs, 'get_datapath_id',
                               return_value='12345'):
            self.ovs.find_datapath_id()
        self.assertEqual(self.ovs.datapath_id, '12345')

    def _fake_get_datapath(self, app, datapath_id):
        # Stand-in for ryu_api.get_datapath: fails twice, then succeeds.
        if self.ovs.retry_count >= 2:
            datapath = mock.Mock()
            datapath.ofproto_parser = mock.Mock()
            return datapath
        self.ovs.retry_count += 1
        return None

    def test_get_datapath_normal(self):
        self.ovs.retry_count = 0
        with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath',
                               new=self._fake_get_datapath):
            self.ovs.datapath_id = '0x64'
            self.ovs.get_datapath(retry_max=4)
        self.assertEqual(self.ovs.retry_count, 2)

    def test_get_datapath_retry_out_by_default_time(self):
        # Exhausting the configured retries must exit the process.
        cfg.CONF.set_override('get_datapath_retry_times', 3, group='AGENT')
        with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath',
                               return_value=None) as mock_get_datapath:
            with testtools.ExpectedException(SystemExit):
                self.ovs.datapath_id = '0x64'
                self.ovs.get_datapath(retry_max=3)
        self.assertEqual(mock_get_datapath.call_count, 3)

    def test_get_datapath_retry_out_by_specified_time(self):
        with mock.patch.object(self.mod_agent.ryu_api, 'get_datapath',
                               return_value=None) as mock_get_datapath:
            with testtools.ExpectedException(SystemExit):
                self.ovs.datapath_id = '0x64'
                self.ovs.get_datapath(retry_max=2)
        self.assertEqual(mock_get_datapath.call_count, 2)

    def test_setup_ofp_default_par(self):
        # Defaults: OpenFlow13 protocol, local controller, configured retries.
        with contextlib.nested(
            mock.patch.object(self.ovs, 'set_protocols'),
            mock.patch.object(self.ovs, 'set_controller'),
            mock.patch.object(self.ovs, 'find_datapath_id'),
            mock.patch.object(self.ovs, 'get_datapath'),
        ) as (mock_set_protocols, mock_set_controller,
              mock_find_datapath_id, mock_get_datapath):
            self.ovs.setup_ofp()
            mock_set_protocols.assert_called_with('OpenFlow13')
            mock_set_controller.assert_called_with(['tcp:127.0.0.1:6633'])
            mock_get_datapath.assert_called_with(
                cfg.CONF.AGENT.get_datapath_retry_times)
        self.assertEqual(mock_find_datapath_id.call_count, 1)

    def test_setup_ofp_specify_par(self):
        # Explicit controllers/protocol/retries are forwarded unchanged.
        controller_names = ['tcp:192.168.10.10:1234', 'tcp:172.17.16.20:5555']
        with contextlib.nested(
            mock.patch.object(self.ovs, 'set_protocols'),
            mock.patch.object(self.ovs, 'set_controller'),
            mock.patch.object(self.ovs, 'find_datapath_id'),
            mock.patch.object(self.ovs, 'get_datapath'),
        ) as (mock_set_protocols, mock_set_controller,
              mock_find_datapath_id, mock_get_datapath):
            self.ovs.setup_ofp(controller_names=controller_names,
                               protocols='OpenFlow133',
                               retry_max=11)
            mock_set_protocols.assert_called_with('OpenFlow133')
            mock_set_controller.assert_called_with(controller_names)
            mock_get_datapath.assert_called_with(11)
        self.assertEqual(mock_find_datapath_id.call_count, 1)

    def test_setup_ofp_with_except(self):
        # A failure while setting protocols must exit the process.
        with contextlib.nested(
            mock.patch.object(self.ovs, 'set_protocols',
                              side_effect=RuntimeError),
            mock.patch.object(self.ovs, 'set_controller'),
            mock.patch.object(self.ovs, 'find_datapath_id'),
            mock.patch.object(self.ovs, 'get_datapath'),
        ) as (mock_set_protocols, mock_set_controller,
              mock_find_datapath_id, mock_get_datapath):
            with testtools.ExpectedException(SystemExit):
                self.ovs.setup_ofp()
class TestOFANeutronAgent(OFAAgentTestCase):
    def setUp(self):
        """Build an OFANeutronAgent with all external interactions mocked:
        the ML2 notifier, rpc, bridge setup, MAC discovery, and the looping
        call (replaced by one that runs its function exactly once)."""
        super(TestOFANeutronAgent, self).setUp()
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        # Avoid rpc initialization for unit tests
        cfg.CONF.set_override('rpc_backend',
                              'neutron.openstack.common.rpc.impl_fake')
        kwargs = self.mod_agent.create_agent_config_map(cfg.CONF)

        class MockFixedIntervalLoopingCall(object):
            # Runs the wrapped function once, synchronously, on start().
            def __init__(self, f):
                self.f = f

            def start(self, interval=0):
                self.f()

        def _mk_test_dp(name):
            # Build a mock datapath backed by the real OF1.3 proto modules.
            ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
            ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
            dp = mock.Mock()
            dp.ofproto = ofp
            dp.ofproto_parser = ofpp
            dp.__repr__ = lambda _self: name
            return dp

        def _mk_test_br(name):
            # Mock bridge carrying a mock datapath and its proto handles.
            dp = _mk_test_dp(name)
            br = mock.Mock()
            br.datapath = dp
            br.ofproto = dp.ofproto
            br.ofparser = dp.ofproto_parser
            return br

        with contextlib.nested(
            mock.patch.object(self.mod_agent.OFANeutronAgent,
                              'setup_integration_br',
                              return_value=mock.Mock()),
            mock.patch.object(self.mod_agent.OFANeutronAgent,
                              'setup_ancillary_bridges',
                              return_value=[]),
            mock.patch.object(self.mod_agent.OVSBridge,
                              'get_local_port_mac',
                              return_value='00:00:00:00:00:01'),
            mock.patch('neutron.agent.linux.utils.get_interface_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch('neutron.openstack.common.loopingcall.'
                       'FixedIntervalLoopingCall',
                       new=MockFixedIntervalLoopingCall)):
            self.agent = self.mod_agent.OFANeutronAgent(self.ryuapp, **kwargs)
            self.agent.sg_agent = mock.Mock()
        # Wire fake datapaths/bridges into the constructed agent.
        self.int_dp = _mk_test_dp('int_br')
        self.agent.int_br.ofparser = self.int_dp.ofproto_parser
        self.agent.int_br.datapath = self.int_dp
        self.agent.tun_br = _mk_test_br('tun_br')
        self.agent.phys_brs['phys-net1'] = _mk_test_br('phys_br1')
        self.agent.phys_ofports['phys-net1'] = 777
        self.agent.int_ofports['phys-net1'] = 666
        self.datapath = _mk_test_dp('phys_br')
def _create_tunnel_port_name(self, tunnel_ip, tunnel_type):
tunnel_ip_hex = '%08x' % netaddr.IPAddress(tunnel_ip, version=4)
return '%s-%s' % (tunnel_type, tunnel_ip_hex)
    def _mock_port_bound(self, ofport=None, new_local_vlan=None,
                         old_local_vlan=None):
        """Drive port_bound() with mocked OVSDB access and verify the tag
        update and flow-delete message are (or are not) issued."""
        port = mock.Mock()
        port.ofport = ofport
        net_uuid = 'my-net-uuid'
        ofp = self.agent.int_br.datapath.ofproto
        ofpp = self.agent.int_br.datapath.ofproto_parser
        # Flow-mod expected when the port is (re)wired: delete all flows
        # matching the port's ofport, across all tables.
        expected_msg = ofpp.OFPFlowMod(
            self.agent.int_br.datapath,
            match=ofpp.OFPMatch(in_port=port.ofport),
            table_id=ofp.OFPTT_ALL,
            command=ofp.OFPFC_DELETE,
            out_group=ofp.OFPG_ANY,
            out_port=ofp.OFPP_ANY
        )
        if old_local_vlan is not None:
            # Simulate the network already being mapped to a local VLAN.
            self.agent.local_vlan_map[net_uuid] = (
                self.mod_agent.LocalVLANMapping(
                    old_local_vlan, None, None, None))
        with contextlib.nested(
            mock.patch.object(self.mod_agent.OVSBridge,
                              'set_db_attribute', return_value=True),
            mock.patch.object(self.mod_agent.OVSBridge,
                              'db_get_val', return_value=str(old_local_vlan)),
            mock.patch.object(self.agent, 'ryu_send_msg')
        ) as (set_ovs_db_func, get_ovs_db_func, ryu_send_msg_func):
            self.agent.port_bound(port, net_uuid, 'local', None, None)
        get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
        if new_local_vlan != old_local_vlan:
            set_ovs_db_func.assert_called_once_with(
                "Port", mock.ANY, "tag", str(new_local_vlan))
            if ofport != -1:
                ryu_send_msg_func.assert_called_once_with(expected_msg)
            else:
                # Invalid ofport: no flow manipulation expected.
                self.assertFalse(ryu_send_msg_func.called)
        else:
            # Already bound to the same VLAN: nothing should change.
            self.assertFalse(set_ovs_db_func.called)
            self.assertFalse(ryu_send_msg_func.called)

    def test_port_bound_deletes_flows_for_valid_ofport(self):
        self._mock_port_bound(ofport=1, new_local_vlan=1)

    def test_port_bound_ignores_flows_for_invalid_ofport(self):
        self._mock_port_bound(ofport=-1, new_local_vlan=1)

    def test_port_bound_does_not_rewire_if_already_bound(self):
        self._mock_port_bound(ofport=-1, new_local_vlan=1, old_local_vlan=1)

    def _test_port_dead(self, cur_tag=None):
        """Drive port_dead() and verify the dead-VLAN tag and drop flow are
        applied unless the port already carries the dead tag."""
        port = mock.Mock()
        port.ofport = 1
        ofpp = self.agent.int_br.datapath.ofproto_parser
        # Drop flow expected for a newly-dead port.
        expected_msg = ofpp.OFPFlowMod(
            self.agent.int_br.datapath,
            priority=2,
            match=ofpp.OFPMatch(in_port=port.ofport)
        )
        with contextlib.nested(
            mock.patch.object(self.mod_agent.OVSBridge,
                              'set_db_attribute', return_value=True),
            mock.patch.object(self.mod_agent.OVSBridge,
                              'db_get_val', return_value=cur_tag),
            mock.patch.object(self.agent, 'ryu_send_msg')
        ) as (set_ovs_db_func, get_ovs_db_func, ryu_send_msg_func):
            self.agent.port_dead(port)
        get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag")
        if cur_tag == self.mod_agent.DEAD_VLAN_TAG:
            # Already dead: no further changes expected.
            self.assertFalse(set_ovs_db_func.called)
            self.assertFalse(ryu_send_msg_func.called)
        else:
            set_ovs_db_func.assert_called_once_with(
                "Port", mock.ANY, "tag", str(self.mod_agent.DEAD_VLAN_TAG))
            ryu_send_msg_func.assert_called_once_with(expected_msg)

    def test_port_dead(self):
        self._test_port_dead()

    def test_port_dead_with_port_already_dead(self):
        self._test_port_dead(self.mod_agent.DEAD_VLAN_TAG)
def mock_scan_ports(self, port_set=None, registered_ports=None,
updated_ports=None, port_tags_dict=None):
port_tags_dict = port_tags_dict or {}
with contextlib.nested(
mock.patch.object(self.agent, '_get_ofport_names',
return_value=port_set),
mock.patch.object(self.agent.int_br, 'get_port_tag_dict',
return_value=port_tags_dict)
):
return self.agent.scan_ports(registered_ports, updated_ports)
def test_scan_ports_returns_current_only_for_unchanged_ports(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 3])
expected = {'current': vif_port_set}
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def test_scan_ports_returns_port_changes(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]), removed=set([2]))
actual = self.mock_scan_ports(vif_port_set, registered_ports)
self.assertEqual(expected, actual)
def _test_scan_ports_with_updated_ports(self, updated_ports):
vif_port_set = set([1, 3, 4])
registered_ports = set([1, 2, 4])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([4]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
def test_scan_ports_finds_known_updated_ports(self):
self._test_scan_ports_with_updated_ports(set([4]))
def test_scan_ports_ignores_unknown_updated_ports(self):
# the port '5' was not seen on current ports. Hence it has either
# never been wired or already removed and should be ignored
self._test_scan_ports_with_updated_ports(set([4, 5]))
def test_scan_ports_ignores_updated_port_if_removed(self):
vif_port_set = set([1, 3])
registered_ports = set([1, 2])
updated_ports = set([1, 2])
expected = dict(current=vif_port_set, added=set([3]),
removed=set([2]), updated=set([1]))
actual = self.mock_scan_ports(vif_port_set, registered_ports,
updated_ports)
self.assertEqual(expected, actual)
    def test_scan_ports_no_vif_changes_returns_updated_port_only(self):
        """With no add/remove churn the result carries only current+updated."""
        vif_port_set = set([1, 2, 3])
        registered_ports = set([1, 2, 3])
        updated_ports = set([2])
        expected = dict(current=vif_port_set, updated=set([2]))
        actual = self.mock_scan_ports(vif_port_set, registered_ports,
                                      updated_ports)
        self.assertEqual(expected, actual)
    def test_update_ports_returns_lost_vlan_port(self):
        """A wired port whose OVS tag was cleared is reported as updated.

        The port is present in local_vlan_map but its tag list in
        port_tags_dict is empty, which scan_ports should treat as a lost
        VLAN and schedule the port for re-wiring via 'updated'.
        """
        port = mock.Mock(port_name='tap00000001-00', ofport=1)
        lvm = self.mod_agent.LocalVLANMapping(
            vlan=1, network_type='1', physical_network=None, segmentation_id=1,
            vif_ports={port.port_name: port})
        local_vlan_map = {'1': lvm}
        port_set = set(['tap00000001-00',
                        'tap00000003-00'])
        registered_ports = set(['tap00000001-00', 'tap00000002-00'])
        # empty tag list: the port lost its VLAN tag on the bridge
        port_tags_dict = {'tap00000001-00': []}
        expected = dict(
            added=set(['tap00000003-00']),
            current=set(['tap00000001-00', 'tap00000003-00']),
            removed=set(['tap00000002-00']),
            updated=set(['tap00000001-00'])
        )
        with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map):
            actual = self.mock_scan_ports(
                port_set, registered_ports, port_tags_dict=port_tags_dict)
        self.assertEqual(expected, actual)
    def test_treat_devices_added_returns_true_for_missing_device(self):
        """An RPC failure while fetching device details requests a resync."""
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              side_effect=Exception()),
            mock.patch.object(self.agent, '_get_ports',
                              return_value=[_mock_port(True, 'xxx')])):
            # True return value means the caller must resync
            self.assertTrue(self.agent.treat_devices_added_or_updated(['xxx']))
    def _mock_treat_devices_added_updated(self, details, port, all_ports,
                                          func_name):
        """Mock treat devices added or updated.

        :param details: the details to return for the device
        :param port: port name to process
        :param all_ports: the port that _get_ports return
        :param func_name: the function that should be called
        :returns: whether the named function was called
        """
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              return_value=details),
            mock.patch.object(self.agent, '_get_ports',
                              return_value=all_ports),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
            mock.patch.object(self.agent, func_name)
        ) as (get_dev_fn, _get_ports, upd_dev_up, upd_dev_down, func):
            # False return value means no resync was requested
            self.assertFalse(self.agent.treat_devices_added_or_updated([port]))
            _get_ports.assert_called_once_with(self.agent.int_br)
        return func.called
    def test_treat_devices_added_updated_ignores_invalid_ofport(self):
        """A port with ofport == -1 (invalid) is not marked dead."""
        port_name = 'hoge'
        p1 = _mock_port(True, port_name)
        p1.ofport = -1
        self.assertFalse(self._mock_treat_devices_added_updated(
            mock.MagicMock(), port_name, [p1], 'port_dead'))
    def test_treat_devices_added_updated_marks_unknown_port_as_dead(self):
        """A valid port the plugin does not recognize gets port_dead called."""
        port_name = 'hoge'
        p1 = _mock_port(True, port_name)
        p1.ofport = 1
        self.assertTrue(self._mock_treat_devices_added_updated(
            mock.MagicMock(), port_name, [p1], 'port_dead'))
    def test_treat_devices_added_does_not_process_missing_port(self):
        """Missing vif port should keep the details RPC from being invoked.

        NOTE(review): this test never calls any agent method inside the
        patch context, so the assertion is trivially true — it does not
        actually exercise treat_devices_added. Confirm intent and add the
        agent call if this is meant to verify the missing-port path.
        """
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details'),
            mock.patch.object(self.agent.int_br, 'get_vif_port_by_id',
                              return_value=None)
        ) as (get_dev_fn, get_vif_func):
            self.assertFalse(get_dev_fn.called)
    def test_treat_devices_added_updated_updates_known_port(self):
        """A port known to the plugin gets wired via treat_vif_port."""
        port_name = 'tapd3315981-0b'
        p1 = _mock_port(False)
        p2 = _mock_port(True, port_name)
        ports = [p1, p2]
        details = mock.MagicMock()
        # make the details dict claim to contain every key queried
        details.__contains__.side_effect = lambda x: True
        self.assertTrue(self._mock_treat_devices_added_updated(
            details, port_name, ports, 'treat_vif_port'))
    def test_treat_devices_added_updated_put_port_down(self):
        """A device with admin_state_up=False is wired then reported down."""
        fake_details_dict = {'admin_state_up': False,
                             'port_id': 'xxx',
                             'device': 'xxx',
                             'network_id': 'yyy',
                             'physical_network': 'foo',
                             'segmentation_id': 'bar',
                             'network_type': 'baz'}
        with contextlib.nested(
            mock.patch.object(self.agent.plugin_rpc, 'get_device_details',
                              return_value=fake_details_dict),
            mock.patch.object(self.agent, '_get_ports',
                              return_value=[_mock_port(True, 'xxx')]),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),
            mock.patch.object(self.agent.plugin_rpc, 'update_device_down'),
            mock.patch.object(self.agent, 'treat_vif_port')
        ) as (get_dev_fn, _get_ports, upd_dev_up,
              upd_dev_down, treat_vif_port):
            # False return value: processing succeeded, no resync needed
            self.assertFalse(self.agent.treat_devices_added_or_updated(
                ['xxx']))
            self.assertTrue(treat_vif_port.called)
            self.assertTrue(upd_dev_down.called)
            _get_ports.assert_called_once_with(self.agent.int_br)
    def test_treat_devices_removed_returns_true_for_missing_device(self):
        """An RPC failure while reporting a device down requests a resync."""
        with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
                               side_effect=Exception()):
            self.assertTrue(self.agent.treat_devices_removed([{}]))
    def _mock_treat_devices_removed(self, port_exists):
        """Run treat_devices_removed with update_device_down stubbed.

        Regardless of whether the plugin says the port still exists,
        the local port must be unbound.
        """
        details = dict(exists=port_exists)
        with mock.patch.object(self.agent.plugin_rpc, 'update_device_down',
                               return_value=details):
            with mock.patch.object(self.agent, 'port_unbound') as port_unbound:
                self.assertFalse(self.agent.treat_devices_removed([{}]))
        self.assertTrue(port_unbound.called)
    def test_treat_devices_removed_unbinds_port(self):
        """Removal of a still-existing port unbinds it locally."""
        self._mock_treat_devices_removed(True)
    def test_treat_devices_removed_ignores_missing_port(self):
        """Removal of an already-gone port still completes the unbind path."""
        self._mock_treat_devices_removed(False)
    def _test_process_network_ports(self, port_info):
        """Verify process_network_ports dispatches to the right handlers.

        Security-group filters are set up for added+updated ports, the
        add/update handler sees added|updated, and the removal handler
        sees the removed set.
        """
        with contextlib.nested(
            mock.patch.object(self.agent.sg_agent, "setup_port_filters"),
            mock.patch.object(self.agent, "treat_devices_added_or_updated",
                              return_value=False),
            mock.patch.object(self.agent, "treat_devices_removed",
                              return_value=False)
        ) as (setup_port_filters, device_added_updated, device_removed):
            self.assertFalse(self.agent.process_network_ports(port_info))
            setup_port_filters.assert_called_once_with(
                port_info['added'], port_info.get('updated', set()))
            device_added_updated.assert_called_once_with(
                port_info['added'] | port_info.get('updated', set()))
            device_removed.assert_called_once_with(port_info['removed'])
    def test_process_network_ports(self):
        """Dispatch works for a port_info dict without an 'updated' key."""
        self._test_process_network_ports(
            {'current': set(['tap0']),
             'removed': set(['eth0']),
             'added': set(['eth1'])})
    def test_process_network_port_with_updated_ports(self):
        """Dispatch works when 'updated' ports are present as well."""
        self._test_process_network_ports(
            {'current': set(['tap0', 'tap1']),
             'updated': set(['tap1', 'eth1']),
             'removed': set(['eth0']),
             'added': set(['eth1'])})
    def test_report_state(self):
        """_report_state sends device count and drops start_flag after use."""
        with mock.patch.object(self.agent.state_rpc,
                               "report_state") as report_st:
            self.agent.int_br_device_count = 5
            self.agent._report_state()
            report_st.assert_called_with(self.agent.context,
                                         self.agent.agent_state)
            # start_flag must only be reported on the first report
            self.assertNotIn("start_flag", self.agent.agent_state)
            self.assertEqual(
                self.agent.agent_state["configurations"]["devices"],
                self.agent.int_br_device_count
            )
    def test_network_delete(self):
        """network_delete reclaims the local VLAN only for known networks."""
        with contextlib.nested(
            mock.patch.object(self.agent, "reclaim_local_vlan"),
            mock.patch.object(self.agent.tun_br, "cleanup_tunnel_port")
        ) as (recl_fn, clean_tun_fn):
            # unknown network: nothing to reclaim
            self.agent.network_delete("unused_context",
                                      network_id="123")
            self.assertFalse(recl_fn.called)
            # known network: reclaim its local VLAN mapping
            self.agent.local_vlan_map["123"] = "LVM object"
            self.agent.network_delete("unused_context",
                                      network_id="123")
            self.assertFalse(clean_tun_fn.called)
            recl_fn.assert_called_with("123")
    def test_port_update(self):
        """port_update records the tap device name derived from the port id."""
        port = {"id": "b1981919-f516-11e3-a8f4-08606e7f74e7",
                "network_id": "124",
                "admin_state_up": False}
        self.agent.port_update("unused_context",
                               port=port,
                               network_type="vlan",
                               segmentation_id="1",
                               physical_network="physnet")
        # 'tap' + first 11 chars of the port UUID
        self.assertEqual(set(['tapb1981919-f5']), self.agent.updated_ports)
    def test_setup_physical_bridges(self):
        """setup_physical_bridges creates the veth pair and records ofports.

        All OVS/ip_lib interactions are mocked; a parent MagicMock with
        attach_mock is used so the relative order of link delete, udevadm
        settle and veth creation can be asserted.
        """
        with contextlib.nested(
            mock.patch.object(ip_lib, "device_exists"),
            mock.patch.object(utils, "execute"),
            mock.patch.object(self.mod_agent.OVSBridge, "add_port"),
            mock.patch.object(self.mod_agent.OVSBridge, "delete_port"),
            mock.patch.object(self.mod_agent.OVSBridge, "set_protocols"),
            mock.patch.object(self.mod_agent.OVSBridge, "set_controller"),
            mock.patch.object(self.mod_agent.OVSBridge, "get_datapath_id",
                              return_value='0xa'),
            mock.patch.object(self.agent.int_br, "add_port"),
            mock.patch.object(self.agent.int_br, "delete_port"),
            mock.patch.object(ip_lib.IPWrapper, "add_veth"),
            mock.patch.object(ip_lib.IpLinkCommand, "delete"),
            mock.patch.object(ip_lib.IpLinkCommand, "set_up"),
            mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),
            mock.patch.object(self.mod_agent.ryu_api, "get_datapath",
                              return_value=self.datapath)
        ) as (devex_fn, utilsexec_fn,
              ovs_addport_fn, ovs_delport_fn, ovs_set_protocols_fn,
              ovs_set_controller_fn, ovs_datapath_id_fn, br_addport_fn,
              br_delport_fn, addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn,
              ryu_api_fn):
            devex_fn.return_value = True
            parent = mock.MagicMock()
            parent.attach_mock(utilsexec_fn, 'utils_execute')
            parent.attach_mock(linkdel_fn, 'link_delete')
            parent.attach_mock(addveth_fn, 'add_veth')
            addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"),
                                       ip_lib.IPDevice("phy-br-eth1"))
            ovs_addport_fn.return_value = "25"
            br_addport_fn.return_value = "11"
            self.agent.setup_physical_bridges({"physnet1": "br-eth"})
            expected_calls = [mock.call.link_delete(),
                              mock.call.utils_execute(['/sbin/udevadm',
                                                       'settle',
                                                       '--timeout=10']),
                              mock.call.add_veth('int-br-eth',
                                                 'phy-br-eth')]
            parent.assert_has_calls(expected_calls, any_order=False)
            # ofports returned by the (mocked) bridges must be recorded
            self.assertEqual(self.agent.int_ofports["physnet1"],
                             "11")
            self.assertEqual(self.agent.phys_ofports["physnet1"],
                             "25")
    def test_port_unbound(self):
        """port_unbound reclaims the local VLAN and ignores unknown vifs."""
        with mock.patch.object(self.agent, "reclaim_local_vlan") as reclvl_fn:
            self.agent.enable_tunneling = True
            lvm = mock.Mock()
            lvm.network_type = "gre"
            lvm.vif_ports = {"vif1": mock.Mock()}
            self.agent.local_vlan_map["netuid12345"] = lvm
            self.agent.port_unbound("vif1", "netuid12345")
            self.assertTrue(reclvl_fn.called)
            reclvl_fn.called = False
            # unbinding the last vif should reclaim again
            lvm.vif_ports = {}
            self.agent.port_unbound("vif1", "netuid12345")
            self.assertEqual(reclvl_fn.call_count, 2)
            # unknown vif: no further reclaim
            lvm.vif_ports = {"vif1": mock.Mock()}
            self.agent.port_unbound("vif3", "netuid12345")
            self.assertEqual(reclvl_fn.call_count, 2)
    def _prepare_l2_pop_ofports(self):
        """Prime the agent with two GRE networks for l2-pop fdb tests.

        net1 floods only to tunnel ofport '1'; net2 floods to '1' and '2'.
        The tunnel port names and LVM attributes set here are referenced by
        the fdb_* and reclaim tests below.
        """
        LVM = collections.namedtuple('LVM', 'net, vlan, segid, ip')
        self.lvms = [LVM(net='net1', vlan=11, segid='21', ip='1.1.1.1'),
                     LVM(net='net2', vlan=12, segid='22', ip='2.2.2.2')]
        self.tunnel_type = 'gre'
        self.tun_name1 = self._create_tunnel_port_name(self.lvms[0].ip,
                                                       self.tunnel_type)
        self.tun_name2 = self._create_tunnel_port_name(self.lvms[1].ip,
                                                       self.tunnel_type)
        lvm1 = mock.Mock()
        lvm1.network_type = self.tunnel_type
        lvm1.vlan = self.lvms[0].vlan
        lvm1.segmentation_id = self.lvms[0].segid
        lvm1.tun_ofports = set(['1'])
        lvm2 = mock.Mock()
        lvm2.network_type = self.tunnel_type
        lvm2.vlan = self.lvms[1].vlan
        lvm2.segmentation_id = self.lvms[1].segid
        lvm2.tun_ofports = set(['1', '2'])
        self.agent.local_vlan_map = {self.lvms[0].net: lvm1,
                                     self.lvms[1].net: lvm2}
        self.agent.tun_br_ofports = {self.tunnel_type:
                                     {self.lvms[0].ip: '1',
                                      self.lvms[1].ip: '2'}}
    def test_fdb_ignore_network(self):
        """fdb entries for a network the agent does not host are ignored."""
        self._prepare_l2_pop_ofports()
        fdb_entry = {'net3': {}}
        with contextlib.nested(
            mock.patch.object(self.agent, 'ryu_send_msg'),
            mock.patch.object(self.agent, '_setup_tunnel_port'),
            mock.patch.object(self.agent, 'cleanup_tunnel_port')
        ) as (ryu_send_msg_fn, add_tun_fn, clean_tun_fn):
            self.agent.fdb_add(None, fdb_entry)
            self.assertFalse(add_tun_fn.called)
            self.agent.fdb_remove(None, fdb_entry)
            self.assertFalse(clean_tun_fn.called)
            self.assertFalse(ryu_send_msg_fn.called)
    def test_fdb_ignore_self(self):
        """fdb entries pointing at the agent's own IP are ignored."""
        self._prepare_l2_pop_ofports()
        self.agent.local_ip = 'agent_ip'
        fdb_entry = {self.lvms[1].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun2',
                      'ports':
                      {'agent_ip':
                       [['mac', 'ip'],
                        n_const.FLOODING_ENTRY]}}}
        with mock.patch.object(self.agent.tun_br,
                               "defer_apply_on") as defer_fn:
            self.agent.fdb_add(None, fdb_entry)
            self.assertFalse(defer_fn.called)
            self.agent.fdb_remove(None, fdb_entry)
            self.assertFalse(defer_fn.called)
    def test_fdb_add_flows(self):
        """fdb_add for a remote agent installs unicast and flood flows."""
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[0].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun1',
                      'ports':
                      {self.lvms[1].ip:
                       [['mac', 'ip'],
                        n_const.FLOODING_ENTRY]}}}
        with contextlib.nested(
            mock.patch.object(self.agent, 'ryu_send_msg'),
            mock.patch.object(self.agent.tun_br, '_setup_tunnel_port'),
        ) as (ryu_send_msg_fn, add_tun_fn):
            add_tun_fn.return_value = '2'
            self.agent.fdb_add(None, fdb_entry)
            # one OpenFlow message per fdb entry (unicast + flooding)
            self.assertEqual(ryu_send_msg_fn.call_count, 2)
    def test_fdb_del_flows(self):
        """fdb_remove for a remote agent deletes the corresponding flows."""
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[1].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun2',
                      'ports':
                      {self.lvms[1].ip:
                       [['mac', 'ip'],
                        n_const.FLOODING_ENTRY]}}}
        with mock.patch.object(self.agent,
                               'ryu_send_msg') as ryu_send_msg_fn:
            self.agent.fdb_remove(None, fdb_entry)
            self.assertEqual(ryu_send_msg_fn.call_count, 3)
    def test_fdb_add_port(self):
        """fdb_add sets up a tunnel port only for a previously unseen IP."""
        self._prepare_l2_pop_ofports()
        tunnel_ip = '10.10.10.10'
        tun_name = self._create_tunnel_port_name(tunnel_ip,
                                                 self.tunnel_type)
        fdb_entry = {self.lvms[0].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun1',
                      'ports': {self.lvms[0].ip: [['mac', 'ip']]}}}
        with contextlib.nested(
            mock.patch.object(self.agent, 'ryu_send_msg'),
            mock.patch.object(self.agent, '_setup_tunnel_port')
        ) as (ryu_send_msg_fn, add_tun_fn):
            # known IP: no new tunnel port
            self.agent.fdb_add(None, fdb_entry)
            self.assertFalse(add_tun_fn.called)
            # new IP: a tunnel port must be created
            fdb_entry[self.lvms[0].net]['ports'][tunnel_ip] = [['mac', 'ip']]
            self.agent.fdb_add(None, fdb_entry)
            add_tun_fn.assert_called_with(
                tun_name, tunnel_ip, self.tunnel_type)
    def test_fdb_del_port(self):
        """fdb_remove deletes the tunnel port when the peer disappears."""
        self._prepare_l2_pop_ofports()
        fdb_entry = {self.lvms[1].net:
                     {'network_type': self.tunnel_type,
                      'segment_id': 'tun2',
                      'ports': {self.lvms[1].ip: [n_const.FLOODING_ENTRY]}}}
        with contextlib.nested(
            mock.patch.object(self.agent, 'ryu_send_msg'),
            mock.patch.object(self.agent.tun_br, 'delete_port')
        ) as (ryu_send_msg_fn, del_port_fn):
            self.agent.fdb_remove(None, fdb_entry)
            del_port_fn.assert_called_once_with(self.tun_name2)
    def test_recl_lv_port_to_preserve(self):
        """Reclaiming net1 keeps its tunnel port (still used elsewhere)."""
        self._prepare_l2_pop_ofports()
        self.agent.enable_tunneling = True
        with mock.patch.object(
            self.agent.tun_br, 'cleanup_tunnel_port'
        ) as clean_tun_fn:
            self.agent.reclaim_local_vlan(self.lvms[0].net)
            self.assertFalse(clean_tun_fn.called)
    def test_recl_lv_port_to_remove(self):
        """Reclaiming net2 deletes its now-unused tunnel port."""
        self._prepare_l2_pop_ofports()
        self.agent.enable_tunneling = True
        with contextlib.nested(
            mock.patch.object(self.agent.tun_br, 'delete_port'),
            mock.patch.object(self.agent, 'ryu_send_msg')
        ) as (del_port_fn, ryu_send_msg_fn):
            self.agent.reclaim_local_vlan(self.lvms[1].net)
            del_port_fn.assert_called_once_with(self.tun_name2)
    def test_daemon_loop_uses_polling_manager(self):
        """daemon_loop obtains a polling manager and hands it to the loop."""
        with mock.patch(
            'neutron.agent.linux.polling.get_polling_manager'
        ) as mock_get_pm:
            fake_pm = mock.Mock()
            mock_get_pm.return_value = fake_pm
            # the polling manager is used as a context manager
            fake_pm.__enter__ = mock.Mock()
            fake_pm.__exit__ = mock.Mock()
            with mock.patch.object(
                self.agent, 'ovsdb_monitor_loop'
            ) as mock_loop:
                self.agent.daemon_loop()
        mock_get_pm.assert_called_once_with(True, 'fake_helper',
                                            constants.DEFAULT_OVSDBMON_RESPAWN)
        mock_loop.assert_called_once_with(polling_manager=fake_pm.__enter__())
    def test__setup_tunnel_port_error_negative(self):
        """A '-1' ofport from add_tunnel_port is logged and yields 0."""
        with contextlib.nested(
            mock.patch.object(self.agent.tun_br, 'add_tunnel_port',
                              return_value='-1'),
            mock.patch.object(self.mod_agent.LOG, 'error')
        ) as (add_tunnel_port_fn, log_error_fn):
            ofport = self.agent._setup_tunnel_port(
                'gre-1', 'remote_ip', p_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment)
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
            self.assertEqual(ofport, 0)
    def test__setup_tunnel_port_error_not_int(self):
        """A non-integer ofport from add_tunnel_port is logged and yields 0."""
        with contextlib.nested(
            mock.patch.object(self.agent.tun_br, 'add_tunnel_port',
                              return_value=None),
            mock.patch.object(self.mod_agent.LOG, 'exception'),
            mock.patch.object(self.mod_agent.LOG, 'error')
        ) as (add_tunnel_port_fn, log_exc_fn, log_error_fn):
            ofport = self.agent._setup_tunnel_port(
                'gre-1', 'remote_ip', p_const.TYPE_GRE)
            add_tunnel_port_fn.assert_called_once_with(
                'gre-1', 'remote_ip', self.agent.local_ip, p_const.TYPE_GRE,
                self.agent.vxlan_udp_port, self.agent.dont_fragment)
            log_exc_fn.assert_called_once_with(
                _("ofport should have a value that can be "
                  "interpreted as an integer"))
            log_error_fn.assert_called_once_with(
                _("Failed to set-up %(type)s tunnel port to %(ip)s"),
                {'type': p_const.TYPE_GRE, 'ip': 'remote_ip'})
            self.assertEqual(ofport, 0)
def test_tunnel_sync(self):
self.agent.local_ip = 'agent_ip'
self.agent.context = 'fake_context'
self.agent.tunnel_types = ['vxlan']
with mock.patch.object(
self.agent.plugin_rpc, 'tunnel_sync'
) as tunnel_sync_rpc_fn:
self.agent.tunnel_sync()
tunnel_sync_rpc_fn.assert_called_once_with(
self.agent.context,
self.agent.local_ip,
self.agent.tunnel_types[0])
    def test__provision_local_vlan_inbound_for_tunnel(self):
        """Tunnel ingress: push local VLAN 1 for tunnel_id 3, goto learn table."""
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._provision_local_vlan_inbound_for_tunnel(1, 'gre', 3)
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.tun_br.datapath,
            instructions=[
                ofpp.OFPInstructionActions(
                    ofp.OFPIT_APPLY_ACTIONS,
                    [
                        ofpp.OFPActionPushVlan(),
                        ofpp.OFPActionSetField(vlan_vid=1 |
                                               ofp.OFPVID_PRESENT),
                    ]),
                ofpp.OFPInstructionGotoTable(
                    table_id=constants.LEARN_FROM_TUN),
            ],
            match=ofpp.OFPMatch(tunnel_id=3),
            priority=1,
            table_id=constants.TUN_TABLE['gre'])
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__provision_local_vlan_outbound(self):
        """Egress VLAN net: rewrite local vid 888 to provider vid 999.

        NOTE(review): in_port=777 appears to be the phys ofport wired up by
        the shared test fixture — confirm against setUp.
        """
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._provision_local_vlan_outbound(888, 999, 'phys-net1')
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.phys_brs['phys-net1'].datapath,
            instructions=[
                ofpp.OFPInstructionActions(
                    ofp.OFPIT_APPLY_ACTIONS,
                    [
                        ofpp.OFPActionSetField(vlan_vid=999),
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]
                )
            ],
            match=ofpp.OFPMatch(
                in_port=777,
                vlan_vid=888 | ofp.OFPVID_PRESENT
            ),
            priority=4)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__provision_local_vlan_inbound(self):
        """Ingress VLAN net: rewrite provider vid 999 to local vid 888."""
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._provision_local_vlan_inbound(888, 999, 'phys-net1')
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.int_br.datapath,
            instructions=[
                ofpp.OFPInstructionActions(
                    ofp.OFPIT_APPLY_ACTIONS,
                    [
                        ofpp.OFPActionSetField(
                            vlan_vid=888 | ofp.OFPVID_PRESENT
                        ),
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]
                )
            ],
            match=ofpp.OFPMatch(in_port=666, vlan_vid=999),
            priority=3)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__reclaim_local_vlan_outbound(self):
        """Reclaim (VLAN net) deletes the egress flow on the phys bridge."""
        lvm = mock.Mock()
        lvm.network_type = p_const.TYPE_VLAN
        lvm.segmentation_id = 555
        lvm.vlan = 444
        lvm.physical_network = 'phys-net1'
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._reclaim_local_vlan_outbound(lvm)
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.phys_brs['phys-net1'].datapath,
            command=ofp.OFPFC_DELETE,
            match=ofpp.OFPMatch(
                in_port=777,
                vlan_vid=444 | ofp.OFPVID_PRESENT
            ),
            out_group=ofp.OFPG_ANY,
            out_port=ofp.OFPP_ANY,
            table_id=ofp.OFPTT_ALL)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__reclaim_local_vlan_inbound(self):
        """Reclaim (VLAN net) deletes the ingress flow on the int bridge."""
        lvm = mock.Mock()
        lvm.network_type = p_const.TYPE_VLAN
        lvm.segmentation_id = 555
        lvm.vlan = 444
        lvm.physical_network = 'phys-net1'
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._reclaim_local_vlan_inbound(lvm)
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.int_br.datapath,
            command=ofp.OFPFC_DELETE,
            match=ofpp.OFPMatch(
                in_port=666,
                vlan_vid=555 | ofp.OFPVID_PRESENT
            ),
            out_group=ofp.OFPG_ANY,
            out_port=ofp.OFPP_ANY,
            table_id=ofp.OFPTT_ALL)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__provision_local_vlan_outbound_flat(self):
        """Egress flat net: pop the local VLAN tag instead of rewriting it."""
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            # OFPVID_NONE marks a flat (untagged) provider network
            self.agent._provision_local_vlan_outbound(888, ofp.OFPVID_NONE,
                                                      'phys-net1')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.phys_brs['phys-net1'].datapath,
            instructions=[
                ofpp.OFPInstructionActions(
                    ofp.OFPIT_APPLY_ACTIONS,
                    [
                        ofpp.OFPActionPopVlan(),
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]
                )
            ],
            match=ofpp.OFPMatch(
                in_port=777,
                vlan_vid=888 | ofp.OFPVID_PRESENT
            ),
            priority=4)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__provision_local_vlan_inbound_flat(self):
        """Ingress flat net: push the local VLAN tag onto untagged frames."""
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._provision_local_vlan_inbound(888, ofp.OFPVID_NONE,
                                                     'phys-net1')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.int_br.datapath,
            instructions=[
                ofpp.OFPInstructionActions(
                    ofp.OFPIT_APPLY_ACTIONS,
                    [
                        ofpp.OFPActionPushVlan(),
                        ofpp.OFPActionSetField(
                            vlan_vid=888 | ofp.OFPVID_PRESENT
                        ),
                        ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
                    ]
                )
            ],
            match=ofpp.OFPMatch(in_port=666, vlan_vid=ofp.OFPVID_NONE),
            priority=3)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__reclaim_local_vlan_outbound_flat(self):
        """Reclaim (flat net) deletes the egress flow on the phys bridge."""
        lvm = mock.Mock()
        lvm.network_type = p_const.TYPE_FLAT
        lvm.segmentation_id = 555
        lvm.vlan = 444
        lvm.physical_network = 'phys-net1'
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._reclaim_local_vlan_outbound(lvm)
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.phys_brs['phys-net1'].datapath,
            command=ofp.OFPFC_DELETE,
            match=ofpp.OFPMatch(
                in_port=777,
                vlan_vid=444 | ofp.OFPVID_PRESENT
            ),
            out_group=ofp.OFPG_ANY,
            out_port=ofp.OFPP_ANY,
            table_id=ofp.OFPTT_ALL)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__reclaim_local_vlan_inbound_flat(self):
        """Reclaim (flat net) deletes the untagged ingress flow."""
        lvm = mock.Mock()
        lvm.network_type = p_const.TYPE_FLAT
        lvm.segmentation_id = 555
        lvm.vlan = 444
        lvm.physical_network = 'phys-net1'
        with mock.patch.object(self.agent, 'ryu_send_msg') as sendmsg:
            self.agent._reclaim_local_vlan_inbound(lvm)
        ofp = importutils.import_module('ryu.ofproto.ofproto_v1_3')
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        expected_msg = ofpp.OFPFlowMod(
            self.agent.int_br.datapath,
            command=ofp.OFPFC_DELETE,
            match=ofpp.OFPMatch(
                in_port=666,
                vlan_vid=ofp.OFPVID_NONE
            ),
            out_group=ofp.OFPG_ANY,
            out_port=ofp.OFPP_ANY,
            table_id=ofp.OFPTT_ALL)
        sendmsg.assert_has_calls([mock.call(expected_msg)])
    def test__get_ports(self):
        """_get_ports issues a PortDescStats request and yields Port objects."""
        ofpp = importutils.import_module('ryu.ofproto.ofproto_v1_3_parser')
        reply = [ofpp.OFPPortDescStatsReply(body=[ofpp.OFPPort(name='hoge',
                                                               port_no=8)])]
        sendmsg = mock.Mock(return_value=reply)
        self.mod_agent.ryu_api.send_msg = sendmsg
        result = self.agent._get_ports(self.agent.int_br)
        result = list(result)  # convert generator to list.
        self.assertEqual(1, len(result))
        self.assertEqual('hoge', result[0].port_name)
        self.assertEqual(8, result[0].ofport)
        expected_msg = ofpp.OFPPortDescStatsRequest(
            datapath=self.agent.int_br.datapath)
        sendmsg.assert_has_calls([mock.call(app=self.agent.ryuapp,
                                  msg=expected_msg, reply_cls=ofpp.OFPPortDescStatsReply,
                                  reply_multi=True)])
def test__get_ofport_names(self):
names = ['p111', 'p222', 'p333']
ps = [_mock_port(True, x) for x in names]
with mock.patch.object(self.agent, '_get_ports',
return_value=ps) as _get_ports:
result = self.agent._get_ofport_names('hoge')
_get_ports.assert_called_once_with('hoge')
self.assertEqual(set(names), result)
class AncillaryBridgesTest(OFAAgentTestCase):
    """Tests for discovery of ancillary (external) bridges at agent startup."""
    def setUp(self):
        super(AncillaryBridgesTest, self).setUp()
        notifier_p = mock.patch(NOTIFIER)
        notifier_cls = notifier_p.start()
        self.notifier = mock.Mock()
        notifier_cls.return_value = self.notifier
        # Avoid rpc initialization for unit tests
        cfg.CONF.set_override('rpc_backend',
                              'neutron.openstack.common.rpc.impl_fake')
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        self.kwargs = self.mod_agent.create_agent_config_map(cfg.CONF)
    def _test_ancillary_bridges(self, bridges, ancillary):
        """Build an agent over ``bridges`` and check ancillary detection.

        ``ancillary`` lists the external-id values returned, one per
        get_bridge_external_bridge_id call, in order.
        """
        device_ids = ancillary[:]
        # NOTE(review): 'self' here receives the bridge name positionally
        # when this is used as a mock side_effect for a module-level
        # function — the parameter is misnamed but harmless.
        def pullup_side_effect(self, *args):
            result = device_ids.pop(0)
            return result
        with contextlib.nested(
            mock.patch.object(self.mod_agent.OFANeutronAgent,
                              'setup_integration_br',
                              return_value=mock.Mock()),
            mock.patch('neutron.agent.linux.utils.get_interface_mac',
                       return_value='00:00:00:00:00:01'),
            mock.patch.object(self.mod_agent.OVSBridge,
                              'get_local_port_mac',
                              return_value='00:00:00:00:00:01'),
            mock.patch('neutron.agent.linux.ovs_lib.get_bridges',
                       return_value=bridges),
            mock.patch(
                'neutron.agent.linux.ovs_lib.get_bridge_external_bridge_id',
                side_effect=pullup_side_effect)):
            self.agent = self.mod_agent.OFANeutronAgent(
                self.ryuapp, **self.kwargs)
            self.assertEqual(len(ancillary), len(self.agent.ancillary_brs))
            if ancillary:
                bridges = [br.br_name for br in self.agent.ancillary_brs]
                for br in ancillary:
                    self.assertIn(br, bridges)
    def test_ancillary_bridges_single(self):
        """One external bridge is detected as ancillary."""
        bridges = ['br-int', 'br-ex']
        self._test_ancillary_bridges(bridges, ['br-ex'])
    def test_ancillary_bridges_none(self):
        """No ancillary bridges when only br-int exists."""
        bridges = ['br-int']
        self._test_ancillary_bridges(bridges, [])
    def test_ancillary_bridges_multiple(self):
        """Multiple external bridges are all detected."""
        bridges = ['br-int', 'br-ex1', 'br-ex2']
        self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2'])
| {
"content_hash": "abaa11423934135e1668f0d8ec9c4f9a",
"timestamp": "",
"source": "github",
"line_count": 1154,
"max_line_length": 79,
"avg_line_length": 45.25649913344888,
"alnum_prop": 0.5616934094129361,
"repo_name": "shakamunyi/neutron-dvr",
"id": "aaec36c9d62b614ac686478c3874d4d87a6d6f8c",
"size": "53076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/ofagent/test_ofa_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1451"
},
{
"name": "Python",
"bytes": "9657058"
},
{
"name": "Shell",
"bytes": "9202"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``x`` component of ``mesh3d.lightposition``."""
    def __init__(self, plotly_name="x", parent_name="mesh3d.lightposition", **kwargs):
        # Pull the overridable defaults out of kwargs, then forward the rest.
        edit_type = kwargs.pop("edit_type", "calc")
        maximum = kwargs.pop("max", 100000)
        minimum = kwargs.pop("min", -100000)
        role = kwargs.pop("role", "style")
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=maximum,
            min=minimum,
            role=role,
            **kwargs
        )
| {
"content_hash": "1808f4efaed4c9c90511a4cb77143282",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 86,
"avg_line_length": 37.42857142857143,
"alnum_prop": 0.5820610687022901,
"repo_name": "plotly/python-api",
"id": "607535cd1109df831170f626465f91660dac77f1",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/mesh3d/lightposition/_x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Distribution metadata for the pyscale framework.
setup(
    name = 'pyscale',
    version = '0.1.1',
    platforms = 'linux',
    license = 'GPLv3',
    url = 'https://github.com/alexcepoi/pyscale',
    download_url = 'https://github.com/alexcepoi/pyscale/zipball/master',
    description = 'General purpose Python framework for writing highly scalable applications',
    long_description = open('README.rst').read(),
    packages = ['pyscale', 'pyscale.lib', 'pyscale.tools', 'pyscale.utils', 'pyscale.utils.gevsubprocess', 'pyscale.zmq'],
    scripts = ['bin/pyscale'],
    include_package_data = True,
    author = 'Alexandru Cepoi',
    author_email = 'alex.cepoi@gmail.com',
    maintainer = 'Alexandru Cepoi',
    maintainer_email = 'alex.cepoi@gmail.com',
    # runtime dependencies; 'nose' here is a test dependency bundled in
    install_requires = ['pyzmq', 'gevent', 'gevent_zeromq', 'cake', 'argparse', 'jinja2', 'nose'],
)
| {
"content_hash": "680e97c9d9dde664b33f3446e8151c36",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 119,
"avg_line_length": 29.77777777777778,
"alnum_prop": 0.695273631840796,
"repo_name": "alexcepoi/pyscale",
"id": "8d56b5e93267dce4358138c854faed718dc95979",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "82"
},
{
"name": "Python",
"bytes": "40170"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.datetime_safe
class Migration(migrations.Migration):
    """Add a non-null ``release_date`` DateField to the film model."""
    dependencies = [
        ('resources', '0002_auto_20141213_1454'),
    ]
    operations = [
        migrations.AddField(
            model_name='film',
            name='release_date',
            # default backfills existing rows; preserve_default=False drops
            # it from the model after the migration runs
            field=models.DateField(default=django.utils.datetime_safe.date.today),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "7886486703c34ba8fa3b456d58cc23c6",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 82,
"avg_line_length": 24.2,
"alnum_prop": 0.6260330578512396,
"repo_name": "phalt/swapi",
"id": "a521b369de1383f9051126082f128a370ac767bd",
"size": "508",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "resources/migrations/0003_film_release_date.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "193"
},
{
"name": "HTML",
"bytes": "16929"
},
{
"name": "Makefile",
"bytes": "1193"
},
{
"name": "Python",
"bytes": "48196"
}
],
"symlink_target": ""
} |
from __future__ import division
from phoneclassification.phoneclassification.confusion_matrix import confusion_matrix
import numpy as np
import argparse,collections
from phoneclassification.phoneclassification._fast_EM import EM, e_step, m_step
"""
Extract the set of data associated with a set of phones
and give a label set, also initialize the components using the
basic components from the model
"""
def get_use_phns_row_ids(W_meta,use_phns,phones_dict):
row_ids = []
for phn_id, phn in enumerate(use_phns):
phn_row_ids = np.where(W_meta[:,0]==phones_dict[phn])[0]
row_ids.extend(phn_row_ids)
W_meta[phn_row_ids,0] = phn_id
return W_meta, np.array(row_ids)
def get_reduced_meta(W_meta, leehon_dict):
    """Map each row's label through ``leehon_dict`` and renumber components.

    Column 0 of the returned copy holds the reduced label; column 1 holds a
    running component index (0, 1, 2, ...) within that reduced label, in row
    order. The input array is not modified.
    """
    reduced = W_meta.copy()
    per_label_count = collections.defaultdict(int)
    for row_idx, row in enumerate(W_meta):
        label = leehon_dict[row[0]]
        reduced[row_idx, 0] = label
        reduced[row_idx, 1] = per_label_count[label]
        per_label_count[label] += 1
    return reduced
# ---------------------------------------------------------------------------
# Command-line driver: fit a Bernoulli mixture per 48-phone class with EM over
# sparse patch-code features, then score 39-class accuracy on held-out data.
# NOTE: Python 2 source (print statements).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser("""File to run a basic test of the pegasos multiclass
SVM solver over the scattering features""")
parser.add_argument('--root_dir',default='/home/mark/Research/acoustic_cues',type=str,help='root directory for where to look for things')
parser.add_argument('--in_prefix',type=str,help='prefix for path containing the feature indices and number of non-zero features per datum')
parser.add_argument('--in_suffix',type=str,help='suffix for path containing the data files')
parser.add_argument('--label_in_prefix',type=str,help='prefix for path containing the feature indices and number of non-zero features per datum')
parser.add_argument('--label_in_suffix',type=str,help='suffix for path containing the data files')
parser.add_argument('--label_in_suffix_test',type=str,help='suffix for path containing the data files')
parser.add_argument('--out_prefix',type=str,help='prefix for path to save the output to')
parser.add_argument('--out_suffix',type=str,help='suffix for path to save the output to')
parser.add_argument('--total_iter',type=np.intc,help='Number of iterations to run this for')
parser.add_argument('--total_init',type=np.intc,help='Number of initializations to use in estimating the models')
parser.add_argument('--min_counts',type=np.intc,help='Minimum number of examples for each component')
parser.add_argument('--tol',type=float,help='Convergence criterion')
parser.add_argument('--ncomponents',type=np.intc,help='Maximum number of components per model')
# parser.add_argument('--',type=,help='')
args = parser.parse_args()

rootdir = args.root_dir[:]
confdir='%s/conf'%rootdir
# phones.48-39: two columns mapping each of the 48 training phones to its
# 39-phone scoring class (Lee & Hon convention).
leehon=np.loadtxt('%s/phones.48-39' % confdir,dtype=str)
phones39 = np.unique(np.sort(leehon[:,1]))
phones39_dict = dict( (v,i) for i,v in enumerate(phones39))
phones48_dict = dict( (v,i) for i,v in enumerate(leehon[:,0]))
# 48-phone id -> 39-phone id
leehon_dict = dict( (phones48_dict[p],
                     phones39_dict[q]) for p,q in leehon)
# same mapping as a flat lookup array for vectorised indexing
leehon_dict_array = np.zeros(48,dtype=int)
for k,v in leehon_dict.items():
    leehon_dict_array[k] = int(v)
leehon_phn_dict = dict( (p,q) for p,q in leehon)
# 39-phone symbol -> list of the 48-phone symbols that collapse onto it
leehon39to48 = collections.defaultdict(list)
for phn in leehon[:,0]:
    leehon39to48[leehon_phn_dict[phn]].append(phn)
use_phns39 = list(phones39[:])
use_phns48 = leehon[:,0]

# Sparse training features in CSR-like form: flat feature-index array plus
# per-row non-zero counts; row start offsets come from a cumulative sum.
patch_codes = np.load('%spatch_codes_train_%s' % (args.in_prefix,
                                                  args.in_suffix),
                      )
patch_rownnz = np.load('%spatch_rownnz_train_%s' % (args.in_prefix,
                                                    args.in_suffix),
                       )
patch_rowstartidx = np.zeros(patch_rownnz.shape[0] + 1,dtype=np.intc)
patch_rowstartidx[1:] = np.cumsum(patch_rownnz)
# NOTE(review): the *_test arrays below are loaded from the *train* file paths
# (same '%spatch_codes_train_%s' pattern) — looks like a copy/paste bug; a
# test-specific path/suffix was presumably intended. TODO confirm.
patch_codes_test = np.load('%spatch_codes_train_%s' % (args.in_prefix,
                                                       args.in_suffix),
                           )
patch_rownnz_test = np.load('%spatch_rownnz_train_%s' % (args.in_prefix,
                                                         args.in_suffix),
                            )
# NOTE(review): sized from the *train* patch_rownnz; only harmless while the
# test arrays are copies of the train arrays (see note above).
patch_rowstartidx_test = np.zeros(patch_rownnz.shape[0] + 1,dtype=np.intc)
patch_rowstartidx_test[1:] = np.cumsum(patch_rownnz_test)
n_train_data = patch_rownnz.shape[0]
n_test_data = patch_rownnz_test.shape[0]
# dim = number of distinct feature codes (max code + 1 over train and test)
dim = np.intc(max(patch_codes.max(),patch_codes_test.max()) + 1)

# 48-phone labels for train and test examples
y = np.load('%sy_%s' % (args.label_in_prefix,
                        args.label_in_suffix),
            ).astype(np.int16)
y_test = np.load('%sy_%s' % (args.label_in_prefix,
                             args.label_in_suffix_test),
                 ).astype(np.int16)
# NOTE(review): n_test_data is overwritten here after being set above.
n_test_data = y_test.shape[0]
y_test39 = np.array([ leehon_dict[phone_id] for phone_id in y_test]).astype(np.int16)
# Accuracy under the 39-class collapse: pick the argmax classifier per test
# datum, map its 48-class label to 39 classes, compare against y_test39.
# NOTE(review): `sparse_dotmm` is not imported in this module (only EM, e_step
# and m_step are) — this lambda would raise NameError when called. TODO confirm
# the intended import from phoneclassification._fast_EM or a sibling module.
test_accuracy = lambda W : np.sum(leehon_dict_array[weights_classes[sparse_dotmm(patch_codes_test,patch_rownnz_test,patch_rowstartidx_test,W.ravel().copy(),n_test_data,W.shape[1],W.shape[0]).argmax(1)]] == y_test39)/float(len(y_test39))

# Upper bound on total mixture components across all 48 phones.
max_n_classifiers = args.ncomponents * 48
classifier_id = 0
for phn_id, phn in enumerate(leehon[:,0]):
    print "Working on phone %s which has id %d" % (phn, phn_id)
    print "classifier_id = %d" % classifier_id
    phn_n_data = (y == phn_id).sum()
    phn_rownnz = patch_rownnz[y==phn_id].copy()
    # Assumes the examples for each phone occupy a contiguous block of rows;
    # the pdb trap below fires if that assumption is violated.
    phn_start_idx = np.where(y==phn_id)[0].min()
    phn_end_idx = np.where(y==phn_id)[0].max()+1
    if (phn_end_idx - phn_start_idx) != len(phn_rownnz):
        import pdb; pdb.set_trace()
    # Slice out this phone's CSR block and rebase the offsets to zero.
    phn_rowstartidx = patch_rowstartidx[phn_start_idx:phn_end_idx+1].copy()
    phn_feature_ind = patch_codes[phn_rowstartidx[0]:phn_rowstartidx[-1]].copy()
    phn_rowstartidx -= phn_rowstartidx[0]
    # NOTE(review): `converged` is never read afterwards.
    converged = False
    cur_ncomponents = args.ncomponents
    if phn_id == 0:
        # Lazily allocate the global accumulators on the first phone.
        avgs = np.zeros((max_n_classifiers,
                         dim) )
        # NOTE(review): `counts` is allocated but never used.
        counts = np.zeros(max_n_classifiers
                          )
        # will keep track of which average belongs to which
        # phone and mixture component--this allows us to
        # drop mixture components if they are potentially
        # not helping
        all_weights = np.zeros(max_n_classifiers,dtype=float)
        meta = np.zeros((max_n_classifiers
                         ,2),dtype=int)
    n_init = 0
    tol = float(args.tol)
    total_iter = np.intc(args.total_iter)
    # Multiple random restarts; keep the best log-likelihood solution.
    while n_init < args.total_init:
        # Random hard assignment of each datum to one component.
        A = np.zeros((phn_n_data,cur_ncomponents),dtype=float)
        A[np.arange(phn_n_data),np.random.randint(cur_ncomponents,size=phn_n_data)] = 1
        A = A.reshape(A.size)
        P = np.zeros(dim*cur_ncomponents,dtype=float)
        weights = np.zeros(cur_ncomponents,dtype=float)
        # m_step(phn_feature_ind, phn_rownnz, phn_rowstartidx,
        #        P,weights, A, phn_n_data, dim, cur_ncomponents)
        # import pdb; pdb.set_trace()
        P,weights, A, loglikelihood = EM(phn_feature_ind, phn_rownnz, phn_rowstartidx,
                                         phn_n_data,dim,cur_ncomponents,tol, total_iter,
                                         A)
        A = A.reshape(phn_n_data,cur_ncomponents)
        P = P.reshape(cur_ncomponents, dim)
        # Soft counts per component; prune components supported by fewer
        # than --min_counts examples and re-run EM until all survive.
        component_counts = A.sum(0)
        good_components = component_counts >= args.min_counts
        n_good = good_components.sum()
        while np.any(component_counts < args.min_counts):
            good_components = component_counts >= args.min_counts
            n_good = good_components.sum()
            P = P[good_components]
            weights = weights[good_components]
            A = np.zeros((phn_n_data,n_good),dtype=float)
            A = A.reshape(A.size)
            P = P.reshape(P.size)
            # Re-derive responsibilities for the surviving components.
            # NOTE(review): the returned `likelihood` is unused.
            likelihood = e_step(phn_feature_ind,
                                phn_rownnz,
                                phn_rowstartidx,
                                P,
                                weights,
                                A, phn_n_data, dim, n_good )
            P,weights, A, loglikelihood = EM(phn_feature_ind, phn_rownnz, phn_rowstartidx,
                                             phn_n_data,dim,n_good,args.tol, args.total_iter,
                                             A)
            A = A.reshape(phn_n_data,n_good)
            P = P.reshape(n_good, dim)
            component_counts = A.sum(0)
        if n_init == 0:
            bestP = P.copy()
            bestweights = weights.copy()
            best_ll = loglikelihood
            n_use_components = n_good
        elif loglikelihood > best_ll:
            print "Updated best loglikelihood to : %g " % loglikelihood
            bestP = P.copy()
            bestweights = weights.copy()
            best_ll = loglikelihood
            n_use_components = n_good
        n_init += 1
    # add the components
    avgs[classifier_id:classifier_id + n_use_components] = bestP[:]
    all_weights[classifier_id:classifier_id + n_use_components] = bestweights[:]
    meta[classifier_id:classifier_id+n_use_components,0] = phn_id
    meta[classifier_id:classifier_id+n_use_components,1] = np.arange(n_use_components)
    classifier_id += n_use_components

print "Total of %d models" % classifier_id
np.save('%s/avgs_%s' % (args.out_prefix, args.out_suffix),
        avgs[:classifier_id])
# NOTE(review): this saves `weights` (the last phone's final EM weights, whose
# length need not even be classifier_id) — `all_weights[:classifier_id]` looks
# like what was intended. TODO confirm.
np.save('%s/weights_%s' % (args.out_prefix, args.out_suffix),
        weights[:classifier_id])
np.save('%s/meta_%s' % (args.out_prefix, args.out_suffix),
        meta[:classifier_id])
# now we test the model to see what happens
# Convert Bernoulli means into linear classifier weights: log-odds per feature
# plus a bias column of sum(log(1-p)).
avgs = avgs.reshape(avgs.shape[0],
                    dim)
W = np.zeros((len(avgs),dim+1))
W[:,:-1] = np.log(avgs) - np.log(1-avgs)
W[:,-1] = np.log(1-avgs).sum(-1)
W_meta = meta.astype(np.intc)
# need to construct W_meta39 to use 39 labels
W_meta39 = get_reduced_meta(W_meta,leehon_dict).astype(np.intc)
# now we get the use_phns39 row ids
weights = W.ravel().copy()
weights_classes = W_meta[:,0].copy()
weights_components = W_meta[:,1].copy()
# Stable two-key sort: order rows by (class, component) using consecutive
# mergesorts (component first, then class).
sorted_component_ids = np.argsort(weights_components,kind='mergesort')
sorted_components = weights_components[sorted_component_ids]
sorted_weights_classes = weights_classes[sorted_component_ids]
stable_sorted_weights_classes_ids = np.argsort(sorted_weights_classes,kind='mergesort')
weights_classes = sorted_weights_classes[stable_sorted_weights_classes_ids]
weights_components = sorted_components[stable_sorted_weights_classes_ids]
W = W[sorted_component_ids][stable_sorted_weights_classes_ids]
n_classes = 48
print "n_classes=%d" % n_classes
accuracy = test_accuracy(W)
print "test accuracy = %g" % accuracy
| {
"content_hash": "b22a147935301860cd5ec9e354aabb24",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 236,
"avg_line_length": 40.61363636363637,
"alnum_prop": 0.6197537772803582,
"repo_name": "markstoehr/acoustic_cues",
"id": "07d7e24966c479b28ffdbd73bac4a205774c5368",
"size": "10722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "local/fast_48phone_EM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "676583"
},
{
"name": "Python",
"bytes": "74916"
},
{
"name": "Shell",
"bytes": "2963"
}
],
"symlink_target": ""
} |
from .cfg_fast import CFGFast
from .cfg_accurate import CFGAccurate
from .cfg import CFG
from .cfb import CFBlanket
# things to make your life easier
from .cfg_arch_options import CFGArchOptions
from .cfg_utils import CFGUtils
from .cfg_node import CFGNode
| {
"content_hash": "cc25dd26feb633bdebe9effa226789d6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.8062015503875969,
"repo_name": "tyb0807/angr",
"id": "706dda88c2f9ad60cebb4abd009ded18bec9e874",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/analyses/cfg/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39875"
},
{
"name": "Makefile",
"bytes": "610"
},
{
"name": "Python",
"bytes": "3884780"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from keras.layers import Lambda, Dense, Concatenate
from keras.models import Model, Input
from .architectures import get_network_by_name
from .generic_layer_utils import slice_vector
logger = logging.getLogger(__name__)
class BaseDecoder(object):
    """Shared scaffolding for decoder models.

    Stores the latent/data dimensionalities and architecture name, and builds
    the two symbolic Keras inputs (latent code and raw data) that concrete
    decoders wire into their graphs. Subclasses override ``__call__``.
    """

    def __init__(self, latent_dim, data_dim, network_architecture='synthetic', name='decoder'):
        logger.info("Initialising {} model with {}-dimensional data "
                    "and {}-dimensional latent input.".format(name, data_dim, latent_dim))
        # normalise e.g. "Standard Decoder" -> "standard_decoder"
        self.name = '_'.join(name.lower().split())
        self.latent_dim = latent_dim
        self.data_dim = data_dim
        self.network_architecture = network_architecture
        # symbolic inputs reused by subclasses when assembling their models
        self.latent_input = Input(shape=(self.latent_dim,), name='dec_latent_input')
        self.data_input = Input(shape=(self.data_dim,), name='dec_ll_estimator_data_input')

    def __call__(self, *args, **kwargs):
        # The base class builds no computation graph.
        return None
class StandardDecoder(BaseDecoder):
    """
    A StandardDecoder model has inputs comprising of a latent encoding given by an Encoder model, a prior sampler
    or other custom input and the raw Encoder data input, which is needed to estimate the reconstructed
    data log likelihood. It can be visualised as:

        Data    Latent
         |        |
         |    -----------
         |    | Decoder |
         |    -----------
         |        |
         |      Output
         |    probability ---> Generated data
         |        |
          ---> Log Likelihood ---> -(reconstruction loss)

    Note that the reconstruction loss is not used when the model training ends. It serves only the purpose to
    define a measure of loss which is optimised.
    """

    def __init__(self, latent_dim, data_dim, network_architecture='synthetic', name=None):
        """
        Args:
            latent_dim: int, the flattened dimensionality of the latent space
            data_dim: int, the flattened dimensionality of the output space (data space)
            network_architecture: str, the architecture name for the body of the StandardDecoder model
            name: str, identifier of the model
        """
        super(StandardDecoder, self).__init__(latent_dim=latent_dim, data_dim=data_dim,
                                              network_architecture=network_architecture,
                                              name=name or 'Standard Decoder')
        # Architecture-specific trunk that maps latent codes to hidden features.
        generator_body = get_network_by_name['decoder'][network_architecture](self.latent_input, name_prefix=self.name)
        # NOTE: all decoder layers have names prefixed by `dec`.
        # This is essential for the partial model freezing during training.
        # Per-pixel Bernoulli means (sigmoid keeps them in (0, 1)).
        sampler_params = Dense(self.data_dim, activation='sigmoid',
                               name=self.name + 'dec_sampler_params')(generator_body)
        # a probability clipping is necessary for the Bernoulli `log_prob` property produces NaNs in the border cases.
        sampler_params = Lambda(lambda x: 1e-6 + (1 - 2e-6) * x,
                                name=self.name + 'dec_probs_clipper')(sampler_params)

        def bernoulli_log_probs(args):
            # Element-wise log p(x | mu) under an independent Bernoulli.
            # NOTE(review): tensorflow.contrib only exists in TF 1.x.
            from tensorflow.contrib.distributions import Bernoulli
            mu, x = args
            log_px = Bernoulli(probs=mu, name=self.name + 'dec_bernoulli').log_prob(x)
            return log_px

        log_probs = Lambda(bernoulli_log_probs, name=self.name + 'dec_bernoulli_logprob')([sampler_params,
                                                                                           self.data_input])
        # Two views of the same layers: sampling (generation) and
        # log-likelihood estimation (training).
        self.generator = Model(inputs=self.latent_input, outputs=sampler_params, name=self.name + 'dec_sampling')
        self.ll_estimator = Model(inputs=[self.data_input, self.latent_input], outputs=log_probs,
                                  name=self.name + 'dec_trainable')

    def __call__(self, *args, **kwargs):
        """
        Make the StandardDecoder model callable on lists of Input layers or tensors.

        Args:
            *args: a list of input layers or tensors or numpy arrays, or a single input layer, tensor or numpy array.

        Keyword Args:
            is_learning: bool, whether the model is used for training or data generation. The output is either
                the reconstruction log likelihood or the output probabilities in the data space respectively.

        Returns:
            A StandardDecoder model in `training` or `data generation` mode.
        """
        # NOTE(review): "is_learninig" is a typo but purely a local name.
        is_learninig = kwargs.get('is_learning', True)
        if is_learninig:
            return self.ll_estimator(args[0])
        else:
            return self.generator(args[0])
class ConjointDecoder(object):
    """
    A ConjointDecoder model takes multiple latent dimensions and from each generates a data output. There is also a
    shared latent input which is used by all decoders:

        Latent_1 -- Latent_2 -- Latent_shared   <-- concatenated
              /                    \
        Latent_1-Latent_shared   Latent_2-Latent_shared
              |                       |
        -------------           -------------
        | Decoder_1 |           | Decoder_2 |
        -------------           -------------
              |                       |
           Output_1                Output_2

    For more details about the loss computation, see the StandardDecoder class.
    """

    def __init__(self, latent_dims, data_dims, network_architecture='synthetic'):
        """
        Args:
            latent_dims: tuple, the flattened dimensions of the private latent spaces and the shared latent space
            data_dims: int, the flattened dimensions of the output spaces (data spaces)
            network_architecture: str, the architecture name for the body of all the decoders
        """
        # One private latent space per data modality plus one shared space.
        # NOTE(review): "Expected too receive" typo lives in a runtime string.
        assert len(latent_dims) == len(data_dims) + 1, \
            "Expected too receive {} private latent spaces and one shared for {} data inputs " \
            "but got {} instead.".format(len(data_dims) + 1, len(data_dims), len(latent_dims))
        name = "Conjoint Decoder"
        logger.info("Initialising {} model with {}-dimensional outputs "
                    "and {}-dimensional latent inputs.".format(name, data_dims, latent_dims))

        def bernoulli_log_probs(args):
            # Element-wise log p(x | mu) under an independent Bernoulli.
            # NOTE(review): tensorflow.contrib only exists in TF 1.x.
            from tensorflow.contrib.distributions import Bernoulli
            mu, x = args
            log_px = Bernoulli(probs=mu, name='dec_bernoulli').log_prob(x)
            return log_px

        data_inputs = [Input(shape=(d,), name='dec_data_input_{}'.format(i)) for i, d in enumerate(data_dims)]
        # All latent factors arrive concatenated in one flat vector; the shared
        # factors occupy the trailing latent_dims[-1] entries.
        latent_input = Input(shape=(sum(latent_dims),), name='dec_latent_input')
        shared_latent_factors = Lambda(slice_vector, arguments={'start': -latent_dims[-1], 'stop': None},
                                       name='dec_slice_shared_lat')(latent_input)
        sampling_outputs, log_probs = [], []
        stop_id = 0
        for i in range(len(data_dims)):
            # Slice out this modality's private factors from the flat vector.
            start_id = stop_id
            stop_id += latent_dims[i]
            latent_i = Lambda(slice_vector, arguments={'start': start_id, 'stop': stop_id},
                              name='dec_slice_{}'.format(i))(latent_input)
            # Each decoder sees its private factors concatenated with the
            # shared factors.
            latent_i = Concatenate(axis=-1, name='dec_merged_latent_{}'.format(i))([latent_i, shared_latent_factors])
            generator_body = get_network_by_name['conjoint_decoder'][network_architecture](latent_i,
                                                                                          'dec_conj_{}'.format(i))
            sampler_params = Dense(data_dims[i], activation='sigmoid',
                                   name='dec_sampler_params_{}'.format(i))(generator_body)
            # probability clipping is necessary for the Bernoulli `log_prob` property produces NaNs in the limit cases
            sampler_params = Lambda(lambda x: 1e-6 + (1 - 2e-6) * x,
                                    name='dec_probs_clipper_{}'.format(i))(sampler_params)
            log_prob = Lambda(bernoulli_log_probs,
                              name='dec_bernoulli_logprob_{}'.format(i))([sampler_params, data_inputs[i]])
            sampling_outputs.append(sampler_params)
            log_probs.append(log_prob)

        # Single concatenated log-probability tensor drives the training loss.
        log_probs = Concatenate(axis=-1, name='dec_conc_log_probs')(log_probs)
        self.generator = Model(inputs=latent_input, outputs=sampling_outputs, name='dec_sampling')
        self.ll_estimator = Model(inputs=data_inputs + [latent_input], outputs=log_probs, name='dec_trainable')

    def __call__(self, *args, **kwargs):
        """
        Make the StandardDecoder model callable on lists of Input layers or tensors.

        Args:
            *args: a list of input layers or tensors or numpy arrays, or a single input layer, tensor or numpy array.

        Keyword Args:
            is_learning: bool, whether the model is used for training or data generation. The output is either
                the reconstruction log likelihood or the output probabilities in the data space respectively.

        Returns:
            A StandardDecoder model in `training` or `data generation` mode.
        """
        # NOTE(review): "is_learninig" is a typo but purely a local name.
        is_learninig = kwargs.get('is_learning', True)
        if is_learninig:
            return self.ll_estimator(args[0])
        else:
            return self.generator(args[0])
| {
"content_hash": "bade64b3970268fd8fcf75ebd211fd72",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 119,
"avg_line_length": 49.036458333333336,
"alnum_prop": 0.582793414763675,
"repo_name": "gdikov/vae-playground",
"id": "99e36724d0a14259cfc7782e26c6b7244b764732",
"size": "9415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playground/models/networks/decoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "212774"
}
],
"symlink_target": ""
} |
"""One-line documentation for from_to_tests module.
A detailed description of from_to_tests.
"""
__author__ = 'scottakirkwood@gmail.com (Scott Kirkwood)'
import sys
sys.path.append('..')
import mm2notes
import unittest
import codecs
import glob
import difflib
class FromToTests(unittest.TestCase):
    """Golden-file tests: render every tests/*.mm file to HTML and compare
    the output against the checked-in .html file with the same basename."""

    def RunOne(self, mm_file, html_file):
        """Render `mm_file` to 'last_output.html' and assert it matches `html_file`."""
        mm2n = mm2notes.Mm2Notes()
        lines = mm2n.open(mm_file)
        test_name = 'last_output.html'
        outfile = codecs.open(test_name, 'w', 'utf-8')
        mm2n.set_order_by_time(False)
        mm2n.write(outfile, lines)
        # BUG FIX: CompareFiles() returns True when the files *differ*, so the
        # original `assert self.CompareFiles(...)` only passed on a mismatch.
        assert not self.CompareFiles(html_file, test_name)

    def CompareFiles(self, good_file, test_file):
        """Return True if the two files differ, printing the differing lines."""
        diff = difflib.Differ()
        good_lines = open(good_file).read().splitlines()
        test_lines = open(test_file).read().splitlines()
        diffs = diff.compare(good_lines, test_lines)
        differs = False
        for line in diffs:
            # Differ prefixes unchanged lines with a space; anything else
            # ('-', '+', '?') marks a difference.
            if not line.startswith(' '):
                if not differs:
                    differs = True
                    print('Files %s and %s differ' % (good_file, test_file))
                print(line)
        return differs

    def runTest(self):
        # Run one golden-file comparison per .mm file in the working directory.
        for fname in glob.glob('*.mm'):
            fnamehtml = fname.replace('.mm', '.html')
            self.RunOne(fname, fnamehtml)
# Allow running this module directly; discovers and runs the tests above.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "19deea6fb8426f0ed66ee1c701616706",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 65,
"avg_line_length": 26.354166666666668,
"alnum_prop": 0.6490118577075099,
"repo_name": "scottkirkwood/mm2notes",
"id": "49685cfa37e0531a105cdc0dcfaf283a4304a90a",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mm2notes/tests/from_to_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15741"
},
{
"name": "Python",
"bytes": "26778"
},
{
"name": "Shell",
"bytes": "260"
}
],
"symlink_target": ""
} |
"""
WSGI config for ligo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ligo.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "27523457a924df4e3341c9f90e282509",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.357142857142858,
"alnum_prop": 0.7702349869451697,
"repo_name": "hikaMaeng/bsShortURL",
"id": "39b106373b244342f5acd0d26dd5b0fcf3e1cbed",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "django/ligo/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "67260"
},
{
"name": "Python",
"bytes": "15898"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from ..management.commands.importusers import create_user
from ..management.commands.importusers import infer_and_create_class
from ..management.commands.importusers import infer_facility
from ..management.commands.importusers import validate_username
from ..models import Classroom
from ..models import FacilityUser
from .helpers import setup_device
class UserImportTestCase(TestCase):
    """
    Tests for the helper functions used by the ``importusers`` command.
    """

    def setUp(self):
        # Each test runs against a freshly provisioned device/facility.
        self.facility, self.superuser = setup_device()

    def test_validate_username_no_username(self):
        row = {}
        with self.assertRaises(CommandError):
            validate_username(row)

    def test_validate_username_none_username(self):
        row = {'username': None}
        with self.assertRaises(CommandError):
            validate_username(row)

    def test_infer_facility_not_specified(self):
        fallback = {}
        self.assertEqual(fallback, infer_facility({}, fallback))

    def test_infer_facility_none(self):
        fallback = {}
        self.assertEqual(fallback, infer_facility({'facility': None}, fallback))

    def test_infer_facility_by_id(self):
        row = {'facility': self.facility.id}
        self.assertEqual(self.facility, infer_facility(row, {}))

    def test_infer_facility_by_name(self):
        row = {'facility': self.facility.name}
        self.assertEqual(self.facility, infer_facility(row, {}))

    def test_infer_facility_fail(self):
        with self.assertRaises(CommandError):
            infer_facility({'facility': 'garbage'}, {})

    def test_infer_class_no_class_no_effect(self):
        infer_and_create_class({}, self.facility)
        self.assertEqual(0, Classroom.objects.count())

    def test_infer_class_falsy_class_no_effect(self):
        infer_and_create_class({'class': ''}, self.facility)
        self.assertEqual(0, Classroom.objects.count())

    def test_infer_class_by_id(self):
        classroom = Classroom.objects.create(name='testclass', parent=self.facility)
        self.assertEqual(classroom, infer_and_create_class({'class': classroom.id}, self.facility))

    def test_infer_class_by_name(self):
        classroom = Classroom.objects.create(name='testclass', parent=self.facility)
        self.assertEqual(classroom, infer_and_create_class({'class': classroom.name}, self.facility))

    def test_infer_class_create(self):
        created = infer_and_create_class({'class': 'testclass'}, self.facility)
        self.assertEqual(created, Classroom.objects.get(name='testclass'))

    def test_create_user_test_header_row(self):
        # Row 0 containing the literal column names is treated as a header.
        header = {'class': 'class', 'facility': 'facility', 'username': 'username'}
        self.assertFalse(create_user(0, header))

    def test_create_user_exists(self):
        row = {'username': self.superuser.username}
        self.assertFalse(create_user(1, row, default_facility=self.facility))

    def test_create_user_exists_add_classroom(self):
        row = {'username': self.superuser.username, 'class': 'testclass'}
        create_user(1, row, default_facility=self.facility)
        self.assertTrue(self.superuser.is_member_of(Classroom.objects.get(name='testclass')))

    def test_create_user_not_exist(self):
        row = {'username': 'testuser'}
        self.assertTrue(create_user(1, row, default_facility=self.facility))

    def test_create_user_not_exist_add_classroom(self):
        row = {'username': 'testuser', 'class': 'testclass'}
        create_user(1, row, default_facility=self.facility)
        new_user = FacilityUser.objects.get(username='testuser')
        self.assertTrue(new_user.is_member_of(Classroom.objects.get(name='testclass')))

    def test_create_user_not_exist_bad_username(self):
        row = {'username': 'test$user'}
        self.assertFalse(create_user(1, row, default_facility=self.facility))
class UserImportCommandTestCase(TestCase):
    """
    Tests for the ``importusers`` management command end to end.
    """

    def setUp(self):
        # mkstemp gives a real on-disk path the command can open.
        self.csvfile, self.csvpath = tempfile.mkstemp(suffix='csv')

    def _write_csv(self, content):
        # Helper: overwrite the temp CSV with the given content.
        with open(self.csvpath, 'w') as f:
            f.write(content)

    def test_device_not_setup(self):
        with self.assertRaisesRegexp(CommandError, 'No default facility exists'):
            call_command('importusers', self.csvpath)

    def test_setup_headers_no_username(self):
        setup_device()
        self._write_csv('class,facility')
        with self.assertRaisesRegexp(CommandError, 'No usernames specified'):
            call_command('importusers', self.csvpath)

    def test_setup_headers_invalid_header(self):
        setup_device()
        self._write_csv('class,username,dogfood')
        with self.assertRaisesRegexp(CommandError, 'Mix of valid and invalid header'):
            call_command('importusers', self.csvpath)

    def test_setup_headers_make_user(self):
        setup_device()
        self._write_csv('username\ntestuser')
        call_command('importusers', self.csvpath)
        self.assertTrue(FacilityUser.objects.filter(username='testuser').exists())

    def test_setup_no_headers_make_user(self):
        setup_device()
        self._write_csv('Test User,testuser')
        call_command('importusers', self.csvpath)
        self.assertTrue(FacilityUser.objects.filter(username='testuser').exists())

    def test_setup_no_headers_bad_user_good_user(self):
        setup_device()
        self._write_csv('Test User,testuser\nOther User,te$tuser')
        call_command('importusers', self.csvpath)
        self.assertTrue(FacilityUser.objects.filter(username='testuser').exists())
        self.assertFalse(FacilityUser.objects.filter(username='te$tuser').exists())
| {
"content_hash": "54061ffd341dde5f10d7b04a3d8cbde9",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 124,
"avg_line_length": 36.42045454545455,
"alnum_prop": 0.646801872074883,
"repo_name": "DXCanas/kolibri",
"id": "bc5bc258df28ea08cf5caf36208645b421af2df2",
"size": "6410",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/auth/test/test_user_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "864"
},
{
"name": "CSS",
"bytes": "32872"
},
{
"name": "Dockerfile",
"bytes": "4332"
},
{
"name": "Gherkin",
"bytes": "115979"
},
{
"name": "HTML",
"bytes": "14251"
},
{
"name": "JavaScript",
"bytes": "890295"
},
{
"name": "Makefile",
"bytes": "9885"
},
{
"name": "Python",
"bytes": "1363204"
},
{
"name": "Shell",
"bytes": "10407"
},
{
"name": "Vue",
"bytes": "944905"
}
],
"symlink_target": ""
} |
# Packaging script for the ipaddress backport.
try:
    from setuptools import setup
except ImportError:
    # Fall back to the stdlib installer when setuptools is unavailable.
    from distutils.core import setup

setup(
    name='ipaddress',
    version='1.0.6',
    description='IPv4/IPv6 manipulation library',
    long_description='Port of the 3.3+ ipaddress module to 2.6 and 2.7',
    author='Philipp Hagemeister',
    author_email='phihag@phihag.de',
    url='https://github.com/phihag/ipaddress',
    license='Python Software Foundation License',
    classifiers=(
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Python Software Foundation License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3'
    ),
    py_modules=['ipaddress'],
)
| {
"content_hash": "f2dc0dc495a9c171fa8c42490fdc70a5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 33.785714285714285,
"alnum_prop": 0.6236786469344608,
"repo_name": "NikolayDachev/jadm",
"id": "af9268cb86db2da39e2c2a526b71c4bc9b8fba9c",
"size": "993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ipaddress-1.0.6/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "71037"
},
{
"name": "Groff",
"bytes": "10854"
},
{
"name": "HTML",
"bytes": "698709"
},
{
"name": "Makefile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "1125171"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import json
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import json_handler
import corehq.apps.style.utils as style_utils
# Registry for this module's template tags/filters; loaded via {% load %}.
register = template.Library()
@register.filter
def JSON(obj):
    """Serialize ``obj`` to JSON and mark the result safe for templates."""
    serialized = json.dumps(obj, default=json_handler)
    return mark_safe(serialized)
@register.filter
def to_javascript_string(obj):
    """JSON-serialize ``obj`` with '</script>' neutralised so the result can
    be embedded inside an inline <script> block."""
    # seriously: http://stackoverflow.com/a/1068548/8207
    escaped = JSON(obj).replace('</script>', '<" + "/script>')
    return mark_safe(escaped)
@register.filter
def BOOL(obj):
    """Render any value as the JavaScript literal 'true' or 'false',
    unwrapping couch documents via to_json() when available."""
    try:
        value = obj.to_json()
    except AttributeError:
        value = obj
    if value:
        return 'true'
    return 'false'
@register.filter
def dict_lookup(dict, key):
    """Look ``key`` up in the mapping, returning None when absent."""
    # NOTE(review): the first parameter shadows the builtin `dict`; kept
    # as-is to preserve the filter's signature.
    return dict.get(key, None)
@register.filter
def array_lookup(array, index):
    """Return ``array[index]``, or None when the index is past the end."""
    if index >= len(array):
        return None
    return array[index]
@register.simple_tag
def dict_as_query_string(dict, prefix=""):
    """Join a mapping into a query string (no leading '?'), prefixing each
    key with ``prefix``."""
    pairs = ["%s%s=%s" % (prefix, key, value) for key, value in dict.items()]
    return "&".join(pairs)
@register.filter
def add_days(date, days=1):
    """Return ``date`` advanced by ``days``.

    Accepts either a date/datetime object or an 'MM/DD/YYYY' string; the
    string form is parsed before the offset is applied.
    """
    span = timedelta(days=days)
    try:
        return date + span
    except TypeError:
        # Not a date-like object (e.g. an 'MM/DD/YYYY' string). The previous
        # bare `except:` also masked unrelated errors here.
        return datetime.strptime(date, '%m/%d/%Y').date() + span
@register.filter
def concat(str1, str2):
    """Concatenate two values via their string representations."""
    combined = "%s%s" % (str1, str2)
    return combined
# `resource_versions` is generated at deploy time; fall back to an empty
# mapping in environments (e.g. local dev) where the module is missing or
# was generated with invalid syntax.
try:
    from resource_versions import resource_versions
except (ImportError, SyntaxError):
    resource_versions = {}
@register.simple_tag
def static(url):
    """Return the STATIC_URL-prefixed path for ``url``, appending a
    cache-busting version query string when one is known. Versioning is
    skipped for .less files (presumably compiled elsewhere — TODO confirm)."""
    version = resource_versions.get(url)
    full_url = settings.STATIC_URL + url
    if version and not full_url.endswith('.less'):
        full_url += "?version=%s" % version
    return full_url
@register.simple_tag
def cachebuster(url):
    """Return the known version token for ``url``, or '' when unknown."""
    version = resource_versions.get(url, "")
    return version
@register.simple_tag
def new_static(url, **kwargs):
    """Successor to the ``static`` tag above.

    Assets with extensions handled by django-compressor (.less/.css/.js) get a
    version query string only when ``cache=True`` is passed explicitly; all
    other assets are versioned whenever a version is known.
    """
    use_cache = kwargs.pop('cache', False)
    compressible = url.endswith(('.less', '.css', '.js'))
    static_url = settings.STATIC_URL + url
    if use_cache or not compressible:
        version = resource_versions.get(url)
        if version:
            static_url += "?version=%s" % version
    return static_url
@register.simple_tag
def domains_for_user(request, selected_domain=None):
    """
    Generate pulldown menu for domains.
    Cache the entire string alongside the couch_user's doc_id that can get invalidated when
    the user doc updates via save.
    """
    domain_list = []
    if selected_domain != 'public':
        # Cached domain list keyed on the couch user's doc id; invalidated
        # when the user document is saved.
        cached_domains = cache_core.get_cached_prop(request.couch_user.get_id, 'domain_list')
        if cached_domains:
            domain_list = [Domain.wrap(x) for x in cached_domains]
        else:
            try:
                domain_list = Domain.active_for_user(request.couch_user)
                cache_core.cache_doc_prop(request.couch_user.get_id, 'domain_list', [x.to_json() for x in domain_list])
            except Exception:
                if settings.DEBUG:
                    raise
                else:
                    # Best effort in production: retry via the django user
                    # and report the failure instead of breaking the page.
                    domain_list = Domain.active_for_user(request.user)
                    notify_exception(request)
    # Reduce each domain to the url/name pair the dropdown template needs.
    domain_list = [dict(
        url=reverse('domain_homepage', args=[d.name]),
        name=d.long_display_name()
    ) for d in domain_list]
    context = {
        'is_public': selected_domain == 'public',
        'domain_list': domain_list,
        'current_domain': selected_domain,
    }
    # Pick the partial matching the request's bootstrap version.
    template = {
        style_utils.BOOTSTRAP_2: 'hqwebapp/partials/domain_list_dropdown.html',
        style_utils.BOOTSTRAP_3: 'style/includes/domain_list_dropdown.html',
    }[style_utils.bootstrap_version(request)]
    return mark_safe(render_to_string(template, context))
@register.simple_tag
def list_my_orgs(request):
    """Render an <ul> of links to the landing page of each of the
    requesting user's organizations."""
    items = []
    for org in request.couch_user.get_organizations():
        landing_url = reverse("orgs_landing", args=[org.name])
        items.append('<li><a href="%s">%s</a></li>' % (landing_url, org.title))
    pieces = ['<ul class="nav nav-pills nav-stacked">'] + items + ['</ul>']
    return "".join(pieces)
@register.simple_tag
def commcare_user():
    """Return the translated term used for CommCare (mobile) users."""
    term = settings.COMMCARE_USER_TERM
    return _(term)
@register.simple_tag
def hq_web_user():
    """Return the translated term used for HQ web users."""
    term = settings.WEB_USER_TERM
    return _(term)
@register.filter
def mod(value, arg):
    """Template filter applying the ``%`` operator (modulo for numbers,
    %-style formatting for strings)."""
    remainder = value % arg
    return remainder
# This is taken from https://code.djangoproject.com/ticket/15583
@register.filter(name='sort')
def listsort(value):
    """Template filter returning a sorted copy of *value*.

    Dicts come back as a SortedDict ordered by key, lists as a sorted
    list copy; any other value is returned unchanged.
    """
    if isinstance(value, dict):
        new_dict = SortedDict()
        # sorted() works whether .keys() returns a list (py2) or a view
        # (py3); the old key_list.sort() broke on dict views.
        for key in sorted(value.keys()):
            new_dict[key] = value[key]
        return new_dict
    elif isinstance(value, list):
        # sorted() returns a new list, leaving the input untouched.
        return sorted(value)
    else:
        return value
listsort.is_safe = True
@register.filter(name='getattr')
def get_attribute(obj, arg):
    """Template filter wrapping getattr(); yields None for missing attributes.

    Usage: {{ couch_user|getattr:"full_name" }}
    """
    missing = None
    return getattr(obj, arg, missing)
@register.filter
def pretty_doc_info(doc_info):
    """Render the shared pretty_doc_info partial for *doc_info*."""
    context = {'doc_info': doc_info}
    return render_to_string('hqwebapp/pretty_doc_info.html', context)
@register.filter
def toggle_enabled(request, toggle_name):
    """True-ish when the named feature toggle is enabled for the request's
    user or for its domain."""
    import corehq.toggles
    toggle = getattr(corehq.toggles, toggle_name)
    # Short-circuits exactly like the original or-expression: the domain
    # check only runs when the user check comes up falsy.
    user_enabled = hasattr(request, 'user') and toggle.enabled(request.user.username)
    return user_enabled or (
        hasattr(request, 'domain') and toggle.enabled(request.domain))
| {
"content_hash": "1bd0125ef0c3ef67aceed24beb4456dd",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 119,
"avg_line_length": 27.633187772925766,
"alnum_prop": 0.6494943109987358,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "50314180fd2de97e50da6c0359234aee42818999",
"size": "6328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/hqwebapp/templatetags/hq_shared_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
} |
import sys, re, os, shutil, bisect, textwrap, shlex
import svntest
from svntest import main, actions, tree
from svntest import Failure
if sys.version_info[0] >= 3:
# Python >=3.0
from io import StringIO
else:
# Python <3.0
from cStringIO import StringIO
def make(wc_dir, commands, prev_status=None, prev_disk=None, verbose=True):
  """The Factory Invocation Function. This is typically the only one
  called from outside this file. See top comment in factory.py.
  Prints the resulting py script to stdout when verbose is True and
  returns the resulting line-list containing items as:
  [ ['pseudo-shell input line #1', ' translation\n to\n py #1'], ...]"""
  fac = TestFactory(wc_dir, prev_status, prev_disk)
  fac.make(commands)
  # Honor the documented verbose flag; previously print_script() ran
  # unconditionally, making verbose=False a silent no-op.
  if verbose:
    fac.print_script()
  return fac.lines
class TestFactory:
"""This class keeps all state around a factory.make() call."""
  def __init__(self, sbox, prev_status=None, prev_disk=None):
    """Initialize factory state for one make() call on sandbox SBOX.
    PREV_STATUS / PREV_DISK optionally seed the expected_status /
    expected_disk trees carried over from a previous verification."""
    self.sbox = sbox
    # The input lines and their translations.
    # Each translation usually has multiple output lines ('\n' characters).
    self.lines = []   # [ ['in1', 'out1'], ['in2', 'out'], ...
    # Any expected_status still there from a previous verification
    self.prev_status = None
    if prev_status:
      self.prev_status = [None, prev_status]  # svntest.wc.State
    # Any expected_disk still there from a previous verification
    self.prev_disk = None
    if prev_disk:
      # Reparent the supplied disk state under this sandbox's wc_dir.
      reparented_prev_disk = svntest.wc.State(prev_disk.wc_dir, {});
      reparented_prev_disk.add_state(sbox.wc_dir, prev_disk);
      self.prev_disk = [None, reparented_prev_disk]
    # Those command line options that expect an argument following
    # which is not a path. (don't expand args following these)
    self.keep_args_of = ['--depth', '--encoding', '-r',
                         '--changelist', '-m', '--message']
    # A stack of $PWDs, to be able to chdir back after a chdir.
    self.prevdirs = []
    # The python variables we want to be declared at the beginning.
    # These are path variables like "A_D = os.path.join(wc_dir, 'A', 'D')".
    # The original wc_dir and url vars are not kept here.
    self.vars = {}
    # An optimized list kept up-to-date by variable additions
    self.sorted_vars_by_pathlen = []
    # Whether we ever used the variables 'wc_dir' and 'url' (tiny tweak)
    self.used_wc_dir = False
    self.used_url = False
    # The alternate working copy directories created that need to be
    # registered with sbox (are not inside another working copy).
    self.other_wc_dirs = {}
def make(self, commands):
"internal main function, delegates everything except final output."
# keep a spacer for init
self.add_line(None, None)
init = ""
if not self.sbox.is_built():
self.sbox.build()
init += "sbox.build()\n"
try:
# split input args
input_lines = commands.replace(';','\n').splitlines()
for str in input_lines:
if len(str.strip()) > 0:
self.add_line(str)
for i in range(len(self.lines)):
if self.lines[i][0] is not None:
# This is where everything happens:
self.lines[i][1] = self.switch(self.lines[i][0])
# We're done. Add a final greeting.
self.add_line(
None,
"Remember, this only saves you typing. Doublecheck everything.")
# -- Insert variable defs in the first line --
# main wc_dir and url
if self.used_wc_dir:
init += 'wc_dir = sbox.wc_dir\n'
if self.used_url:
init += 'url = sbox.repo_url\n'
# registration of new WC dirs
sorted_names = self.get_sorted_other_wc_dir_names()
for name in sorted_names:
init += name + ' = ' + self.other_wc_dirs[name][0] + '\n'
if len(init) > 0:
init += '\n'
# general variable definitions
sorted_names = self.get_sorted_var_names()
for name in sorted_names:
init += name + ' = ' + self.vars[name][0] + '\n'
# Insert at the first line, being the spacer from above
if len(init) > 0:
self.lines[0][1] = init
# This usually goes to make() below (outside this class)
return self.lines
except:
for line in self.lines:
if line[1] is not None:
print(line[1])
raise
  def print_script(self, stream=sys.stdout):
    """Output the resulting script of the preceding make() call to STREAM.
    Input lines that could not be translated are emitted as comments."""
    if self.lines is not None:
      for line in self.lines:
        if line[1] is None:
          # fall back to just that line as it was in the source
          stripped = line[0].strip()
          if not stripped.startswith('#'):
            # for comments, don't say this:
            stream.write(" # don't know how to handle:\n")
          stream.write(" " + line[0].strip() + '\n')
        else:
          if line[0] is not None:
            # echo the original pseudo-shell input as a comment ...
            stream.write( wrap_each_line(line[0].strip(),
                                        " # ", " # ", True) + '\n')
          # ... followed by its python translation.
          stream.write(wrap_each_line(line[1], " ", " ", False) + '\n\n')
    else:
      stream.write(" # empty.\n")
    stream.flush()
# End of public functions.
# "Shell" command handlers:
  def switch(self, line):
    """Given one input line, delegates to the appropriate sub-functions.
    Returns the python translation, or None when LINE is not understood
    (the caller then keeps the line verbatim as a comment)."""
    args = shlex.split(line)
    if len(args) < 1:
      return ""
    first = args[0]
    # This is just an if-cascade. Feel free to change that.
    if first == 'svn':
      second = args[1]
      if second == 'add':
        return self.cmd_svn(args[1:], False, self.keep_args_of)
      if second in ['changelist', 'cl']:
        # 'changelist NAME path...': keep NAME verbatim; with --remove
        # there is no NAME argument to keep.
        keep_count = 2
        if '--remove' in args:
          keep_count = 1
        return self.cmd_svn(args[1:], False, self.keep_args_of, keep_count)
      if second in ['status','stat','st']:
        return self.cmd_svn_status(args[2:])
      if second in ['commit','ci']:
        return self.cmd_svn_commit(args[2:])
      if second in ['update','up']:
        return self.cmd_svn_update(args[2:])
      if second in ['switch','sw']:
        return self.cmd_svn_switch(args[2:])
      if second in ['copy', 'cp',
                    'move', 'mv', 'rename', 'ren']:
        return self.cmd_svn_copy_move(args[1:])
      if second in ['checkout', 'co']:
        return self.cmd_svn_checkout(args[2:])
      if second in ['propset','pset','ps']:
        # keep 'propset NAME VALUE' verbatim, expand only the targets
        return self.cmd_svn(args[1:], False,
                            self.keep_args_of, 3)
      if second in ['delete','del','remove', 'rm']:
        return self.cmd_svn(args[1:], False,
                            self.keep_args_of + ['--with-revprop'])
      # NOTE that not all commands need to be listed here, since
      # some are already adequately handled by self.cmd_svn().
      # If you find yours is not, add another self.cmd_svn_xxx().
      return self.cmd_svn(args[1:], False, self.keep_args_of)
    if first == 'echo':
      return self.cmd_echo(args[1:])
    if first == 'mkdir':
      return self.cmd_mkdir(args[1:])
    if first == 'rm':
      return self.cmd_rm(args[1:])
    if first == 'mv':
      return self.cmd_mv(args[1:])
    if first == 'cp':
      return self.cmd_cp(args[1:])
    # if all fails, take the line verbatim
    return None
  def cmd_svn_standard_run(self, pyargs, runargs, do_chdir, wc):
    """The generic invocation of svn, helper function.
    Runs `svn RUNARGS` now, then writes a script snippet that re-runs it
    and verifies the captured stdout (on success) or stderr and exit code
    (on failure). PYARGS are the script-quoted forms of RUNARGS."""
    pychdir = self.chdir(do_chdir, wc)
    code, out, err = main.run_svn("Maybe", *runargs)
    if code == 0 and len(err) < 1:
      # write a test that expects success
      pylist = self.strlist2py(out)
      if len(out) <= 1:
        py = "expected_stdout = " + pylist + "\n\n"
      else:
        # multi-line output: svn output ordering isn't guaranteed
        py = "expected_stdout = verify.UnorderedOutput(" + pylist + ")\n\n"
      py += pychdir
      py += "actions.run_and_verify_svn2('OUTPUT', expected_stdout, [], 0"
    else:
      # write a test that expects failure
      pylist = self.strlist2py(err)
      if len(err) <= 1:
        py = "expected_stderr = " + pylist + "\n\n"
      else:
        py = "expected_stderr = verify.UnorderedOutput(" + pylist + ")\n\n"
      py += pychdir
      py += ("actions.run_and_verify_svn2('OUTPUT', " +
             "[], expected_stderr, " + str(code))
    if len(pyargs) > 0:
      py += ", " + ", ".join(pyargs)
    py += ")\n"
    py += self.chdir_back(do_chdir)
    return py
def cmd_svn(self, svnargs, append_wc_dir_if_missing = False,
keep_args_of = [], keep_first_count = 1,
drop_with_arg = []):
"Handles all svn calls not handled by more specific functions."
pyargs, runargs, do_chdir, targets = self.args2svntest(svnargs,
append_wc_dir_if_missing, keep_args_of,
keep_first_count, drop_with_arg)
return self.cmd_svn_standard_run(pyargs, runargs, do_chdir,
self.get_first_wc(targets))
  def cmd_svn_status(self, status_args):
    """Runs svn status, looks what happened and writes the script for it.
    '-q' selects quiet verification; targets outside a WC are skipped
    with a comment line."""
    pyargs, runargs, do_chdir, targets = self.args2svntest(
                                      status_args, True, self.keep_args_of, 0)
    py = ""
    for target in targets:
      if not target.wc:
        py += '# SKIPPING NON-WC ' + target.runarg + '\n'
        continue
      if '-q' in status_args:
        pystatus = self.get_current_status(target.wc, True)
        py += (pystatus +
               "actions.run_and_verify_status(" + target.wc.py +
               ", expected_status)\n")
      else:
        pystatus = self.get_current_status(target.wc, False)
        py += (pystatus +
               "actions.run_and_verify_unquiet_status(" + target.wc.py +
               ", expected_status)\n")
    return py
  def cmd_svn_commit(self, commit_args):
    """Runs svn commit, looks what happened and writes the script for it.
    A fixed '-m log msg' is supplied; any user-given -m/--message is
    dropped from the run."""
    # these are the options that are followed by something that should not
    # be parsed as a filename in the WC.
    commit_arg_opts = [
      "--depth",
      "--with-revprop",
      "--changelist",
      # "-F", "--file", these take a file argument, don't list here.
      # "-m", "--message", treated separately
      ]
    pyargs, runargs, do_chdir, targets = self.args2svntest(
                     commit_args, True, commit_arg_opts, 0, ['-m', '--message'])
    wc = self.get_first_wc(targets)
    pychdir = self.chdir(do_chdir, wc)
    code, output, err = main.run_svn("Maybe", 'ci',
                                     '-m', 'log msg',
                                     *runargs)
    if code == 0 and len(err) < 1:
      # write a test that expects success
      output = actions.process_output_for_commit(output)
      actual_out = tree.build_tree_from_commit(output)
      py = ("expected_output = " +
            self.tree2py(actual_out, wc) + "\n\n")
      pystatus = self.get_current_status(wc)
      py += pystatus
      py += pychdir
      py += ("actions.run_and_verify_commit(" + wc.py + ", " +
             "expected_output, expected_status, " +
             "None")
    else:
      # write a test that expects error
      py = "expected_error = " + self.strlist2py(err) + "\n\n"
      py += pychdir
      py += ("actions.run_and_verify_commit(" + wc.py + ", " +
             "None, None, expected_error")
    if len(pyargs) > 0:
      py += ', ' + ', '.join(pyargs)
    py += ")"
    py += self.chdir_back(do_chdir)
    return py
  def cmd_svn_update(self, update_args):
    """Runs svn update, looks what happened and writes the script for it,
    verifying output, disk and status on success or stderr on failure."""
    pyargs, runargs, do_chdir, targets = self.args2svntest(
                                      update_args, True, self.keep_args_of, 0)
    wc = self.get_first_wc(targets)
    pychdir = self.chdir(do_chdir, wc)
    code, output, err = main.run_svn('Maybe', 'up', *runargs)
    if code == 0 and len(err) < 1:
      # write a test that expects success
      actual_out = svntest.wc.State.from_checkout(output).old_tree()
      py = ("expected_output = " +
            self.tree2py(actual_out, wc) + "\n\n")
      pydisk = self.get_current_disk(wc)
      py += pydisk
      pystatus = self.get_current_status(wc)
      py += pystatus
      py += pychdir
      py += ("actions.run_and_verify_update(" + wc.py + ", " +
             "expected_output, expected_disk, expected_status, " +
             "None, None, None, None, None, False")
    else:
      # write a test that expects error
      py = "expected_error = " + self.strlist2py(err) + "\n\n"
      py += pychdir
      py += ("actions.run_and_verify_update(" + wc.py + ", None, None, " +
             "None, expected_error, None, None, None, None, False")
    if len(pyargs) > 0:
      py += ', ' + ', '.join(pyargs)
    py += ")"
    py += self.chdir_back(do_chdir)
    return py
  def cmd_svn_switch(self, switch_args):
    """Runs svn switch, looks what happened and writes the script for it.
    Requires exactly two targets: the URL to switch to, then the WC path."""
    pyargs, runargs, do_chdir, targets = self.args2svntest(
                                      switch_args, True, self.keep_args_of, 0)
    # Sort out the targets. We need one URL and one wc node, in that order.
    if len(targets) < 2:
      raise Failure("Sorry, I'm currently enforcing two targets for svn " +
                    "switch. If you want to supply less, remove this " +
                    "check and implement whatever seems appropriate.")
    # Pull the URL and WC targets out of the generic arg lists; they are
    # passed explicitly to run_and_verify_switch() in the output script.
    wc_arg = targets[1]
    del pyargs[wc_arg.argnr]
    del runargs[wc_arg.argnr]
    url_arg = targets[0]
    del pyargs[url_arg.argnr]
    del runargs[url_arg.argnr]
    wc = wc_arg.wc
    if not wc:
      raise Failure("Unexpected argument ordering to factory's 'svn switch'?")
    pychdir = self.chdir(do_chdir, wc)
    #if '--force' in runargs:
    #  self.really_safe_rmtree(wc_arg.runarg)
    code, output, err = main.run_svn('Maybe', 'sw',
                                     url_arg.runarg, wc_arg.runarg,
                                     *runargs)
    py = ""
    if code == 0 and len(err) < 1:
      # write a test that expects success
      actual_out = tree.build_tree_from_checkout(output)
      py = ("expected_output = " +
            self.tree2py(actual_out, wc) + "\n\n")
      pydisk = self.get_current_disk(wc)
      py += pydisk
      pystatus = self.get_current_status(wc)
      py += pystatus
      py += pychdir
      py += ("actions.run_and_verify_switch(" + wc.py + ", " +
             wc_arg.pyarg + ", " + url_arg.pyarg + ", " +
             "expected_output, expected_disk, expected_status, " +
             "None, None, None, None, None, False")
    else:
      # write a test that expects error
      py = "expected_error = " + self.strlist2py(err) + "\n\n"
      py += pychdir
      py += ("actions.run_and_verify_switch(" + wc.py + ", " +
             wc_arg.pyarg + ", " + url_arg.pyarg + ", " +
             "None, None, None, expected_error, None, None, None, None, False")
    if len(pyargs) > 0:
      py += ', ' + ', '.join(pyargs)
    py += ")"
    py += self.chdir_back(do_chdir)
    return py
  def cmd_svn_checkout(self, checkout_args):
    """Runs svn checkout, looks what happened and writes the script for it.
    Requires exactly two targets: the URL to check out, then the dir."""
    pyargs, runargs, do_chdir, targets = self.args2svntest(
                                    checkout_args, True, self.keep_args_of, 0)
    # Sort out the targets. We need one URL and one dir, in that order.
    if len(targets) < 2:
      raise Failure("Sorry, I'm currently enforcing two targets for svn " +
                    "checkout. If you want to supply less, remove this " +
                    "check and implement whatever seems appropriate.")
    # We need this separate for the call to run_and_verify_checkout()
    # that's composed in the output script.
    wc_arg = targets[1]
    del pyargs[wc_arg.argnr]
    del runargs[wc_arg.argnr]
    url_arg = targets[0]
    del pyargs[url_arg.argnr]
    del runargs[url_arg.argnr]
    wc = wc_arg.wc
    pychdir = self.chdir(do_chdir, wc)
    #if '--force' in runargs:
    #  self.really_safe_rmtree(wc_arg.runarg)
    code, output, err = main.run_svn('Maybe', 'co',
                                     url_arg.runarg, wc_arg.runarg,
                                     *runargs)
    py = ""
    if code == 0 and len(err) < 1:
      # write a test that expects success
      actual_out = tree.build_tree_from_checkout(output)
      pyout = ("expected_output = " +
               self.tree2py(actual_out, wc) + "\n\n")
      py += pyout
      pydisk = self.get_current_disk(wc)
      py += pydisk
      py += pychdir
      py += ("actions.run_and_verify_checkout(" +
             url_arg.pyarg + ", " + wc_arg.pyarg +
             ", expected_output, expected_disk, None, None, None, None")
    else:
      # write a test that expects failure
      pylist = self.strlist2py(err)
      if len(err) <= 1:
        py += "expected_stderr = " + pylist + "\n\n"
      else:
        py += "expected_stderr = verify.UnorderedOutput(" + pylist + ")\n\n"
      py += pychdir
      py += ("actions.run_and_verify_svn2('OUTPUT', " +
             "[], expected_stderr, " + str(code) +
             ", " + url_arg.pyarg + ", " + wc_arg.pyarg)
    # Append the remaining args
    if len(pyargs) > 0:
      py += ', ' + ', '.join(pyargs)
    py += ")"
    py += self.chdir_back(do_chdir)
    return py
  def cmd_svn_copy_move(self, args):
    """Runs svn copy or move, looks what happened and writes the script
    for it. A URL target needs a log message, so one is supplied when
    missing."""
    pyargs, runargs, do_chdir, targets = self.args2svntest(args,
                                           False, self.keep_args_of, 1)
    if len(targets) == 2 and targets[1].is_url:
      # The second argument is a URL.
      # This needs a log message. Is one supplied?
      has_message = False
      for arg in runargs:
        if arg.startswith('-m') or arg == '--message':
          has_message = True
          break
      if not has_message:
        # add one, then rebuild the script-quoted args to match
        runargs += [ '-m', 'copy log' ]
        pyargs = []
        for arg in runargs:
          pyargs += [ self.str2svntest(arg) ]
    return self.cmd_svn_standard_run(pyargs, runargs, do_chdir,
                                     self.get_first_wc(targets))
  def cmd_echo(self, echo_args):
    """Writes a string to a file and writes the script for it.
    Understands 'echo words > file' and 'echo words >> file' (append);
    the '>'/'>>' may be glued to the file name or stand alone."""
    # split off target
    target_arg = None
    replace = True
    contents = None
    for i in range(len(echo_args)):
      arg = echo_args[i]
      if arg.startswith('>'):
        if len(arg) > 1:
          if arg[1] == '>':
            # it's a '>>'
            replace = False
            arg = arg[2:]
          else:
            arg = arg[1:]
          if len(arg) > 0:
            # target name was glued to the pipe, e.g. '>file'
            target_arg = arg
        if target_arg is None:
          # we need an index (i+1) to exist, and
          # we need (i+1) to be the only existing index left in the list.
          if i+1 != len(echo_args)-1:
            raise Failure("don't understand: echo " + " ".join(echo_args))
          target_arg = echo_args[i+1]
        else:
          # already got the target. no more indexes should exist.
          if i != len(echo_args)-1:
            raise Failure("don't understand: echo " + " ".join(echo_args))
        # everything before the pipe is the file contents
        contents = " ".join(echo_args[:i]) + '\n'
    if target_arg is None:
      raise Failure("echo needs a '>' pipe to a file name: echo " +
                    " ".join(echo_args))
    target = self.path2svntest(target_arg)
    if replace:
      main.file_write(target.runarg, contents)
      py = "main.file_write("
    else:
      main.file_append(target.runarg, contents)
      py = "main.file_append("
    py += target.pyarg + ", " + self.str2svntest(contents) + ")"
    return py
def cmd_mkdir(self, mkdir_args):
"Makes a new directory and writes the script for it."
# treat all mkdirs as -p, ignore all -options.
out = ""
for arg in mkdir_args:
if not arg.startswith('-'):
target = self.path2svntest(arg)
# don't check for not being a url,
# maybe it's desired by the test or something.
os.makedirs(target.runarg)
out += "os.makedirs(" + target.pyarg + ")\n"
return out
def cmd_rm(self, rm_args):
"Removes a directory tree and writes the script for it."
# treat all removes as -rf, ignore all -options.
out = ""
for arg in rm_args:
if not arg.startswith('-'):
target = self.path2svntest(arg)
if os.path.isfile(target.runarg):
os.remove(target.runarg)
out += "os.remove(" + target.pyarg + ")\n"
else:
self.really_safe_rmtree(target.runarg)
out += "main.safe_rmtree(" + target.pyarg + ")\n"
return out
def cmd_mv(self, mv_args):
"Moves things in the filesystem and writes the script for it."
# ignore all -options.
out = ""
sources = []
target = None
for arg in mv_args:
if not arg.startswith('-'):
if target is not None:
sources += [target]
target = self.path2svntest(arg)
out = ""
for source in sources:
out += "shutil.move(" + source.pyarg + ", " + target.pyarg + ")\n"
shutil.move(source.runarg, target.runarg)
return out
def cmd_cp(self, mv_args):
"Copies in the filesystem and writes the script for it."
# ignore all -options.
out = ""
sources = []
target = None
for arg in mv_args:
if not arg.startswith('-'):
if target is not None:
sources += [target]
target = self.path2svntest(arg)
if not target:
raise Failure("cp needs a source and a target 'cp wc_dir wc_dir_2'")
out = ""
for source in sources:
if os.path.exists(target.runarg):
raise Failure("cp target exists, remove first: " + target.pyarg)
if os.path.isdir(source.runarg):
shutil.copytree(source.runarg, target.runarg)
out += "shutil.copytree(" + source.pyarg + ", " + target.pyarg + ")\n"
elif os.path.isfile(source.runarg):
shutil.copy2(source.runarg, target.runarg)
out += "shutil.copy2(" + source.pyarg + ", " + target.pyarg + ")\n"
else:
raise Failure("cp copy source does not exist: " + source.pyarg)
return out
# End of "shell" command handling functions.
# Internal helpers:
  class WorkingCopy:
    """Defines the list of info we need around a working copy."""
    def __init__(self, py, realpath, suffix):
      # py:       expression/variable name denoting this WC in the
      #           generated script (e.g. 'wc_dir')
      # realpath: absolute path of this WC on the local disk
      # suffix:   distinguishing suffix for alternate WCs
      #           (NOTE(review): usage not visible in this file chunk --
      #           confirm at the construction sites)
      self.py = py
      self.realpath = realpath
      self.suffix = suffix
  class Target:
    """Defines the list of info we need around a command line supplied target."""
    def __init__(self, pyarg, runarg, argnr, is_url=False, wc=None):
      # pyarg:  script-quoted form used in the generated python script
      # runarg: literal form used when running the command right now
      # argnr:  index of this target in the parsed argument list
      # is_url: True when the target is a repository URL, not a path
      # wc:     the WorkingCopy this target lives in, if any
      self.pyarg = pyarg
      self.runarg = runarg
      self.argnr = argnr
      self.is_url = is_url
      self.wc = wc
def add_line(self, args, translation=None):
"Definition of how to add a new in/out line pair to LINES."
self.lines += [ [args, translation] ]
def really_safe_rmtree(self, dir):
# Safety catch. We don't want to remove outside the sandbox.
if dir.find('svn-test-work') < 0:
raise Failure("Tried to remove path outside working area: " + dir)
main.safe_rmtree(dir)
  def get_current_disk(self, wc):
    """Probes the given working copy and writes an expected_disk for it,
    expressed as tweaks against the previously remembered disk tree."""
    actual_disk = svntest.wc.State.from_wc(wc.realpath, False, True)
    actual_disk.wc_dir = wc.realpath
    make_py, prev_disk = self.get_prev_disk(wc)
    # The tests currently compare SVNTreeNode trees, so let's do that too.
    actual_disk_tree = actual_disk.old_tree()
    prev_disk_tree = prev_disk.old_tree()
    # find out the tweaks
    tweaks = self.diff_trees(prev_disk_tree, actual_disk_tree, wc)
    if tweaks == 'Purge':
      # empty tree: drop any 'expected_disk = ...' creation line
      make_py = ''
    else:
      tweaks = self.optimize_tweaks(tweaks, actual_disk_tree, wc)
    self.remember_disk(wc, actual_disk)
    pydisk = make_py + self.tweaks2py(tweaks, "expected_disk", wc)
    if len(pydisk) > 0:
      pydisk += '\n'
    return pydisk
  def get_prev_disk(self, wc):
    """Retrieves the last used expected_disk tree if any; otherwise a
    fresh greek state plus the script line that creates it."""
    make_py = ""
    # If a disk was supplied via __init__(), self.prev_disk[0] is set
    # to None, in which case we always use it, not checking WC.
    if self.prev_disk is None or \
       not self.prev_disk[0] in [None, wc.realpath]:
      disk = svntest.main.greek_state.copy()
      disk.wc_dir = wc.realpath
      self.remember_disk(wc, disk)
      make_py = "expected_disk = svntest.main.greek_state.copy()\n"
    else:
      disk = self.prev_disk[1]
    return make_py, disk
  def remember_disk(self, wc, actual):
    """Remembers the current disk tree (keyed by WC path) for future
    reference by get_prev_disk()."""
    self.prev_disk = [wc.realpath, actual]
  def get_current_status(self, wc, quiet=True):
    """Probes the given working copy and writes an expected_status for it,
    expressed as tweaks against the previously remembered status tree.
    QUIET selects `svn status -q` vs. unquiet probing."""
    if quiet:
      code, output, err = main.run_svn(None, 'status', '-v', '-u', '-q',
                                       wc.realpath)
    else:
      code, output, err = main.run_svn(None, 'status', '-v', '-u',
                                       wc.realpath)
    if code != 0 or len(err) > 0:
      raise Failure("Hmm. `svn status' failed. What now.")
    make_py, prev_status = self.get_prev_status(wc)
    actual_status = svntest.wc.State.from_status(output)
    # The tests currently compare SVNTreeNode trees, so let's do that too.
    prev_status_tree = prev_status.old_tree()
    actual_status_tree = actual_status.old_tree()
    # Get the tweaks
    tweaks = self.diff_trees(prev_status_tree, actual_status_tree, wc)
    if tweaks == 'Purge':
      # The tree is empty (happens with invalid WC dirs)
      make_py = "expected_status = wc.State(" + wc.py + ", {})\n"
      tweaks = []
    else:
      tweaks = self.optimize_tweaks(tweaks, actual_status_tree, wc)
    self.remember_status(wc, actual_status)
    pystatus = make_py + self.tweaks2py(tweaks, "expected_status", wc)
    if len(pystatus) > 0:
      pystatus += '\n'
    return pystatus
  def get_prev_status(self, wc):
    """Retrieves the last used expected_status tree if any; otherwise a
    fresh virginal state plus the script line that creates it."""
    make_py = ""
    prev_status = None
    # re-use any previous status if we are still in the same WC dir.
    # If a status was supplied via __init__(), self.prev_status[0] is set
    # to None, in which case we always use it, not checking WC.
    if self.prev_status is None or \
       not self.prev_status[0] in [None, wc.realpath]:
      # There is no or no matching previous status. Make new one.
      try:
        # If it's really a WC, use its base revision
        base_rev = actions.get_wc_base_rev(wc.realpath)
      except:
        # Else, just use zero. Whatever. (deliberate catch-all fallback)
        base_rev = 0
      prev_status = actions.get_virginal_state(wc.realpath, base_rev)
      make_py += ("expected_status = actions.get_virginal_state(" +
                  wc.py + ", " + str(base_rev) + ")\n")
    else:
      # We will re-use the previous expected_status.
      prev_status = self.prev_status[1]
      # no need to make_py anything
    return make_py, prev_status
  def remember_status(self, wc, actual_status):
    """Remembers the current status tree (keyed by WC path) for future
    reference by get_prev_status()."""
    self.prev_status = [wc.realpath, actual_status]
  def chdir(self, do_chdir, wc):
    """Pushes the current dir onto the dir stack and os.chdir()s into WC,
    returning the script lines that do the same. No-op ('' returned)
    when DO_CHDIR is False."""
    if not do_chdir:
      return ""
    self.prevdirs.append(os.getcwd())
    os.chdir(wc.realpath)
    py = ("orig_dir = os.getcwd() # Need to chdir because of '^/' args\n" +
          "os.chdir(" + wc.py + ")\n")
    return py
def chdir_back(self, do_chdir):
"Does os.chdir() back to the directory popped from the dir stack's top."
if not do_chdir:
return ""
# If this fails, there's a missing chdir() call:
os.chdir(self.prevdirs.pop())
return "os.chdir(orig_dir)\n"
def get_sorted_vars_by_pathlen(self):
"""Compose a listing of variable names to be expanded in script output.
This is intended to be stored in self.sorted_vars_by_pathlen."""
lst = []
for dict in [self.vars, self.other_wc_dirs]:
for name in dict:
runpath = dict[name][1]
if not runpath:
continue
strlen = len(runpath)
item = [strlen, name, runpath]
bisect.insort(lst, item)
return lst
def get_sorted_var_names(self):
"""Compose a listing of variable names to be declared.
This is used by TestFactory.make()."""
paths = []
urls = []
for name in self.vars:
if name.startswith('url_'):
bisect.insort(urls, [name.lower(), name])
else:
bisect.insort(paths, [name.lower(), name])
list = []
for path in paths:
list += [path[1]]
for url in urls:
list += [url[1]]
return list
def get_sorted_other_wc_dir_names(self):
"""Compose a listing of working copies to be declared with sbox.
This is used by TestFactory.make()."""
list = []
for name in self.other_wc_dirs:
bisect.insort(list, [name.lower(), name])
names = []
for item in list:
names += [item[1]]
return names
def str2svntest(self, str):
"Like str2py(), but replaces any known paths with variable names."
if str is None:
return "None"
str = str2py(str)
quote = str[0]
def replace(str, path, name, quote):
return str.replace(path, quote + " + " + name + " + " + quote)
# We want longer paths first.
for var in reversed(self.sorted_vars_by_pathlen):
name = var[1]
path = var[2]
str = replace(str, path, name, quote)
str = replace(str, self.sbox.wc_dir, 'wc_dir', quote)
str = replace(str, self.sbox.repo_url, 'url', quote)
# now remove trailing null-str adds:
# '' + url_A_C + ''
str = str.replace("'' + ",'').replace(" + ''",'')
# "" + url_A_C + ""
str = str.replace('"" + ',"").replace(' + ""',"")
# just a stupid check. tiny tweak. (don't declare wc_dir and url
# if they never appear)
if not self.used_wc_dir:
self.used_wc_dir = (re.search('\bwc_dir\b', str) is not None)
if not self.used_url:
self.used_url = str.find('url') >= 0
return str
def strlist2py(self, list):
"Given a list of strings, composes a py script that produces the same."
if list is None:
return "None"
if len(list) < 1:
return "[]"
if len(list) == 1:
return "[" + self.str2svntest(list[0]) + "]"
py = "[\n"
for line in list:
py += " " + self.str2svntest(line) + ",\n"
py += "]"
return py
def get_node_path(self, node, wc):
"Tries to return the node path relative to the given working copy."
path = node.get_printable_path()
if path.startswith(wc.realpath + os.sep):
path = path[len(wc.realpath + os.sep):]
elif path.startswith(wc.realpath):
path = path[len(wc.realpath):]
return path
def node2py(self, node, wc, prepend="", drop_empties=True):
"Creates a line like 'A/C' : Item({ ... }) for wc.State composition."
buf = StringIO()
node.print_script(buf, wc.realpath, prepend, drop_empties)
return buf.getvalue()
def tree2py(self, node, wc):
"Writes the wc.State definition for the given SVNTreeNode in given WC."
# svntest.wc.State(wc_dir, {
# 'A/mu' : Item(verb='Sending'),
# 'A/D/G/rho' : Item(verb='Sending'),
# })
buf = StringIO()
tree.dump_tree_script(node, stream=buf, subtree=wc.realpath,
wc_varname=wc.py)
return buf.getvalue()
def diff_trees(self, left, right, wc):
"""Compares the two trees given by the SVNTreeNode instances LEFT and
RIGHT in the given working copy and composes an internal list of
tweaks necessary to make LEFT into RIGHT."""
if not right.children:
return 'Purge'
return self._diff_trees(left, right, wc)
  def _diff_trees(self, left, right, wc):
    """Used by self.diff_trees(). No need to call this. See there.
    Recursively collects 'Change'/'Remove'/'Add' tweak records that
    transform LEFT into RIGHT."""
    # all tweaks collected
    tweaks = []
    # the current tweak in composition
    path = self.get_node_path(left, wc)
    tweak = []
    # node attributes: contents, then props, then atts -- for each,
    # record removals (None), modifications, and additions.
    if ((left.contents is None) != (right.contents is None)) or \
       (left.contents != right.contents):
      tweak += [ ["contents", right.contents] ]
    for key in left.props:
      if key not in right.props:
        tweak += [ [key, None] ]
      elif left.props[key] != right.props[key]:
        tweak += [ [key, right.props[key]] ]
    for key in right.props:
      if key not in left.props:
        tweak += [ [key, right.props[key]] ]
    for key in left.atts:
      if key not in right.atts:
        tweak += [ [key, None] ]
      elif left.atts[key] != right.atts[key]:
        tweak += [ [key, right.atts[key]] ]
    for key in right.atts:
      if key not in left.atts:
        tweak += [ [key, right.atts[key]] ]
    if len(tweak) > 0:
      changetweak = [ 'Change', [path], tweak]
      tweaks += [changetweak]
    if left.children is not None:
      # children present on the left but not on the right were removed
      for leftchild in left.children:
        rightchild = None
        if right.children is not None:
          rightchild = tree.get_child(right, leftchild.name)
        if rightchild is None:
          paths = leftchild.recurse(lambda n: self.get_node_path(n, wc))
          removetweak = [ 'Remove', paths ]
          tweaks += [removetweak]
    if right.children is not None:
      # children present only on the right are additions; common
      # children recurse
      for rightchild in right.children:
        leftchild = None
        if left.children is not None:
          leftchild = tree.get_child(left, rightchild.name)
        if leftchild is None:
          paths_and_nodes = rightchild.recurse(
                       lambda n: [ self.get_node_path(n, wc), n ] )
          addtweak = [ 'Add', paths_and_nodes ]
          tweaks += [addtweak]
        else:
          tweaks += self._diff_trees(leftchild, rightchild, wc)
    return tweaks
  def optimize_tweaks(self, tweaks, actual_tree, wc):
    """Given an internal list of tweaks, make them optimal by common sense:
    merge all removals into one and all additions into one, factor mods
    shared by (nearly) all nodes into a blanket 'Change', and coalesce
    changes with identical mods or identical path lists."""
    if tweaks == 'Purge':
      return tweaks
    subtree = actual_tree.find_node(wc.realpath)
    if not subtree:
      subtree = actual_tree
    remove_paths = []
    additions = []
    changes = []
    # split the incoming tweaks by kind
    for tweak in tweaks:
      if tweak[0] == 'Remove':
        remove_paths += tweak[1]
      elif tweak[0] == 'Add':
        additions += tweak[1]
      else:
        changes += [tweak]
    # combine removals
    removal = []
    if len(remove_paths) > 0:
      removal = [ [ 'Remove', remove_paths] ]
    # combine additions
    addition = []
    if len(additions) > 0:
      addition = [ [ 'Add', additions ] ]
    # find those changes that should be done on all nodes at once.
    def remove_mod(mod):
      # strip MOD from every per-node change list
      for change in changes:
        if mod in change[2]:
          change[2].remove(mod)
    seen = []
    tweak_all = []
    for change in changes:
      tweak = change[2]
      for mod in tweak:
        if mod in seen:
          continue
        seen += [mod]
        # here we see each single "name=value" tweak in mod.
        # Check if the actual tree had this anyway all the way through.
        name = mod[0]
        val = mod[1]
        if name == 'contents' and val is None:
          continue;
        def check_node(node):
          if (
              (name == 'contents' and node.contents == val)
              or
              (node.props and (name in node.props) and node.props[name] == val)
              or
              (node.atts and (name in node.atts) and node.atts[name] == val)):
            # has this same thing set. count on the left.
            return [node, None]
          return [None, node]
        results = subtree.recurse(check_node)
        have = []
        havent = []
        for result in results:
          if result[0]:
            have += [result[0]]
          else:
            havent += [result[1]]
        if havent == []:
          # ok, then, remove all tweaks that are like this, then
          # add a generic tweak.
          remove_mod(mod)
          tweak_all += [mod]
        elif len(havent) < len(have) * 3: # this is "an empirical factor"
          remove_mod(mod)
          tweak_all += [mod]
          # record the *other* nodes' actual item, overwritten above
          for node in havent:
            name = mod[0]
            if name == 'contents':
              value = node.contents
            elif name in node.props:
              value = node.props[name]
            elif name in node.atts:
              value = node.atts[name]
            else:
              continue
            changes += [ ['Change',
                          [self.get_node_path(node, wc)],
                          [[name, value]]
                        ]
                       ]
    # combine those paths that have exactly the same changes
    i = 0
    j = 0
    while i < len(changes):
      # find other changes that are identical
      j = i + 1
      while j < len(changes):
        if changes[i][2] == changes[j][2]:
          changes[i][1] += changes[j][1]
          del changes[j]
        else:
          j += 1
      i += 1
    # combine those changes that have exactly the same paths
    i = 0
    j = 0
    while i < len(changes):
      # find other paths that are identical
      j = i + 1
      while j < len(changes):
        if changes[i][1] == changes[j][1]:
          changes[i][2] += changes[j][2]
          del changes[j]
        else:
          j += 1
      i += 1
    if tweak_all != []:
      # the blanket tweak (empty path list) goes first
      changes = [ ['Change', [], tweak_all ] ] + changes
    return removal + addition + changes
def tweaks2py(self, tweaks, var_name, wc):
    """Given an internal list of tweaks, write the tweak script for it.

    Emits python test-script source that applies TWEAKS to the state
    variable VAR_NAME (a wc.State) for working copy WC.
    """
    py = ""
    if tweaks is None:
        return ""
    # 'Purge' replaces the whole expected state with an empty one.
    if tweaks == 'Purge':
        return var_name + " = wc.State(" + wc.py + ", {})\n"
    for tweak in tweaks:
        if tweak[0] == 'Remove':
            # e.g.  state.remove('A/B', 'A/C')
            py += var_name + ".remove("
            paths = tweak[1]
            py += self.str2svntest(paths[0])
            for path in paths[1:]:
                py += ", " + self.str2svntest(path)
            py += ")\n"
        elif tweak[0] == 'Add':
            # add({'A/D/H/zeta' : Item(status=' ', wc_rev=9), ...})
            py += var_name + ".add({"
            adds = tweak[1]
            for add in adds:
                path = add[0]
                node = add[1]
                py += self.node2py(node, wc, "\n ", False)
            py += "\n})\n"
        else:
            # 'Change': state.tweak('path', ..., name=value, ...)
            paths = tweak[1]
            mods = tweak[2]
            if mods != []:
                py += var_name + ".tweak("
                for path in paths:
                    py += self.str2svntest(path) + ", "
                def mod2py(mod):
                    # Render one name=value modification as a kwarg.
                    return mod[0] + "=" + self.str2svntest(mod[1])
                py += mod2py(mods[0])
                for mod in mods[1:]:
                    py += ", " + mod2py(mod)
                py += ")\n"
    return py
def path2svntest(self, path, argnr=None, do_remove_on_new_wc_path=True):
    """Given an input argument, do one hell of a path expansion on it.
    ARGNR is simply inserted into the resulting Target.
    Returns a self.Target instance.

    Recognizes wc_dir/url wildcards ('wc_dir', '$WC', 'url', '$URL', ...),
    explicit URL schemes, and suffixed alternate working copies like
    'wc_dir.other', and ensures python variables exist for the result.
    """
    wc = self.WorkingCopy('wc_dir', self.sbox.wc_dir, None)
    url = self.sbox.repo_url # do we need multiple URLs too??
    # Guess the path separator actually used in the argument.
    pathsep = '/'
    if path.find('/') < 0 and path.find('\\') >= 0:
        pathsep = '\\'
    is_url = False
    # If you add to these, make sure you add longer ones first, to
    # avoid e.g. '$WC_DIR' matching '$WC' first.
    wc_dir_wildcards = ['wc_dir', 'wcdir', '$WC_DIR', '$WC']
    url_wildcards = ['url', '$URL']
    first = path.split(pathsep, 1)[0]
    if first in wc_dir_wildcards:
        path = path[len(first):]
    elif first in url_wildcards:
        path = path[len(first):]
        is_url = True
    else:
        for url_scheme in ['^/', 'file:/', 'http:/', 'svn:/', 'svn+ssh:/']:
            if path.startswith(url_scheme):
                is_url = True
                # keep it as it is
                pyarg = self.str2svntest(path)
                runarg = path
                return self.Target(pyarg, runarg, argnr, is_url, None)
        for wc_dir_wildcard in wc_dir_wildcards:
            if first.startswith(wc_dir_wildcard):
                # The first path element starts with "wc_dir" (or similar),
                # but it has more attached to it. Like "wc_dir.2" or "wc_dir_other"
                # Record a new wc dir name.
                # try to figure out a nice suffix to pass to sbox.
                # (it will create a new dir called sbox.wc_dir + '.' + suffix)
                suffix = ''
                if first[len(wc_dir_wildcard)] in ['.','-','_']:
                    # it's a separator already, don't duplicate the dot. (warm&fuzzy)
                    suffix = first[len(wc_dir_wildcard) + 1:]
                if len(suffix) < 1:
                    suffix = first[len(wc_dir_wildcard):]
                if len(suffix) < 1:
                    raise Failure("no suffix supplied to other-wc_dir arg")
                # Streamline the var name
                suffix = suffix.replace('.','_').replace('-','_')
                other_wc_dir_varname = 'wc_dir_' + suffix
                path = path[len(first):]
                real_path = self.get_other_wc_real_path(other_wc_dir_varname,
                                                        suffix,
                                                        do_remove_on_new_wc_path)
                wc = self.WorkingCopy(other_wc_dir_varname,
                                      real_path, suffix)
                # found a match, no need to loop further, but still process
                # the path further.
                break
    if len(path) < 1 or path == pathsep:
        # The argument was just the wc root / repo root itself.
        if is_url:
            self.used_url = True
            pyarg = 'url'
            runarg = url
            wc = None
        else:
            if wc.suffix is None:
                self.used_wc_dir = True
            pyarg = wc.py
            runarg = wc.realpath
    else:
        pathelements = split_remove_empty(path, pathsep)
        # make a new variable, if necessary
        if is_url:
            pyarg, runarg = self.ensure_url_var(pathelements)
            wc = None
        else:
            pyarg, runarg = self.ensure_path_var(wc, pathelements)
    return self.Target(pyarg, runarg, argnr, is_url, wc)
def get_other_wc_real_path(self, varname, suffix, do_remove):
    """Create or retrieve the path of an alternate working copy.

    VARNAME is the python variable name used in the generated script,
    SUFFIX the sbox working-copy suffix (sbox.wc_dir + '.' + suffix),
    DO_REMOVE is forwarded to sbox.add_wc_path() when a new path must be
    registered. Returns the real filesystem path; results are cached in
    self.other_wc_dirs.
    """
    if varname in self.other_wc_dirs:
        return self.other_wc_dirs[varname][1]
    # see if there is a wc already in the sbox
    path = self.sbox.wc_dir + '.' + suffix
    if path in self.sbox.test_paths:
        py = "sbox.wc_dir + '." + suffix + "'"
    else:
        # else, we must still create one.
        path = self.sbox.add_wc_path(suffix, do_remove)
        py = "sbox.add_wc_path(" + str2py(suffix)
        if not do_remove:
            py += ", remove=False"
        py += ')'
    # Cache [py-expression, real path] and refresh the substitution list
    # used by str2svntest(). (An unused duplicate 'value' local removed.)
    self.other_wc_dirs[varname] = [py, path]
    self.sorted_vars_by_pathlen = self.get_sorted_vars_by_pathlen()
    return path
def define_var(self, name, value):
    """Record variable NAME with VALUE, refusing conflicting redefinitions."""
    if name not in self.vars:
        # New variable: store it and refresh the substitution list
        # that str2svntest() consults.
        self.vars[name] = value
        self.sorted_vars_by_pathlen = self.get_sorted_vars_by_pathlen()
    elif self.vars[name] != value:
        # Same name bound to a different value: that's a programming error.
        raise Failure("Variable name collision. Hm, fix factory.py?")
    # An identical redefinition is silently accepted.
def ensure_path_var(self, wc, pathelements):
    """Make sure a python variable exists for a path inside working copy WC.

    Returns (variable_name, real_path) for the path given by PATHELEMENTS.
    """
    # special case: '.' simply refers to the working copy root itself.
    if pathelements == ['.']:
        return wc.py, wc.realpath
    varname = "_".join(pathelements)
    if wc.suffix is None:
        self.used_wc_dir = True
    else:
        # This is an "other" working copy (not the default). Its suffix
        # prefixes the variable, e.g. other_A_D for wc_dir_other/A/D.
        varname = wc.suffix + "_" + varname
        if varname[0].isdigit():
            varname = "_" + varname
    # Build the os.path.join(...) expression for the generated script.
    quoted = ["'" + element + "'" for element in pathelements]
    py = 'os.path.join(' + ', '.join([wc.py] + quoted) + ')'
    run = os.path.join(wc.realpath, *pathelements)
    self.define_var(varname, [py, run])
    return varname, run
def ensure_url_var(self, pathelements):
    """Make sure a python variable exists for a repository URL path.

    Returns (variable_name, real_url) for PATHELEMENTS below the repo root.
    """
    self.used_url = True
    tail = "/" + "/".join(pathelements)
    varname = "url_" + "_".join(pathelements)
    # The generated expression is 'url' plus the quoted path tail.
    if pathelements:
        py = 'url' + " + " + str2py(tail)
    else:
        py = 'url'
    run = self.sbox.repo_url + tail
    self.define_var(varname, [py, run])
    return varname, run
def get_first_wc(self, target_list):
    """In a list of Target instances, find the first one that is in a
    working copy and return that WorkingCopy. Default to sbox.wc_dir.
    This is useful if we need a working copy for a '^/' URL."""
    found = next((t.wc for t in target_list if t.wc), None)
    if found is not None:
        return found
    # No target carried a working copy: fall back to the default one.
    return self.WorkingCopy('wc_dir', self.sbox.wc_dir, None)
def args2svntest(self, args, append_wc_dir_if_missing = False,
                 keep_args_of = [], keep_first_count = 1,
                 drop_with_arg = []):
    """Tries to be extremely intelligent at parsing command line arguments.
    It needs to know which args are file targets that should be in a
    working copy. File targets are magically expanded.
    args: list of string tokens as passed to factory.make(), e.g.
      ['svn', 'commit', '--force', 'wc_dir2']
    append_wc_dir_if_missing: It's a switch.
    keep_args_of: See TestFactory.keep_args_of (comment in __init__)
    keep_first_count: Don't expand the first N non-option args. This is used
      to preserve e.g. the token 'update' in '[svn] update wc_dir'
      (the 'svn' is usually split off before this function is called).
    drop_with_arg: list of string tokens that are commandline options with
      following argument which we want to drop from the list of args
      (e.g. -m message).

    Returns (pyargs, runargs, do_chdir, targets).
    NOTE(review): the mutable default arguments ([]) are shared across
    calls -- confirm no caller mutates them.
    """
    wc_dir = self.sbox.wc_dir
    url = self.sbox.repo_url
    target_supplied = False
    pyargs = []     # argument expressions for the generated script
    runargs = []    # real argument values used to run the command now
    do_chdir = False
    targets = []
    wc_dirs = []
    i = 0
    while i < len(args):
        arg = args[i]
        if arg in drop_with_arg:
            # skip this and the next arg
            if not arg.startswith('--') and len(arg) > 2:
                # it is a concatenated arg like -r123 instead of -r 123
                # skip only this one. Do nothing.
                i = i
            else:
                # skip this and the next arg
                i += 1
        elif arg.startswith('-'):
            # keep this option arg verbatim.
            pyargs += [ self.str2svntest(arg) ]
            runargs += [ arg ]
            # does this option expect a non-filename argument?
            # take that verbatim as well.
            if arg in keep_args_of:
                i += 1
                if i < len(args):
                    arg = args[i]
                    pyargs += [ self.str2svntest(arg) ]
                    runargs += [ arg ]
        elif keep_first_count > 0:
            # args still to be taken verbatim.
            pyargs += [ self.str2svntest(arg) ]
            runargs += [ arg ]
            keep_first_count -= 1
        elif arg.startswith('^/'):
            # this is a ^/url, keep it verbatim.
            # if we use "^/", we need to chdir(wc_dir).
            do_chdir = True
            pyarg = str2py(arg)
            targets += [ self.Target(pyarg, arg, len(pyargs), True, None) ]
            pyargs += [ pyarg ]
            runargs += [ arg ]
        else:
            # well, then this must be a filename or url, autoexpand it.
            target = self.path2svntest(arg, argnr=len(pyargs))
            pyargs += [ target.pyarg ]
            runargs += [ target.runarg ]
            target_supplied = True
            targets += [ target ]
        i += 1
    if not target_supplied and append_wc_dir_if_missing:
        # add a simple wc_dir target
        self.used_wc_dir = True
        wc = self.WorkingCopy('wc_dir', wc_dir, None)
        targets += [ self.Target('wc_dir', wc_dir, len(pyargs), False, wc) ]
        pyargs += [ 'wc_dir' ]
        runargs += [ wc_dir ]
    return pyargs, runargs, do_chdir, targets
###### END of the TestFactory class ######
# Quotes-preserving text wrapping for output
def find_quote_end(text, i):
    """In string TEXT, find the index of the character that closes the
    quote opening at TEXT[i]. Backslash escapes are skipped. If the quote
    never closes, the last index of TEXT is returned.
    (Triple quotes are not handled here.)"""
    opening = text[i]
    pos = i + 1
    length = len(text)
    while pos < length:
        ch = text[pos]
        if ch == '\\':
            # skip over the escaped character
            pos += 1
        elif ch == opening:
            return pos
        pos += 1
    return length - 1
class MyWrapper(textwrap.TextWrapper):
    "A textwrap.TextWrapper that doesn't break a line within quotes."
    ### TODO regexes would be nice, maybe?
    def _split(self, text):
        # Override of TextWrapper._split(): return the chunks the wrapper
        # may break between. Quoted substrings are kept as single chunks.
        parts = []
        i = 0
        start = 0
        # This loop will break before and after each space, but keep
        # quoted strings in one piece. Example, breaks marked '/':
        #   /(one,/ /two(blagger),/ /'three three three',)/
        while i < len(text):
            if text[i] in ['"', "'"]:
                # handle """ quotes. (why, actually?)
                if text[i:i+3] == '"""':
                    end = text[i+3:].find('"""')
                    if end >= 0:
                        # jump to the last char of the closing triple quote
                        i += end + 2
                    else:
                        # unterminated: swallow the rest of the text
                        i = len(text) - 1
                else:
                    # handle normal quotes
                    i = find_quote_end(text, i)
            elif text[i].isspace():
                # split off previous section, if any
                if start < i:
                    parts += [text[start:i]]
                    start = i
                # split off this space
                parts += [text[i]]
                start = i + 1
            i += 1
        # trailing chunk after the last whitespace
        if start < len(text):
            parts += [text[start:]]
        return parts
def wrap_each_line(str, ii, si, blw):
    """Wrap lines to a defined width (<80 chars). Feed the lines single to
    MyWrapper, so that it preserves the current line endings already in there.
    We only want to insert new wraps, not remove existing newlines.
    (The 'blw' parameter is accepted for interface compatibility; it is
    not consulted here.)"""
    wrapper = MyWrapper(77, initial_indent=ii,
                        subsequent_indent=si)
    # Empty lines pass through untouched; everything else gets filled.
    wrapped = [wrapper.fill(line) if line != '' else line
               for line in str.splitlines()]
    return '\n'.join(wrapped)
# Other miscellaneous helpers
def sh2str(string):
    """Undo backslash escapes via the 'string-escape' codec (Python 2 only).
    None is passed through unchanged."""
    return None if string is None else string.decode("string-escape")
def get_quote_style(str):
    """Find which quote is the outer one, ' or ".

    Returns (quote_char, at) where AT is the index just past the opening
    quote, or (None, None) when STR contains no quote at all."""
    single = str.find("'")
    double = str.find('"')
    quote_char = None
    at = None
    # If single == double, both are -1, i.e. no quote was found at all.
    if single != double:
        # Pick whichever quote occurs first (a missing one has index -1).
        if single >= 0 and (double < 0 or single < double):
            quote_char, at = "'", single + 1
        else:
            quote_char, at = '"', double + 1
    return quote_char, at
def split_remove_empty(str, sep):
    """Split STR at SEP and drop the empty elements.

    Always returns a real list: the previous filter() call returned a
    lazy iterator on Python 3 (and the 'len(item) > 0' test was redundant
    with plain truthiness).
    """
    return [item for item in str.split(sep) if item]
def str2py(str):
    """Return STR enclosed in quotes, suitable for embedding in a py script.

    None becomes the literal "None". The quote character is chosen so
    that the fewest escapes are needed (uses the Python 2 'string-escape'
    codec). The unreachable trailing 'return str' after the exhaustive
    if/else has been removed.
    """
    if str is None:
        return "None"
    # try to make a nice choice of quoting character
    if str.find("'") >= 0:
        return '"' + str.encode("string-escape"
                                ).replace("\\'", "'"
                                          ).replace('"', '\\"') + '"'
    else:
        return "'" + str.encode("string-escape") + "'"
### End of file.
| {
"content_hash": "5a4871c4954685a63d8b86c20b7b37e8",
"timestamp": "",
"source": "github",
"line_count": 1654,
"max_line_length": 79,
"avg_line_length": 31.081620314389358,
"alnum_prop": 0.5660098426345581,
"repo_name": "jmckaskill/subversion",
"id": "cb387e83e241cc3b8a03a0f15b4522bbb69c9e94",
"size": "61376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subversion/tests/cmdline/svntest/factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16930676"
},
{
"name": "C#",
"bytes": "6994"
},
{
"name": "C++",
"bytes": "509265"
},
{
"name": "Emacs Lisp",
"bytes": "467395"
},
{
"name": "Java",
"bytes": "1463304"
},
{
"name": "Objective-C",
"bytes": "255507"
},
{
"name": "Perl",
"bytes": "227763"
},
{
"name": "Python",
"bytes": "5030884"
},
{
"name": "Ruby",
"bytes": "436873"
},
{
"name": "Shell",
"bytes": "260031"
},
{
"name": "VimL",
"bytes": "4070"
}
],
"symlink_target": ""
} |
from datetime import datetime
import types
import mongoengine as me
import cow.server as server
import cow.plugins.mongoengine_plugin as mongoengine_plugin
from preggy import expect
import derpconf.config as config
import tornado.web
import bzz
import bzz.signals as signals
import bzz.utils as utils
import tests.base as base
RESPONSE_400 = '<html><title>400:badrequest</title><body>400:badrequest</body></html>'
class MongoEngineEndToEndTestCase(base.ApiTestCase):
    """End-to-end CRUD tests for bzz's mongoengine ModelHive routes."""

    def __get_test_data(self):
        # Scripted request sequence. Each entry is:
        # (method, url, fetch-kwargs, expected status,
        #  body transform or None, expected body / assertion callback).
        to_json = lambda body: load_json(body)
        return [
            ('GET', '/user', {}, 200, to_json, []),
            ('POST', '/user', dict(body="name=test%20user&age=32"), 200, None, None),
            ('GET', '/user', {}, 200, to_json, self.__assert_user_data(name="test user", age=32)),
            ('GET', '/user/test%20user', {}, 200, to_json, self.__assert_user_data(name="test user", age=32)),
            ('PUT', '/user/test%20user', dict(body="age=31"), 200, None, None),
            ('GET', '/user/test%20user', {}, 200, to_json, self.__assert_user_data(name="test user", age=31)),
            ('POST', '/user', dict(body="name=test-user2&age=32"), 200, None, None),
            ('DELETE', '/user/test-user2', {}, 200, None, None),
            ('GET', '/user', {}, 200, to_json, self.__assert_len(1)),
            ('GET', '/team', {}, 200, to_json, []),
            ('POST', '/team', dict(body="code=team-1&owner=test%20user"), 200, None, None),
            ('GET', '/team/team-1', {}, 200, to_json, self.__assert_team_data(name="team-1", owner="test user")),
            ('POST', '/user', dict(body="name=test-user3&age=32"), 200, None, None),
            ('PUT', '/team/team-1', dict(body="owner=test-user3"), 200, None, None),
            ('PUT', '/team/team-1', dict(body="members[]=test-user3"), 400, None, RESPONSE_400),
            ('GET', '/team/team-1', {}, 200, to_json, self.__assert_team_data(name='team-1', member_count=0)),
            ('POST', '/team', dict(body="code=team-2&owner=test%20user&members[]=test%20user"), 200, None, None),
            ('GET', '/team/team-2', {}, 200, to_json, self.__assert_team_data(name='team-2', member_count=1)),
            ('POST', '/team', dict(body="code=team-3&owner=test%20user&members[]=test%20user&members[]=test-user3"),
             200, None, None),
            ('GET', '/team/team-3', {}, 200, to_json, self.__assert_team_data(name='team-3', member_count=2)),
            ('DELETE', '/team/team-2', {}, 200, None, None),
            ('DELETE', '/team/team-3', {}, 200, None, None),
            ('GET', '/team', {}, 200, to_json, self.__assert_len(1)),
            ('POST', '/team/team-1/members', dict(body="members[]=test%20user"), 200, None, None),
            ('GET', '/team/team-1/members', {}, 200, to_json, self.__assert_len(1)),
            ('POST', '/user', dict(body="name=test-user4&age=32"), 200, None, None),
            ('POST', '/team/team-1/members', dict(body="members[]=test-user4"), 200, None, None),
            ('DELETE', '/team/team-1/members/test-user4', {}, 200, None, None),
            ('PUT', '/team/team-1/members/test-user4', dict(body=""), 400, None, RESPONSE_400),
            ('GET', '/team/team-1/members', {}, 200, to_json, self.__assert_len(1)),
            ('GET', '/user/test-user4', {}, 200, to_json, self.__assert_user_data(name="test-user4", age=32)),
            ('POST', '/team/team-1/projects', dict(body="name=project-1&module.name=module-name"), 200, None, None),
            ('GET', '/team/team-1/projects', {}, 200, to_json, self.__assert_len(1)),
            ('GET', '/team/team-1/projects/project-1', {}, 200, to_json,
             self.__assert_project_data(name="project-1", module="module-name")),
            ('GET', '/team/team-1/projects/project-1/module', {}, 200, to_json, self.__assert_module_data(name="module-name")),
        ]

    def test_end_to_end_flow(self):
        # Replay every scripted request against the live test server, in order.
        data = self.__get_test_data()
        print("")
        print("")
        print("")
        print("Doing end-to-end test:")
        print("")
        for url_arguments in data:
            self.validate_request(url_arguments)
        print("")

    def setUp(self):
        super(MongoEngineEndToEndTestCase, self).setUp()
        # Reset bzz signal receivers so handlers registered by other
        # tests do not leak into this run.
        signals.post_create_instance.receivers = {}
        signals.post_update_instance.receivers = {}
        signals.post_delete_instance.receivers = {}
        # Start from an empty database.
        EndToEndUser.objects.delete()
        EndToEndTeam.objects.delete()

    def get_config(self):
        # MongoDB connection consumed by the cow mongoengine plugin.
        return dict(
            MONGO_DATABASES={
                'default': {
                    'host': 'localhost',
                    'port': 3334,
                    'database': 'bzz_test'
                }
            },
        )

    def get_server(self):
        # Build the tornado server under test from this config.
        cfg = config.Config(**self.get_config())
        self.server = TestServer(config=cfg)
        return self.server

    def __assert_project_data(self, name=None, module=None):
        # Build a callback asserting a serialized project payload.
        def handle(obj):
            if name is not None:
                expect(obj['name']).to_be_like(name)
            if module is not None:
                expect(obj['module']).not_to_be_null()
                expect(obj['module']['name']).to_be_like(module)
        return handle

    def __assert_module_data(self, name=None):
        # Build a callback asserting a serialized module payload.
        def handle(obj):
            if name is not None:
                expect(obj['name']).to_be_like(name)
        return handle

    def __assert_user_data(self, created_at=None, age=None, id_=None, name=None):
        # Build a callback asserting a serialized user payload
        # (accepts either a single object or a one-element list).
        def handle(obj):
            if isinstance(obj, (list, tuple)):
                obj = obj[0]
            if created_at is not None:
                expect(obj['created_at']).to_be_like(created_at)
            if age is not None:
                expect(obj['age']).to_equal(age)
            if id_ is not None:
                expect(obj['id']).to_be_like(id_)
            if name is not None:
                expect(obj['name']).to_equal(name)
        return handle

    def __assert_team_data(self, owner=None, name=None, member_count=None):
        # Build a callback asserting a serialized team payload.
        def handle(obj):
            if isinstance(obj, (list, tuple)):
                obj = obj[0]
            if owner is not None:
                expect(obj['owner']).not_to_be_null()
                expect(obj['owner']['name']).to_equal(owner)
            if name is not None:
                expect(obj['code']).to_equal(name)
            if member_count is not None:
                expect(obj['members']).to_length(member_count)
        return handle

    def __assert_len(self, expected_length):
        # Build a callback asserting that the payload is a list of
        # exactly EXPECTED_LENGTH items.
        def handle(obj):
            expect(obj).not_to_be_null()
            expect(obj).to_be_instance_of(list)
            expect(obj).to_length(expected_length)
        return handle

    def validate_request(self, url_arguments):
        # Perform one scripted request and check status plus (optional) body.
        method, url, options, expected_status_code, transform_body, expected_body = url_arguments
        print("B %s %s..." % (method, url))
        self.http_client.fetch(
            self.get_url(url),
            method=method,
            callback=self.stop,
            **options
        )
        response = self.wait()
        expect(response.code).to_equal(expected_status_code)
        body = response.body
        if transform_body is not None:
            body = transform_body(response.body)
        # expected_body is either an assertion callback or a literal value.
        if expected_body and isinstance(expected_body, types.FunctionType):
            expected_body(body)
        elif expected_body:
            expect(body).to_be_like(expected_body)
        print("A %s %s - %s" % (method, url, response.code))
def load_json(json_string):
    """Parse JSON from str or bytes (decodes UTF-8 bytes as a fallback)."""
    try:
        return utils.loads(json_string)
    except ValueError:
        # Python 3 hands us bytes here; decode and retry.
        return utils.loads(json_string.decode('utf-8'))
class NamedEmbeddedDocument(me.EmbeddedDocument):
    """Base for embedded documents identified by a single 'name' field."""
    # allow_inheritance lets Project/Module subclass this embedded type
    meta = {'allow_inheritance': True}
    name = me.StringField()
class EndToEndUser(me.Document):
    """Fixture user document; 'name' serves as its resource identifier."""
    meta = {'collection': 'EndToEndUser'}

    name = me.StringField()
    age = me.IntField()
    created_at = me.DateTimeField(default=datetime.now)

    @classmethod
    def get_id_field_name(cls):
        # bzz uses this field (rather than the ObjectId) in resource URLs.
        # (classmethod first parameter renamed self -> cls, per convention)
        return EndToEndUser.name
class EndToEndTeam(me.Document):
    """Fixture team document with references, a list and embedded docs."""
    meta = {'collection': 'EndToEndTeam'}

    code = me.StringField()
    owner = me.ReferenceField(EndToEndUser)
    members = me.ListField(me.ReferenceField(EndToEndUser))
    projects = me.ListField(me.EmbeddedDocumentField("Project"))

    @classmethod
    def get_id_field_name(cls):
        # bzz uses 'code' (not the ObjectId) in resource URLs.
        # (classmethod first parameter renamed self -> cls, per convention)
        return EndToEndTeam.code

    def to_dict(self):
        """Serialize to a plain dict (owner/members reduced to names)."""
        return {
            'code': self.code,
            'owner': {
                'name': self.owner.name
            },
            'members': [member.name for member in self.members],
            'projects': self.projects
        }
class Project(NamedEmbeddedDocument):
    """Embedded project; identified by the inherited 'name' field."""
    module = me.EmbeddedDocumentField("Module")

    @classmethod
    def get_id_field_name(cls):
        # (classmethod first parameter renamed self -> cls, per convention)
        return Project.name
class Module(NamedEmbeddedDocument):
    """Embedded module; identified by the inherited 'name' field."""

    @classmethod
    def get_id_field_name(cls):
        # (classmethod first parameter renamed self -> cls, per convention)
        return Module.name
class VersionHandler(tornado.web.RequestHandler):
    """Plain handler that exposes the installed bzz version string."""
    def get(self):
        self.write(bzz.__version__)
class TestServer(server.Server):
    """cow server wired with the mongoengine plugin and the model routes."""
    def get_plugins(self):
        # Enables MongoEngine connection handling for the configured DBs.
        return [
            mongoengine_plugin.MongoEnginePlugin
        ]

    def get_handlers(self):
        # REST routes for both fixture documents plus a version endpoint.
        routes = [
            bzz.ModelHive.routes_for('mongoengine', EndToEndUser, resource_name="user"),
            bzz.ModelHive.routes_for('mongoengine', EndToEndTeam, resource_name="team"),
            ('/version', VersionHandler),
        ]
        return bzz.flatten(routes)
| {
"content_hash": "f7d6c49cc4ec6d2ab0ebd6c173913edf",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 127,
"avg_line_length": 36.16603773584906,
"alnum_prop": 0.5623956594323873,
"repo_name": "bzz-framework/bzz",
"id": "dc523a42ff68954838e846bc10ffdc57292931a3",
"size": "9835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_model_hive_e2e.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182833"
}
],
"symlink_target": ""
} |
import lief
import sys
import time


def timed_parse(path, config):
    """Parse the Mach-O binary at *path* with *config*, printing the elapsed time."""
    # perf_counter is monotonic and high-resolution -- better suited to
    # timing than time.time().
    start = time.perf_counter()
    lief.MachO.parse(path, config)
    print(f"Done in {time.perf_counter() - start}s")


# First pass: skip the (expensive) dyld info.
config = lief.MachO.ParserConfig()
config.parse_dyld_bindings = False
config.parse_dyld_exports = False
config.parse_dyld_rebases = False
timed_parse(sys.argv[1], config)

# Second pass: parse the full dyld information, for comparison.
config.full_dyldinfo(True)
timed_parse(sys.argv[1], config)
| {
"content_hash": "cb3c356624e956dda02a11701582d941",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 37,
"avg_line_length": 20.130434782608695,
"alnum_prop": 0.714902807775378,
"repo_name": "lief-project/LIEF",
"id": "7d6f483bf7be72079e3b07766c6bf1c793f13af6",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/python/macho_parser_tweak.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "115380"
},
{
"name": "C++",
"bytes": "5516502"
},
{
"name": "CMake",
"bytes": "185657"
},
{
"name": "Dockerfile",
"bytes": "994"
},
{
"name": "Objective-C",
"bytes": "736"
},
{
"name": "Python",
"bytes": "305524"
},
{
"name": "Shell",
"bytes": "21907"
},
{
"name": "SourcePawn",
"bytes": "130615"
}
],
"symlink_target": ""
} |
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from tinymce import models as tinymce_models
from econordeste.current_user import get_current_user
class Calendar(models.Model):
    """A calendar event, addressed by start date plus slug.

    Removed: a no-op save() override that only called super() (inherited
    behavior is identical) and commented-out dead code in
    get_absolute_url().
    """
    date_pub = models.DateTimeField(_(u'Data da Publicação'),
                                    auto_now_add=True)
    date_start = models.DateTimeField(_(u'Data de Início'))
    date_end = models.DateTimeField(_(u'Data de Final'))
    created_by = models.ForeignKey(User, verbose_name=u'Criado por',
                                   editable=False, default=get_current_user)
    title = models.CharField(_(u'Título do Evento'), max_length=150)
    slug = models.SlugField(_(u'Link do Evento'), max_length=150,
                            unique=True)
    body = tinymce_models.HTMLField(_(u'Descrição do Evento'))

    def get_absolute_url(self):
        # Month/day are zero-padded via strftime to match the URL pattern.
        return reverse('event:calendar_date_detail',
                       kwargs={'year': self.date_start.year,
                               'month': self.date_start.strftime('%m'),
                               'day': self.date_start.strftime('%d'),
                               'slug': self.slug})

    def __unicode__(self):
        return unicode(self.title)

    class Meta:
        verbose_name = _(u'Calendário de Evento')
        verbose_name_plural = _(u'Calendário de Eventos')
        ordering = ['-date_start']
| {
"content_hash": "80ee654ec1ff747a23887feffb9b4c44",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 76,
"avg_line_length": 41.146341463414636,
"alnum_prop": 0.5939537640782454,
"repo_name": "klebercode/econordeste",
"id": "e10300e05796661868acb5352f6a2ff2868083ab",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "econordeste/event/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "148731"
},
{
"name": "HTML",
"bytes": "47249"
},
{
"name": "JavaScript",
"bytes": "86271"
},
{
"name": "Python",
"bytes": "45500"
}
],
"symlink_target": ""
} |
from lamp_ivideon.settings import LOGGER
class Lamp:
    """Simple lamp model: power state, RGB color and pluggable command actions."""

    def __init__(self):
        self._on = False
        self._color = (None, None, None)
        self._actions = []

    @property
    def on(self):
        """Power state (True when the lamp is switched on)."""
        return self._on

    @on.setter
    def on(self, value):
        self._on = value

    @property
    def actions(self):
        """The list of registered command actions."""
        return self._actions

    @actions.setter
    def actions(self, value):
        # NOTE: assigning *registers* an action (append, de-duplicated)
        # rather than replacing the list -- existing callers rely on this.
        if value not in self._actions:
            self._actions.append(value)

    @property
    def color(self):
        """Current color as an (r, g, b) tuple."""
        return self._color

    @color.setter
    def color(self, value):
        self._color = value

    def execute_command(self, tag, value):
        """Run every registered action matching *tag*; log unknown commands."""
        executed = False
        for action in self._actions:
            if action.is_execute(tag):
                action.execute(value)
                executed = True
        if not executed:
            # fixed typo in the log message: was 'unknowm command'
            LOGGER.debug('unknown command')

    def draw(self):
        """Return a human-readable status line for the lamp."""
        return 'Status lamp: {}; color: {}-{}-{}'.format(self._on, *self._color)
| {
"content_hash": "e6dca1b11709f173bc32062db6f5a6b0",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 21.782608695652176,
"alnum_prop": 0.5419161676646707,
"repo_name": "mvshalamov/ivideon_test",
"id": "36a5f87e13179a439b274202ea68b6eba99575fd",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lamp_ivideon/lamp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7218"
}
],
"symlink_target": ""
} |
from random import randrange, choice
description = '''
Monopoly odds
Problem 84
In the game, Monopoly, the standard board is set up in the following way:
GO A1 2=CC1 A2 T1 R1 B1 7=CH1 B2 B3 JAIL
H2 C1
T2 U1
H1 C2
36=CH3 C3
R4 R2
G3 D1
33=CC3 CC2=17
G2 D2
G1 D3
G2J F3 U2 F2 F1 R3 E3 E2 22=CH2 E1 FP
A player starts on the GO square and adds the scores on two 6-sided dice to determine the number of squares they advance in a clockwise direction. Without any further rules we would expect to visit each square with equal probability: 2.5%. However, landing on G2J (Go To Jail), CC (community chest), and CH (chance) changes this distribution.
In addition to G2J, and one card from each of CC and CH, that orders the player to go directly to jail, if a player rolls three consecutive doubles, they do not advance the result of their 3rd roll. Instead they proceed directly to jail.
At the beginning of the game, the CC and CH cards are shuffled. When a player lands on CC or CH they take a card from the top of the respective pile and, after following the instructions, it is returned to the bottom of the pile. There are sixteen cards in each pile, but for the purpose of this problem we are only concerned with cards that order a movement; any instruction not concerned with movement will be ignored and the player will remain on the CC/CH square.
Community Chest (2/16 cards):
Advance to GO
Go to JAIL
Chance (10/16 cards):
Advance to GO
Go to JAIL
Go to C1
Go to E3
Go to H2
Go to R1
Go to next R (railway company)
Go to next R
Go to next U (utility company)
Go back 3 squares.
The heart of this problem concerns the likelihood of visiting a particular square. That is, the probability of finishing at that square after a roll. For this reason it should be clear that, with the exception of G2J for which the probability of finishing on it is zero, the CH squares will have the lowest probabilities, as 5/8 request a movement to another square, and it is the final square that the player finishes at on each roll that we are interested in. We shall make no distinction between "Just Visiting" and being sent to JAIL, and we shall also ignore the rule about requiring a double to "get out of jail", assuming that they pay to get out on their next turn.
By starting at GO and numbering the squares sequentially from 00 to 39 we can concatenate these two-digit numbers to produce strings that correspond with sets of squares.
Statistically it can be shown that the three most popular squares, in order, are JAIL (6.24%) = Square 10, E3 (3.18%) = Square 24, and GO (3.09%) = Square 00. So these three most popular squares can be listed with the six-digit modal string: 102400.
If, instead of using two 6-sided dice, two 4-sided dice are used, find the six-digit modal string.
'''
# Board squares, numbered 00-39 clockwise from GO.
GO = 0
COMMUNITY_CHEST = [2, 17, 33]  # CC1, CC2, CC3
CHANCE = [7, 22, 36]           # CH1, CH2, CH3
JAIL = 10
GO_TO_JAIL = 30                # G2J square: landing here sends you to JAIL
UTILITY1 = 12                  # U1
UTILITY2 = 28                  # U2
def community_chest(pos):
    """Draw a Community Chest card: 2/16 cards move (GO or JAIL), 14/16 stay put."""
    deck = [JAIL, GO] + [pos] * 14
    return choice(deck)
def next_utility(pos):
    """Return the next utility square reached when moving clockwise from pos."""
    if UTILITY1 < pos < UTILITY2:
        return UTILITY2
    # at/after U2 (or before U1) the next utility wraps around to U1
    return UTILITY1
def next_rail(pos):
    """Return the next railway square (5, 15, 25 or 35) clockwise from pos."""
    if pos < 5 or pos > 35:
        # before R1 or past R4: wrap around to R1
        return 5
    for rail in (15, 25, 35):
        if pos < rail:
            return rail
    return 35
def chance(pos):
    """Draw a Chance card: 10/16 cards move the player, 6/16 leave them in place."""
    moves = [GO, JAIL, 11, 24, 39, 5,
             next_utility(pos), pos - 3,
             next_rail(pos), next_rail(pos)]
    return choice(moves + [pos] * 6)
def simulation(N):
    """Simulate N rolls of two 4-sided dice on the Monopoly board.

    Returns a list of 40 landing counts, one per square.

    Bug fix: 'doubles' was never initialized before the loop, so a double
    on the very first roll (probability 1/4 with d4 dice) raised
    UnboundLocalError.
    """
    counts = [0] * 40
    pos = 0
    doubles = 0  # consecutive doubles rolled so far
    for turn in range(0, N):
        d1 = randrange(1, 5)
        d2 = randrange(1, 5)
        if d1 == d2:
            doubles += 1
        else:
            doubles = 0
        pos = (pos + d1 + d2) % len(counts)
        if pos in COMMUNITY_CHEST:
            pos = community_chest(pos)
        elif pos in CHANCE:
            pos = chance(pos)
        elif pos == GO_TO_JAIL or doubles == 3:
            # NOTE(review): 'doubles' is not reset after jailing, so a 4th
            # consecutive double would not re-trigger -- confirm intended.
            pos = JAIL
        counts[pos] += 1
    return counts
# Run the simulation and report the three most-visited squares
# (their concatenated two-digit numbers form the answer's modal string).
indexedCounts = sorted(enumerate(simulation(500000)), key=lambda pr: pr[1], reverse=True)
top3 = [pr[0] for pr in indexedCounts[:3]]
print(top3)
| {
"content_hash": "f0f4f595ca42e655bb6e640e0e3752b0",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 673,
"avg_line_length": 40.791666666666664,
"alnum_prop": 0.7211440245148111,
"repo_name": "mbuhot/mbuhot-euler-solutions",
"id": "ef4a5e4c18a6a4ba75925d6d243c38f409cd8ea5",
"size": "3941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problem-084.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "58575"
},
{
"name": "Pony",
"bytes": "6146"
},
{
"name": "Python",
"bytes": "135708"
},
{
"name": "Swift",
"bytes": "13417"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.