text stringlengths 4 1.02M | meta dict |
|---|---|
__author__ = 'Daniel'
from glue.models import *
from swampdragon.serializers.model_serializer import ModelSerializer
class UserSerializer(ModelSerializer):
    """Serializer for the User model.

    Only ``id`` and ``username`` are published, so no sensitive account
    fields are exposed to subscribed clients.
    """

    class Meta:
        # User comes in via the wildcard import from glue.models;
        # presumably Django's auth User — confirm against glue/models.py.
        model = User
        publish_fields = ('id', 'username')
class LocationSerializer(ModelSerializer):
    """Serializer for the glue.Location model."""

    class Meta:
        # Lazy 'app_label.ModelName' string, resolved by swampdragon at runtime.
        model = 'glue.Location'
class PlayerConfigSerializer(ModelSerializer):
    """Serializer for the glue.PlayerConfig model."""

    class Meta:
        # Lazy 'app_label.ModelName' string, resolved by swampdragon at runtime.
        model = 'glue.PlayerConfig'
class PlayerSerializer(ModelSerializer):
    """Serializer for the glue.Player model."""

    class Meta:
        model = 'glue.Player'
    # Related ``user`` field is serialized with the nested UserSerializer
    # (class reference, per swampdragon convention).
    user = UserSerializer
class GameRoomSerializer(ModelSerializer):
    """Serializer for the glue.GameRoom model."""

    class Meta:
        model = 'glue.GameRoom'
    # Related ``users`` are serialized with the nested PlayerSerializer.
    users = PlayerSerializer
class MobTypeSerializer(ModelSerializer):
    """Serializer for the glue.MobType model."""

    class Meta:
        model = 'glue.MobType'
class MobSerializer(ModelSerializer):
    """Serializer for the glue.Mob model."""

    class Meta:
        model = 'glue.Mob'
class StageSerializer(ModelSerializer):
    """Serializer for the glue.Stage model."""

    class Meta:
        model = 'glue.Stage'
class CurrentStageSerializer(ModelSerializer):
    """Serializer for the glue.CurrentStage model."""

    class Meta:
        model = 'glue.CurrentStage'
| {
"content_hash": "a061d06bbd4794794f738126a29db653",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 68,
"avg_line_length": 18.836363636363636,
"alnum_prop": 0.6891891891891891,
"repo_name": "Valchris/tdoa",
"id": "3becd23e66f68e54168ab64893c4477e8c0d7291",
"size": "1036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "245528"
},
{
"name": "HTML",
"bytes": "877389"
},
{
"name": "JavaScript",
"bytes": "345466"
},
{
"name": "Python",
"bytes": "26372"
},
{
"name": "Ruby",
"bytes": "1183"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from codecs import open
from os import path
# Root of the checkout; used to resolve files shipped next to setup.py.
here = path.abspath(path.dirname(__file__))

# Reuse the README as the long description shown on PyPI.
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='orders',
    version='0.0.1',
    description='Orders microservice',
    long_description=long_description,
    url='https://github.com/pap/simplebank',
    author='Simplebank Engineering',
    author_email='engineering@simplebank.book',
    license='MIT',
    # Classifiers deduplicated; the license classifier previously said
    # "Apache Software License", contradicting license='MIT' above.
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    keywords='microservices orders',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Pinned runtime dependencies (kept exactly as released).
    install_requires=[
        'nameko==2.12.0',
        'logstash_formatter==0.5.17',
        'circuitbreaker==1.3.0',
        'gutter==0.5.0',
        'request-id==1.0',
        'statsd==3.3.0',
        'nameko-sentry==1.0.0',
        'pyopenssl==19.1.0',
        'Flask==1.1.2',
        'jaeger-client == 4.3.0',
        'requests==2.23.0',
        'opentracing==2.3.0',
        'opentracing_instrumentation==3.2.1',
    ],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
)
| {
"content_hash": "1e1d89cbf564767d1973b078f27b0177",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 71,
"avg_line_length": 29.611940298507463,
"alnum_prop": 0.5912298387096774,
"repo_name": "morganjbruce/microservices-in-action",
"id": "ce261d5e3ab0767efa4d40c7bf740a236d20a866",
"size": "1984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter-12/orders/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5454"
},
{
"name": "Groovy",
"bytes": "3962"
},
{
"name": "JavaScript",
"bytes": "368"
},
{
"name": "Python",
"bytes": "75781"
},
{
"name": "Ruby",
"bytes": "1518"
},
{
"name": "Shell",
"bytes": "1851"
}
],
"symlink_target": ""
} |
import itertools, weakref
from .. colors import make
from .. util import color_list, log
class Pixels:
    """
    Wrap pixels attached to a layout for easy access and
    better error reporting.

    Indexing supports negative coordinates and multi-dimensional layouts;
    coordinates are validated against ``self.shape`` before use.
    """

    def __init__(self, builder):
        # Weak reference: a Pixels view must not keep its Builder alive.
        self.builder = weakref.ref(builder)

    def __getitem__(self, index):
        """
        Returns the r, g, b pixel at a location in the layout. May only be
        called if self.is_running is true.
        """
        index = self._check_index(index)
        return self.layout.get(*index)

    def __setitem__(self, index, color):
        """
        Sets the r, g, b pixel at a location in the layout. May only be called
        if self.is_running is true.
        """
        index = self._check_index(index)
        try:
            color = make.color(color)
        except Exception:
            # Was a bare `except:`, which also intercepted KeyboardInterrupt
            # and SystemExit and logged them as color errors. Still re-raises.
            log.error('Do not understand color %s', color)
            raise
        index.append(color)
        return self.layout.set(*index)

    def clear(self):
        """Clear the layout's color list via the color_list math helper."""
        cl = self.layout.color_list
        color_list.Math(cl).clear(cl)

    @property
    def layout(self):
        # The Builder may have been garbage-collected or not yet started.
        b = self.builder()
        if not (b and b.project):
            raise ValueError('Cannot get layout before Builder has started')
        return b.project.layout

    @property
    def shape(self):
        return self.layout.shape

    def _check_index(self, index):
        """Normalize `index` to a list of in-range, non-negative coordinates.

        Raises ValueError on a rank mismatch and IndexError when any
        coordinate is out of range.
        """
        if isinstance(index, int):
            index = [index]
        else:
            index = list(index)
        l1, l2 = len(self.shape), len(index)
        if l1 != l2:
            msg = 'Expected %d coordinates but got %d: %s'
            raise ValueError(msg % (l1, l2, index))
        for i, s in enumerate(self.shape):
            # Negative indices count back from the end of each dimension.
            if index[i] < 0:
                index[i] += s
            if not (0 <= index[i] < s):
                raise IndexError('Index %d was out of range' % i)
        return index
| {
"content_hash": "f6f0cad3e0a3601bee2df85890cbb511",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 27.565217391304348,
"alnum_prop": 0.544689800210305,
"repo_name": "ManiacalLabs/BiblioPixel",
"id": "e29ad6d2fbeff648e9dfeec68c6ba9a3f955dcec",
"size": "1902",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bibliopixel/builder/pixels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20651"
},
{
"name": "HTML",
"bytes": "3310"
},
{
"name": "JavaScript",
"bytes": "5140"
},
{
"name": "Python",
"bytes": "674175"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
} |
from pyherc.test.bdd.features.helpers import get_character
from pyherc.test.cutesy import make, wait_
from pyherc.test.cutesy.dictionary import get_history_value
@then('time should pass for {character_name}')
def impl(context, character_name):
    """Assert that the named character's clock advanced past its recorded tick."""
    character = get_character(context, character_name)
    recorded_tick = get_history_value(character, 'tick')
    assert character.tick > recorded_tick
@when('{character_name} waits')
def step_impl(context, character_name):
    """Issue a wait action for the named character."""
    make(get_character(context, character_name), wait_())
| {
"content_hash": "11c91dd64de915244baedfaaf1bd5d5c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 30.210526315789473,
"alnum_prop": 0.7369337979094077,
"repo_name": "tuturto/pyherc",
"id": "49943a399cdd99516d7a6c81b1d6bb15d132652f",
"size": "1716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyherc/test/bdd/features/steps/time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "8825"
},
{
"name": "HTML",
"bytes": "529"
},
{
"name": "Hy",
"bytes": "603756"
},
{
"name": "Python",
"bytes": "975380"
}
],
"symlink_target": ""
} |
''' The data in airports.json is a subset of US airports with field
elevations > 1500 meters. The query result was taken from
.. code-block:: none
http://services.nationalmap.gov/arcgis/rest/services/GlobalMap/GlobalMapWFS/MapServer/10/query
on October 15, 2015.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
# External imports
# Bokeh imports
from ..util.dependencies import import_required
from ..util.sampledata import external_path
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module: the airports DataFrame built at import time.
__all__ = (
    'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    '''
    Load the bundled ``airports.json`` sample into a pandas DataFrame with
    columns ``name``, ``elevation``, ``x`` and ``y``.
    '''
    pd = import_required('pandas', 'airports sample data requires Pandas (http://pandas.pydata.org) to be installed')
    with open(external_path('airports.json'), 'r') as f:
        airports = json.loads(f.read())
    schema = [['attributes', 'nam'], ['attributes', 'zv3'], ['geometry', 'x'], ['geometry', 'y']]
    frame = pd.io.json.json_normalize(airports['features'], meta=schema)
    # Flattened JSON paths are renamed to the friendly public column names.
    frame.rename(
        columns={
            'attributes.nam': 'name',
            'attributes.zv3': 'elevation',
            'geometry.x': 'x',
            'geometry.y': 'y',
        },
        inplace=True)
    return frame

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

data = _read_data()
| {
"content_hash": "de80e4ff4ac3dd0670b54157e963dff1",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 117,
"avg_line_length": 35,
"alnum_prop": 0.37183098591549296,
"repo_name": "timsnyder/bokeh",
"id": "2f9125f712e35142bd10cc7dcb4957e5eab550ef",
"size": "2816",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/sampledata/airports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
## reports view
urlpatterns = patterns('aquatest_reports.views',
    # Each tuple maps a URL regex to a view name resolved inside the
    # 'aquatest_reports.views' module (old-style Django patterns() prefix).
    (r'^reports$', 'reports'),
    (r'^sampling_points$', 'sampling_points'),
    (r'^report_testers$', 'testers'),
    (r'^date_range$', 'date_range'),
    (r'^create_report$', 'create_report'),
    (r'^export_csv$', 'export_csv'),
    (r'^export_pdf$', 'pdf_view'),
    (r'^parameters$', 'parameters'),
)
| {
"content_hash": "3b7e92369341140a56d23aa9c3fe49f0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 26,
"alnum_prop": 0.5961538461538461,
"repo_name": "commtrack/temp-aquatest",
"id": "7b092c43f8c4cc3917e802802e62d4232f157a21",
"size": "416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/aquatest_reports/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "742874"
},
{
"name": "PHP",
"bytes": "2863"
},
{
"name": "Python",
"bytes": "3707591"
},
{
"name": "Shell",
"bytes": "490"
}
],
"symlink_target": ""
} |
""" Preprocessing Module for Real-time Scan
Set of utilities for apply specified preprocessing steps to data during a
real-time run.
"""
import sys
import logging
import io
import contextlib
import zmq
import numpy as np
import nibabel as nib
from nipy.algorithms.registration import HistogramRegistration, Rigid
class Preprocessor:
    """ Preprocessing class.

    This is the main Preprocessing module that gets instantiated by Pyneal, and
    will handle executing specific preprocessing routines on incoming volumes
    throughout the scan.
    """
    def __init__(self, settings):
        """ Initialize the class

        Parameters
        ----------
        settings : dict
            dictionary that contains all of the pyneal settings for the current
            session. This dictionary is loaded/configured by the GUI once
            Pyneal is first launched
        """
        # set up logger
        self.logger = logging.getLogger('PynealLog')

        self.settings = settings
        self.affine = None

        # start the motion processor
        self.motionProcessor = MotionProcessor(logger=self.logger, refVolIdx=4)

        # Always define the dashboard flag; previously it was only set when
        # the dashboard was launched, so sendToDashboard could raise
        # AttributeError in a non-dashboard session.
        self.dashboard = self.settings['launchDashboard']
        if self.dashboard:
            context = zmq.Context.instance()
            self.dashboardSocket = context.socket(zmq.REQ)
            self.dashboardSocket.connect('tcp://127.0.0.1:{}'.format(self.settings['dashboardPort']))

    def set_affine(self, affine):
        """ Set a local reference to the RAS+ affine transformation for the
        current series

        Parameters
        ----------
        affine : (4,4) numpy array-like
            affine matrix mapping the current series to RAS+ space
        """
        self.affine = affine

    def runPreprocessing(self, vol, volIdx):
        """ Run preprocessing on the supplied volume

        This is a function that Pyneal can call in order to execute the
        specified preprocessing routines for this series.

        Parameters
        ----------
        vol : numpy-array
            3D array of voxel data for the current volume
        volIdx : int
            0-based index indicating where, in time (4th dimension), the volume
            belongs

        Returns
        -------
        vol : numpy-array
            preprocessed 3D array of voxel data for the current volume
        """
        ### calculate the motion parameters on this volume. motionParams are
        # returned as dictionary with keys for 'rms_abs', and 'rms_rel';
        # NOTE: estimateMotion needs the input vol to be a nibabel nifti obj
        # the nostdout bit suppresses verbose estimation output to stdOut
        self.logger.debug('started volIdx {}'.format(volIdx))

        # Default to None so the dashboard branch below is well-defined even
        # when motion estimation is disabled (previously a NameError).
        motionParams = None
        if self.settings['estimateMotion']:
            with nostdout():
                motionParams = self.motionProcessor.estimateMotion(
                    nib.Nifti1Image(vol, self.affine),
                    volIdx)

        ### send to dashboard (if specified)
        if self.settings['launchDashboard']:
            if motionParams is not None:
                # send to the dashboard
                self.sendToDashboard(topic='motion',
                                     content={'volIdx': volIdx,
                                              'rms_abs': motionParams['rms_abs'],
                                              'rms_rel': motionParams['rms_rel']})

        self.logger.info('preprocessed volIdx {}'.format(volIdx))
        return vol

    def sendToDashboard(self, topic=None, content=None):
        """ Send a msg to the Pyneal dashboard.

        The dashboard expects messages formatted in specific way, namely a
        dictionary with 2 keys: 'topic', and 'content'

        Parameters
        ----------
        topic : string
            topic of the message. For instance, topic = 'motion', would tell
            the dashboard to parse this message for data to use in the motion
            plot
        content : dict
            dictionary containing all of the key:value pairs you want to
            include in your message
        """
        if self.dashboard:
            dashboardMsg = {'topic': topic,
                            'content': content}
            self.dashboardSocket.send_json(dashboardMsg)
            response = self.dashboardSocket.recv_string()
class MotionProcessor():
    """ Tool to estimate motion during a real-time run.

    The motion estimates will be made relative to a reference volume,
    specified by `refVolIdx` (0-based index), and relative to the previous
    volume.

    See Also
    --------
    Motion estimation based on:
    https://github.com/cni/rtfmri/blob/master/rtfmri/analyzers.py &
    https://www.sciencedirect.com/science/article/pii/S1053811917306729#bib32
    """
    def __init__(self, logger=None, refVolIdx=4):
        """ Initialize the class

        Parameters
        ----------
        logger : logger object, optional
            reference to the logger object where you want to write log messages
        refVolIdx : int, optional
            The index of the volume to make absolute motion estimates relative
            to. 0-based index (default: 4)
        """
        self.logger = logger
        self.refVolIdx = refVolIdx
        # reference volume (set once volIdx == refVolIdx in estimateMotion)
        self.refVol = None

        # initialize both transforms to identity rigid-body transformations
        self.refVol_T = Rigid(np.eye(4))
        self.prevVol_T = Rigid(np.eye(4))

    def estimateMotion(self, niiVol, volIdx):
        """ Estimate the motion parameters for the current volume.

        This tool will first estimate the transformation needed to align the
        current volume to the reference volume. This transformation can be
        expressed as a rigid body transformation with 6 degrees of freedom
        (translation x,y,z; rotation x,y,z).

        Using the estimated transformation matrix, we can compute RMS deviation
        as a single value representing the displacement (in mm) between the
        current volume and the reference volume (abs rms) or the current volume
        and the previous volume (relative rms).

        Returns None until the reference volume (index `refVolIdx`) has been
        seen; afterwards returns a dict with keys 'rms_abs' and 'rms_rel'.

        This approach for estimating motion borrows heavily from:
        https://github.com/cni/rtfmri/blob/master/rtfmri/analyzers.py

        RMS calculations:
        https://www.fmrib.ox.ac.uk/datasets/techrep/tr99mj1/tr99mj1.pdf

        Parameters
        ----------
        niiVol : nibabel-like image
            nibabel-like 3D data object, representing the current volume
        volIdx : int
            the 0-based index of the current volume along the 4th dim
            (i.e. time)
        """
        if volIdx < self.refVolIdx:
            # too early: no reference volume yet
            return None
        elif volIdx == self.refVolIdx:
            self.refVol = niiVol  # set the reference volume
            return None
        elif volIdx > self.refVolIdx:
            # create a regisitration object
            reg = HistogramRegistration(niiVol, self.refVol, interp='tri')

            # estimate optimal transformation, warm-started from the previous
            # volume's transform for speed
            T = reg.optimize(self.prevVol_T.copy(), ftol=0.1, maxfun=30)

            # compute RMS relative to reference vol (rms abs)
            rms_abs = self.computeRMS(self.refVol_T, T)

            # compute RMS relative to previous vol (rms rel)
            rms_rel = self.computeRMS(self.prevVol_T, T)

            # # get the realignment parameters:
            # rot_x, rot_y, rot_z = np.rad2deg(T.rotation)
            # trans_x, trans_y, trans_z = T.translation

            # update the estimate
            self.prevVol_T = T

            motionParams = {'rms_abs': rms_abs,
                            'rms_rel': rms_rel}
            return motionParams

    def computeRMS(self, T1, T2, R=50):
        """ Compute the RMS displacement between transformation matrices.

        Parameters
        ----------
        T1,T2 : nipy Rigid object
            Transformation matrices
        R : int, optional
            radius (in mm) from center of head to cerebral cortex. Defaults to
            50mm (apprx distance from cerebral cortex to center of head):
            Motion-related artifacts in structural brain images revealed with
            independent estimates of in-scanner head motion. (2017) Savalia,
            et al. Human Brain Mapping. Jan; 38(1)
            https://www.ncbi.nlm.nih.gov/pubmed/27634551

        Returns
        -------
        rms : float
            a single value representing the mean displacement in mm (assuming
            a spherical volume with radius, R).

        See Also
        --------
        This approach for estimating motion borrows heavily from:
        https://github.com/cni/rtfmri/blob/master/rtfmri/analyzers.py

        RMS calculations:
        https://www.fmrib.ox.ac.uk/datasets/techrep/tr99mj1/tr99mj1.pdf
        """
        # residual transform between T1 and T2 (identity => zero displacement)
        diffMatrix = T1.as_affine().dot(np.linalg.inv(T2.as_affine())) - np.eye(4)

        # decompose into A and t components
        A = diffMatrix[:3, :3]
        t = diffMatrix[:3, 3]

        # volume center assumed to be at 0,0,0 in world space coords
        center = np.zeros(3)
        t += A.dot(center)

        # compute RMS error (aka deviation error between transforms)
        rms = np.sqrt((1 / 5) * R**2 * A.T.dot(A).trace() + t.T.dot(t))

        return rms
# suppress stdOut from verbose functions
@contextlib.contextmanager
def nostdout():
    """Temporarily redirect sys.stdout to an in-memory buffer.

    The original stdout is restored even if the wrapped block raises
    (previously an exception leaked the redirect permanently).
    """
    save_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        yield
    finally:
        sys.stdout = save_stdout
| {
"content_hash": "4db1bc78d8814ba1dbbe77007ffa7762",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 101,
"avg_line_length": 34.18214285714286,
"alnum_prop": 0.6024448855918921,
"repo_name": "jeffmacinnes/pyneal",
"id": "6599725529a6f1884969079a76b32e9dcf0fda39",
"size": "9571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pynealPreprocessing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7821"
},
{
"name": "HTML",
"bytes": "2219"
},
{
"name": "JavaScript",
"bytes": "23040"
},
{
"name": "MATLAB",
"bytes": "286017"
},
{
"name": "Python",
"bytes": "423023"
},
{
"name": "Shell",
"bytes": "1522"
}
],
"symlink_target": ""
} |
"""Access control list implementation."""
import collections
from google.appengine.ext import ndb
from components import auth
from . import impl
################################################################################
## Role inheritance.

# Predefined roles, ordered strongest to weakest (OWNER implies WRITER,
# WRITER implies READER — see is_writer/is_reader below).
ROLES = [
  'OWNER',
  'WRITER',
  'READER',
]

def is_valid_role(role_name):
  """True if given string can be used as a role name."""
  return role_name in ROLES
def is_owner(package_path, identity):
  """True if |identity| has OWNER role for package subpath."""
  return has_role(package_path, 'OWNER', identity)

def is_writer(package_path, identity):
  """True if |identity| has WRITER role or better for package subpath."""
  # OWNER implies WRITER.
  return (
      has_role(package_path, 'WRITER', identity) or
      is_owner(package_path, identity))

def is_reader(package_path, identity):
  """True if |identity| has READER role or better for package subpath."""
  # WRITER (and, transitively, OWNER) implies READER.
  return (
      has_role(package_path, 'READER', identity) or
      is_writer(package_path, identity))
################################################################################
## Granular actions and mapping to roles. API uses these.

# Getting information about a package.
can_fetch_package = is_reader

# Fetching a package instance.
can_fetch_instance = is_reader

# Uploading a new instance to a package, registering new package.
can_register_instance = is_writer

# PEP 8: these were lambdas assigned to names; plain defs keep the same
# call signatures and give useful names in tracebacks.

# Creating or moving a ref. TODO(vadimsh): Make it per-ref.
def can_move_ref(package_path, ref, ident):
  """True if |ident| may create or move refs; |ref| is currently ignored."""
  return is_writer(package_path, ident)

# Adding tags. TODO(vadimsh): Make it per-tag.
def can_attach_tag(package_path, tag, ident):
  """True if |ident| may attach tags; |tag| is currently ignored."""
  return is_writer(package_path, ident)

# Removing tags. TODO(vadimsh): Make it per-tag.
def can_detach_tag(package_path, tag, ident):
  """True if |ident| may detach tags; |tag| is currently ignored."""
  return is_writer(package_path, ident)

# Viewing ACLs.
can_fetch_acl = is_owner

# Changing ACLs.
can_modify_acl = is_owner
################################################################################
## Model.

# Describes single role modification. Passed to modify_roles.
RoleChange = collections.namedtuple('RoleChange', [
  # Package subpath to modify.
  'package_path',
  # True to remove the role, False to add it.
  'revoke',
  # Role to add\remove.
  'role',
  # Identity to add\remove role for. Only one of 'user' or 'group' can be set.
  'user',
  # Group to add\remove role for. Only one of 'user' or 'group' can be set.
  'group',
])
class PackageACLBody(ndb.Model):
  """Shared by PackageACL and PackageACLRevision.

  Doesn't actually exist in the datastore by itself. Only inherited from.
  """
  # Users granted the given role directly. Often only one account should be
  # granted some role (e.g. a role account should be WRITER). It is annoying to
  # manage one-account groups for cases like this.
  users = auth.IdentityProperty(indexed=False, repeated=True)
  # Groups granted the given role.
  groups = ndb.StringProperty(indexed=False, repeated=True)
  # Who made the last change.
  modified_by = auth.IdentityProperty(indexed=True)
  # When the last change was made.
  modified_ts = ndb.DateTimeProperty(indexed=True)
class PackageACL(PackageACLBody):
  """List of users and groups that have some role in some package.

  For role "R" and package "dir1/dir2" the entity key path is:
  [PackageACLRoot, (PackageACL, "R:dir1/dir2")].

  Notably:
    * There's a single root entity. All ACL entities belong to a single entity
      group. It allows transactional changes across different ACLs, but limits
      ACL changes to 1 change per second (which is more than enough, ACLs should
      not change very often).
    * ACLs for each roles are stored in separate entities (it allows to easily
      add new roles).
  """
  # Incremented with each change.
  rev = ndb.IntegerProperty(indexed=False, default=0)

  @property
  def package_path(self):
    # Entity id has the form '<role>:<package_path>'.
    chunks = self.key.id().split(':')
    assert len(chunks) == 2
    assert impl.is_valid_package_path(chunks[1])
    return chunks[1]

  @property
  def role(self):
    # Entity id has the form '<role>:<package_path>'.
    chunks = self.key.id().split(':')
    assert len(chunks) == 2
    assert is_valid_role(chunks[0])
    return chunks[0]

  def _pre_put_hook(self):
    # Reject entities whose key doesn't parse as '<role>:<package_path>'.
    chunks = self.key.id().split(':')
    assert len(chunks) == 2
    assert is_valid_role(chunks[0])
    assert impl.is_valid_package_path(chunks[1])
class PackageACLRevision(PackageACLBody):
  """Used to store historical values of some PackageACL.

  For role "R" and package "dir1/dir2" the entity key path is:
  [PackageACLRoot, (PackageACL, "R:dir1/dir2"), (PackageACLRevision, rev)].
  """
def root_key():
  """Returns ndb.Key of ACL model root entity."""
  # Single shared root => one entity group for all ACLs (see PackageACL doc).
  return ndb.Key('PackageACLRoot', 'acls')

def package_acl_key(package_path, role):
  """Returns ndb.Key of some PackageACL entity."""
  assert impl.is_valid_package_path(package_path), package_path
  assert is_valid_role(role), role
  return ndb.Key(PackageACL, '%s:%s' % (role, package_path), parent=root_key())
def get_package_acls(package_path, role):
  """Returns a list of PackageACL entities with ACLs for given package path.

  Fetches the ACL entity for every prefix of |package_path| (e.g. for
  'a/b/c': 'a', 'a/b', 'a/b/c'), skipping prefixes that have no entity.
  """
  assert impl.is_valid_package_path(package_path), package_path
  assert is_valid_role(role), role
  keys = []
  prefix = []
  for component in package_path.split('/'):
    prefix.append(component)
    keys.append(package_acl_key('/'.join(prefix), role))
  return [e for e in ndb.get_multi(keys) if e]
def has_role(package_path, role, identity):
  """True if |identity| has |role| in some |package_path|."""
  assert impl.is_valid_package_path(package_path), package_path
  assert is_valid_role(role), role
  # Admins implicitly hold every role everywhere.
  if auth.is_admin(identity):
    return True
  # Check direct membership first, then group membership, walking all
  # ACLs that apply to this path (including parent-directory ACLs).
  for acl in get_package_acls(package_path, role):
    if identity in acl.users:
      return True
    if any(auth.is_group_member(g, identity) for g in acl.groups):
      return True
  return False
def modify_roles(changes, caller, now):
  """Transactionally modifies ACLs for a bunch of packages and roles.

  Args:
    changes: list of RoleChange objects describing what modifications to apply.
        Order matters, modifications are applied in the order provided.
    caller: Identity that made this change.
    now: datetime with current time.

  Raises:
    ValueError if changes list contains bad changes.
  """
  if not changes:
    return

  # Validate format of changes before touching the datastore at all.
  for c in changes:
    if not isinstance(c, RoleChange):
      raise ValueError(
          'Expecting RoleChange, got %s instead' % type(c).__name__)
    if not impl.is_valid_package_path(c.package_path):
      raise ValueError('Invalid package_path: %s' % c.package_path)
    if not is_valid_role(c.role):
      raise ValueError('Invalid role: %s' % c.role)
    if not c.user and not c.group:
      raise ValueError('RoleChange.user or RoleChange.group should be set')
    if c.user and c.group:
      raise ValueError(
          'Only one of RoleChange.user or RoleChange.group should be set')
    if c.user and not isinstance(c.user, auth.Identity):
      raise ValueError('RoleChange.user must be auth.Identity')
    if c.group and not auth.is_valid_group_name(c.group):
      raise ValueError('Invalid RoleChange.group value')

  # All PackageACL entities share one entity group (see root_key), so a single
  # transaction can touch all of them consistently.
  @ndb.transactional
  def run():
    # (package_path, role) pair -> list of RoleChanges to apply to it.
    to_apply = collections.defaultdict(list)
    for c in changes:
      to_apply[(c.package_path, c.role)].append(c)

    # Grab all existing PackageACL entities, make new empty ones if missing.
    # Build mapping (package_path, role) -> PackageACL.
    entities = {}
    path_role_pairs = sorted(to_apply.keys())
    keys = [package_acl_key(path, role) for path, role in path_role_pairs]
    for i, entity in enumerate(ndb.get_multi(keys)):
      entities[path_role_pairs[i]] = entity or PackageACL(key=keys[i])

    # Helper to apply RoleChange to a list of users and groups.
    def apply_change(c, users, groups):
      if c.user:
        assert not c.group
        if c.revoke and c.user in users:
          users.remove(c.user)
        elif not c.revoke and c.user not in users:
          users.append(c.user)
      if c.group:
        assert not c.user
        if c.revoke and c.group in groups:
          groups.remove(c.group)
        elif not c.revoke and c.group not in groups:
          groups.append(c.group)

    # Apply all the changes. Collect a list of modified entities.
    to_put = []
    for package_path, role in path_role_pairs:
      package_acl = entities[(package_path, role)]
      change_list = to_apply[(package_path, role)]
      # Mutate copies of the lists of users and groups so a no-op change
      # can be detected by comparison below.
      users = list(package_acl.users)
      groups = list(package_acl.groups)
      for c in change_list:
        apply_change(c, users, groups)
      # Nothing changed?
      if users == package_acl.users and groups == package_acl.groups:
        continue
      # Store the previous revision in the log.
      if package_acl.rev:
        to_put.append(
            PackageACLRevision(
                key=ndb.Key(
                    PackageACLRevision, package_acl.rev, parent=package_acl.key),
                users=package_acl.users,
                groups=package_acl.groups,
                modified_by=package_acl.modified_by,
                modified_ts=package_acl.modified_ts))
      # Store modified PackageACL, bump revision.
      package_acl.users = users
      package_acl.groups = groups
      package_acl.modified_by = caller
      package_acl.modified_ts = now
      package_acl.rev += 1
      to_put.append(package_acl)

    # Apply all pending changes.
    ndb.put_multi(to_put)

  run()
| {
"content_hash": "f9f8c8aaa51c1733d2712de57a0feada",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 80,
"avg_line_length": 32.68493150684932,
"alnum_prop": 0.6610435875943,
"repo_name": "nicko96/Chrome-Infra",
"id": "8b1df2e4e2932e4c15a043984573b1522987558d",
"size": "9707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine/chrome_infra_packages/cipd/acl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
} |
import angr
######################################
# geteuid
######################################
class geteuid(angr.SimProcedure):
    # pylint: disable=arguments-differ

    def run(self):
        """Model libc geteuid() by returning a fixed, unprivileged euid (1000)."""
        return 1000
| {
"content_hash": "c11483f9bde041f3b6907b57254675d8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 21.4,
"alnum_prop": 0.43457943925233644,
"repo_name": "angr/angr",
"id": "b04954ceb1b548b9bde9ab6e8096bbb0ce466e40",
"size": "214",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "angr/procedures/libc/geteuid.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6694"
},
{
"name": "C++",
"bytes": "146292"
},
{
"name": "Makefile",
"bytes": "946"
},
{
"name": "Python",
"bytes": "27717304"
}
],
"symlink_target": ""
} |
from ._event_hub_management_client import EventHubManagementClient
__all__ = ['EventHubManagementClient']

# Apply customizations from an optional, hand-written _patch module;
# its absence is expected and silently ignored.
try:
    from ._patch import patch_sdk  # type: ignore
    patch_sdk()
except ImportError:
    pass
| {
"content_hash": "c03586c8a8b486f71a57d4c460c289cb",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 66,
"avg_line_length": 25.875,
"alnum_prop": 0.7246376811594203,
"repo_name": "Azure/azure-sdk-for-python",
"id": "72f93dd9db6cc6ba2d73c0e5ce35138779690eba",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/eventhub/azure-mgmt-eventhub/azure/mgmt/eventhub/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import division, with_statement
import gzip
import bz2
import io
import gzip
import mmap
import os
import shutil
import sys
import warnings
import zipfile
try:
import StringIO
HAVE_STRINGIO = True
except ImportError:
HAVE_STRINGIO = False
import numpy as np
from ..extern.six import BytesIO
import pyfits as fits
from ..convenience import _getext
from ..diff import FITSDiff
from ..file import _File, GZIP_MAGIC
from . import PyfitsTestCase
from .util import ignore_warnings, CaptureStdio
from nose.tools import assert_raises
from warnings import catch_warnings
class TestCore(PyfitsTestCase):
    """Core tests of HDU and header behavior: verification options, header
    card updates, EXTNAME/EXTVER/EXTLEVEL handling, column manipulation,
    and HDU (de)serialization from strings."""

    def test_with_statement(self):
        """fits.open() must be usable as a context manager."""
        with fits.open(self.data('ascii.fits')) as f:
            pass

    def test_missing_file(self):
        """Opening a non-existent file raises IOError."""
        assert_raises(IOError, fits.open, self.temp('does-not-exist.fits'))

    def test_naxisj_check(self):
        """A spurious NAXISj keyword (j > NAXIS) is removed by verification."""
        hdulist = fits.open(self.data('o4sp040b0_raw.fits'))
        hdulist[1].header['NAXIS3'] = 500
        assert 'NAXIS3' in hdulist[1].header
        hdulist.verify('silentfix')
        assert 'NAXIS3' not in hdulist[1].header

    def test_byteswap(self):
        """Scaled (bzero=32768) int16 column data round-trips through a file."""
        primary = fits.PrimaryHDU()
        hdus = fits.HDUList()
        n = np.zeros(3, dtype='i2')
        n[0] = 1
        n[1] = 60000
        n[2] = 2
        c = fits.Column(name='foo', format='i2', bscale=1, bzero=32768,
                        array=n)
        t = fits.BinTableHDU.from_columns([c])
        hdus.append(primary)
        hdus.append(t)
        hdus.writeto(self.temp('test.fits'), clobber=True)
        with fits.open(self.temp('test.fits')) as hdul:
            assert hdul[1].data[1]['foo'] == 60000.0

    def test_add_del_columns(self):
        """Columns can be added to and deleted from an empty ColDefs."""
        p = fits.ColDefs([])
        p.add_col(fits.Column(name='FOO', format='3J'))
        p.add_col(fits.Column(name='BAR', format='1I'))
        assert p.names == ['FOO', 'BAR']
        p.del_col('FOO')
        assert p.names == ['BAR']

    def test_add_del_columns2(self):
        """Adding/deleting columns on a table HDU keeps data and column
        definitions in sync, and survives a write/read round trip."""
        hdulist = fits.open(self.data('tb.fits'))
        table = hdulist[1]
        assert table.data.dtype.names == ('c1', 'c2', 'c3', 'c4')
        assert table.columns.names == ['c1', 'c2', 'c3', 'c4']
        table.columns.del_col('c1')
        assert table.data.dtype.names == ('c2', 'c3', 'c4')
        assert table.columns.names == ['c2', 'c3', 'c4']
        table.columns.del_col('c3')
        assert table.data.dtype.names == ('c2', 'c4')
        assert table.columns.names == ['c2', 'c4']
        table.columns.add_col(fits.Column('foo', '3J'))
        assert table.data.dtype.names == ('c2', 'c4', 'foo')
        assert table.columns.names == ['c2', 'c4', 'foo']
        hdulist.writeto(self.temp('test.fits'), clobber=True)
        with ignore_warnings():
            # TODO: The warning raised by this test is actually indication of a
            # bug and should *not* be ignored. But as it is a known issue we
            # hide it for now. See
            # https://github.com/spacetelescope/PyFITS/issues/44
            with fits.open(self.temp('test.fits')) as hdulist:
                table = hdulist[1]
                assert table.data.dtype.names == ('c2', 'c4', 'foo')
                assert table.columns.names == ['c2', 'c4', 'foo']

    def test_update_header_card(self):
        """A very basic test for the Header.update method--I'd like to add a
        few more cases to this at some point.
        """
        header = fits.Header()
        comment = 'number of bits per data pixel'
        header['BITPIX'] = (16, comment)
        assert 'BITPIX' in header
        assert header['BITPIX'] == 16
        assert header.cards['BITPIX'].comment == comment
        header.set('BITPIX', 32)
        # Make sure the value has been updated, but the comment was preserved
        assert header['BITPIX'] == 32
        assert header.cards['BITPIX'].comment == comment
        # The comment should still be preserved--savecomment only takes effect if
        # a new comment is also specified
        header['BITPIX'] = 16
        assert header.cards['BITPIX'].comment == comment
        header.set('BITPIX', 16, 'foobarbaz', savecomment=True)
        assert header.cards['BITPIX'].comment == comment

    def test_set_card_value(self):
        """Similar to test_update_header_card(), but tests the the
        `header['FOO'] = 'bar'` method of updating card values.
        """
        header = fits.Header()
        comment = 'number of bits per data pixel'
        card = fits.Card.fromstring('BITPIX = 32 / %s' % comment)
        header.append(card)
        header['BITPIX'] = 32
        assert 'BITPIX' in header
        assert header['BITPIX'] == 32
        assert header.cards['BITPIX'].keyword == 'BITPIX'
        assert header.cards['BITPIX'].value == 32
        assert header.cards['BITPIX'].comment == comment

    def test_uint(self):
        """With uint=True, scaled 16-bit data is exposed as uint16 rather
        than float32, with identical values."""
        hdulist_f = fits.open(self.data('o4sp040b0_raw.fits'), uint=False)
        hdulist_i = fits.open(self.data('o4sp040b0_raw.fits'), uint=True)
        assert hdulist_f[1].data.dtype == np.float32
        assert hdulist_i[1].data.dtype == np.uint16
        assert np.all(hdulist_f[1].data == hdulist_i[1].data)

    def test_fix_missing_card_append(self):
        """req_cards in fix mode appends the missing keyword to the header."""
        hdu = fits.ImageHDU()
        errs = hdu.req_cards('TESTKW', None, None, 'foo', 'silentfix', [])
        assert len(errs) == 1
        assert 'TESTKW' in hdu.header
        assert hdu.header['TESTKW'] == 'foo'
        assert hdu.header.cards[-1].keyword == 'TESTKW'

    def test_fix_invalid_keyword_value(self):
        """req_cards validates values via a test callable and can fix them."""
        hdu = fits.ImageHDU()
        hdu.header['TESTKW'] = 'foo'
        errs = hdu.req_cards('TESTKW', None,
                             lambda v: v == 'foo', 'foo', 'ignore', [])
        assert len(errs) == 0
        # Now try a test that will fail, and ensure that an error will be
        # raised in 'exception' mode
        errs = hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar',
                             'exception', [])
        assert len(errs) == 1
        assert errs[0][1] == "'TESTKW' card has invalid value 'foo'."
        # See if fixing will work
        hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar', 'silentfix',
                      [])
        assert hdu.header['TESTKW'] == 'bar'

    def test_unfixable_missing_card(self):
        """A required card with no fix value fails verification in fix mode."""
        class TestHDU(fits.hdu.base.NonstandardExtHDU):
            def _verify(self, option='warn'):
                errs = super(TestHDU, self)._verify(option)
                hdu.req_cards('TESTKW', None, None, None, 'fix', errs)
                return errs
        hdu = TestHDU(header=fits.Header())
        assert_raises(fits.VerifyError, hdu.verify, 'fix')

    def test_exception_on_verification_error(self):
        hdu = fits.ImageHDU()
        del hdu.header['XTENSION']
        assert_raises(fits.VerifyError, hdu.verify, 'exception')

    def test_ignore_verification_error(self):
        hdu = fits.ImageHDU()
        # The default here would be to issue a warning; ensure that no warnings
        # or exceptions are raised
        with catch_warnings():
            warnings.simplefilter('error')
            del hdu.header['NAXIS']
            try:
                hdu.verify('ignore')
            except Exception as exc:
                self.fail('An exception occurred when the verification error '
                          'should have been ignored: %s' % exc)
        # Make sure the error wasn't fixed either, silently or otherwise
        assert 'NAXIS' not in hdu.header

    def test_unrecognized_verify_option(self):
        hdu = fits.ImageHDU()
        assert_raises(ValueError, hdu.verify, 'foobarbaz')

    def test_combined_verify_options(self):
        """
        Test verify options like fix+ignore.
        """
        def make_invalid_hdu():
            hdu = fits.ImageHDU()
            # Add one keyword to the header that contains a fixable defect, and one
            # with an unfixable defect
            c1 = fits.Card.fromstring("test    = '    test'")
            c2 = fits.Card.fromstring("P.I.    = '  Hubble'")
            hdu.header.append(c1)
            hdu.header.append(c2)
            return hdu
        # silentfix+ignore should be completely silent
        hdu = make_invalid_hdu()
        with catch_warnings():
            warnings.simplefilter('error')
            try:
                hdu.verify('silentfix+ignore')
            except Exception as exc:
                self.fail('An exception occurred when the verification error '
                          'should have been ignored: %s' % exc)
        # silentfix+warn should be quiet about the fixed HDU and only warn
        # about the unfixable one
        hdu = make_invalid_hdu()
        with catch_warnings(record=True) as w:
            hdu.verify('silentfix+warn')
            assert len(w) == 4
            assert 'Illegal keyword name' in str(w[2].message)
        # silentfix+exception should only mention the unfixable error in the
        # exception
        hdu = make_invalid_hdu()
        try:
            hdu.verify('silentfix+exception')
        except fits.VerifyError as exc:
            assert 'Illegal keyword name' in str(exc)
            assert 'not upper case' not in str(exc)
        else:
            self.fail('An exception should have been raised.')
        # fix+ignore is not too useful, but it should warn about the fixed
        # problems while saying nothing about the unfixable problems
        hdu = make_invalid_hdu()
        with catch_warnings(record=True) as w:
            hdu.verify('fix+ignore')
            assert len(w) == 4
            assert 'not upper case' in str(w[2].message)
        # fix+warn
        hdu = make_invalid_hdu()
        with catch_warnings(record=True) as w:
            hdu.verify('fix+warn')
            assert len(w) == 6
            assert 'not upper case' in str(w[2].message)
            assert 'Illegal keyword name' in str(w[4].message)
        # fix+exception
        hdu = make_invalid_hdu()
        try:
            hdu.verify('fix+exception')
        except fits.VerifyError as exc:
            assert 'Illegal keyword name' in str(exc)
            assert 'not upper case' in str(exc)
        else:
            self.fail('An exception should have been raised.')

    def test_getext(self):
        """
        Test the various different ways of specifying an extension header in
        the convenience functions.
        """
        hl, ext = _getext(self.data('test0.fits'), 'readonly', 1)
        assert ext == 1
        assert_raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
                      1, 2)
        assert_raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
                      (1, 2))
        assert_raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
                      'sci', 'sci')
        assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
                      1, 2, 3)
        hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=1)
        assert ext == 1
        hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=('sci', 2))
        assert ext == ('sci', 2)
        assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
                      1, ext=('sci', 2), extver=3)
        assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
                      ext=('sci', 2), extver=3)
        hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci')
        assert ext == ('sci', 1)
        hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci', 1)
        assert ext == ('sci', 1)
        hl, ext = _getext(self.data('test0.fits'), 'readonly', ('sci', 1))
        assert ext == ('sci', 1)
        hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci',
                          extver=1, do_not_scale_image_data=True)
        assert ext == ('sci', 1)
        assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
                      'sci', ext=1)
        assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
                      'sci', 1, extver=2)
        hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci')
        assert ext == ('sci', 1)
        hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci',
                          extver=1)
        assert ext == ('sci', 1)
        assert_raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
                      extver=1)

    def test_extension_name_case_sensitive(self):
        """
        Tests that setting fits.EXTENSION_NAME_CASE_SENSITIVE at runtime
        works.
        """
        if 'PYFITS_EXTENSION_NAME_CASE_SENSITIVE' in os.environ:
            del os.environ['PYFITS_EXTENSION_NAME_CASE_SENSITIVE']
        hdu = fits.ImageHDU()
        hdu.name = 'sCi'
        assert hdu.name == 'SCI'
        assert hdu.header['EXTNAME'] == 'SCI'
        try:
            fits.EXTENSION_NAME_CASE_SENSITIVE = True
            hdu = fits.ImageHDU()
            hdu.name = 'sCi'
            assert hdu.name == 'sCi'
            assert hdu.header['EXTNAME'] == 'sCi'
        finally:
            # Restore the global default so later tests are unaffected
            fits.EXTENSION_NAME_CASE_SENSITIVE = False
        hdu.name = 'sCi'
        assert hdu.name == 'SCI'
        assert hdu.header['EXTNAME'] == 'SCI'

    def test_hdu_fromstring(self):
        """
        Tests creating a fully-formed HDU object from a string containing the
        bytes of the HDU.
        """
        # FIX: read via a context manager so the file handle is closed
        with open(self.data('test0.fits'), 'rb') as f:
            dat = f.read()
        offset = 0
        with fits.open(self.data('test0.fits')) as hdul:
            hdulen = hdul[0]._data_offset + hdul[0]._data_size
            hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
            assert isinstance(hdu, fits.PrimaryHDU)
            assert hdul[0].header == hdu.header
            assert hdu.data is None
        hdu.header['TEST'] = 'TEST'
        hdu.writeto(self.temp('test.fits'))
        with fits.open(self.temp('test.fits')) as hdul:
            assert isinstance(hdu, fits.PrimaryHDU)
            assert hdul[0].header[:-1] == hdu.header[:-1]
            assert hdul[0].header['TEST'] == 'TEST'
            assert hdu.data is None
        with fits.open(self.data('test0.fits')) as hdul:
            for ext_hdu in hdul[1:]:
                offset += hdulen
                hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
                hdu = fits.ImageHDU.fromstring(dat[offset:offset + hdulen])
                assert isinstance(hdu, fits.ImageHDU)
                assert ext_hdu.header == hdu.header
                assert (ext_hdu.data == hdu.data).all()

    def test_nonstandard_hdu(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157
        Tests that "Nonstandard" HDUs with SIMPLE = F are read and written
        without prepending a superfluous and unwanted standard primary HDU.
        """
        data = np.arange(100, dtype=np.uint8)
        hdu = fits.PrimaryHDU(data=data)
        hdu.header['SIMPLE'] = False
        hdu.writeto(self.temp('test.fits'))
        info = [(0, '', 'NonstandardHDU', 5, (), '', '')]
        with fits.open(self.temp('test.fits')) as hdul:
            assert hdul.info(output=False) == info
            # NonstandardHDUs just treat the data as an unspecified array of
            # bytes.  The first 100 bytes should match the original data we
            # passed in...the rest should be zeros padding out the rest of the
            # FITS block
            assert (hdul[0].data[:100] == data).all()
            assert (hdul[0].data[100:] == 0).all()

    def test_extname(self):
        """Test getting/setting the EXTNAME of an HDU."""
        h1 = fits.PrimaryHDU()
        assert h1.name == 'PRIMARY'
        # Normally a PRIMARY HDU should not have an EXTNAME, though it should
        # have a default .name attribute
        assert 'EXTNAME' not in h1.header
        # The current version of the FITS standard does allow PRIMARY HDUs to
        # have an EXTNAME, however.
        h1.name = 'NOTREAL'
        assert h1.name == 'NOTREAL'
        assert h1.header.get('EXTNAME') == 'NOTREAL'
        # Updating the EXTNAME in the header should update the .name
        h1.header['EXTNAME'] = 'TOOREAL'
        assert h1.name == 'TOOREAL'
        # If we delete an EXTNAME keyword from a PRIMARY HDU it should go back
        # to the default
        del h1.header['EXTNAME']
        assert h1.name == 'PRIMARY'
        # For extension HDUs the situation is a bit simpler:
        h2 = fits.ImageHDU()
        assert h2.name == ''
        assert 'EXTNAME' not in h2.header
        h2.name = 'HELLO'
        assert h2.name == 'HELLO'
        assert h2.header.get('EXTNAME') == 'HELLO'
        h2.header['EXTNAME'] = 'GOODBYE'
        assert h2.name == 'GOODBYE'

    def test_extver_extlevel(self):
        """Test getting/setting the EXTVER and EXTLEVEL of and HDU."""
        # EXTVER and EXTNAME work exactly the same; their semantics are, for
        # now, to be inferred by the user.  Although they should never be less
        # than 1, the standard does not explicitly forbid any value so long as
        # it's an integer
        h1 = fits.PrimaryHDU()
        assert h1.ver == 1
        assert h1.level == 1
        assert 'EXTVER' not in h1.header
        assert 'EXTLEVEL' not in h1.header
        h1.ver = 2
        assert h1.header.get('EXTVER') == 2
        h1.header['EXTVER'] = 3
        assert h1.ver == 3
        del h1.header['EXTVER']
        # BUG FIX: the original line read ``h1.ver == 1`` -- a bare comparison
        # with no effect; the intended check is that .ver reverts to 1.
        assert h1.ver == 1
        h1.level = 2
        assert h1.header.get('EXTLEVEL') == 2
        h1.header['EXTLEVEL'] = 3
        assert h1.level == 3
        del h1.header['EXTLEVEL']
        assert h1.level == 1
        assert_raises(TypeError, setattr, h1, 'ver', 'FOO')
        assert_raises(TypeError, setattr, h1, 'level', 'BAR')

    def test_consecutive_writeto(self):
        """
        Regression test for an issue where calling writeto twice on the same
        HDUList could write a corrupted file.
        https://github.com/spacetelescope/PyFITS/issues/40 is actually a
        particular instance of this problem, though isn't unique to sys.stdout.
        """
        with fits.open(self.data('test0.fits')) as hdul1:
            # Add a bunch of header keywords so that the data will be forced to
            # new offsets within the file:
            for idx in range(40):
                hdul1[1].header['TEST%d' % idx] = 'test'
            hdul1.writeto(self.temp('test1.fits'))
            hdul1.writeto(self.temp('test2.fits'))
            # Open a second handle to the original file and compare it to hdul1
            # (We only compare part of the one header that was modified)
            # Compare also with the second writeto output
            with fits.open(self.data('test0.fits')) as hdul2:
                with fits.open(self.temp('test2.fits')) as hdul3:
                    for hdul in (hdul1, hdul3):
                        for idx, hdus in enumerate(zip(hdul1, hdul)):
                            hdu1, hdu2 = hdus
                            if idx != 1:
                                assert hdu1.header == hdu2.header
                            else:
                                assert (hdu1.header ==
                                        hdu2.header[:len(hdu1.header)])
                            assert np.all(hdu1.data == hdu2.data)
class TestConvenienceFunctions(PyfitsTestCase):
    """Tests of the module-level convenience functions (``fits.writeto``)."""

    def test_writeto(self):
        """
        Simple test for writing a trivial header and some data to a file
        with the `writeto()` convenience function.
        """
        data = np.zeros((100, 100))
        header = fits.Header()
        fits.writeto(self.temp('array.fits'), data, header=header,
                     clobber=True)
        # FIX: close the file handle via a context manager (the original
        # left the HDUList open)
        with fits.open(self.temp('array.fits')) as hdul:
            assert len(hdul) == 1
            assert (data == hdul[0].data).all()

    def test_writeto_2(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107
        Test of `writeto()` with a trivial header containing a single keyword.
        """
        data = np.zeros((100, 100))
        header = fits.Header()
        header['CRPIX1'] = 1.
        fits.writeto(self.temp('array.fits'), data, header=header,
                     clobber=True, output_verify='silentfix')
        with fits.open(self.temp('array.fits')) as hdul:
            assert len(hdul) == 1
            assert (data == hdul[0].data).all()
            assert 'CRPIX1' in hdul[0].header
            assert hdul[0].header['CRPIX1'] == 1.0
class TestFileFunctions(PyfitsTestCase):
    """
    Tests various basic I/O operations, specifically in the pyfits.file._File
    class.
    """

    def test_open_nonexistent(self):
        """Test that trying to open a non-existent file results in an
        IOError (and not some other arbitrary exception).
        """
        # FIX: the original had a pointless bare ``except: raise`` clause and
        # would pass silently if no exception were raised at all; fail
        # explicitly in that case instead.
        try:
            fits.open(self.temp('foobar.fits'))
        except IOError as exc:
            assert 'No such file or directory' in str(exc)
        else:
            self.fail('An IOError should have been raised.')
        # But opening in ostream or append mode should be okay, since they
        # allow writing new files
        for mode in ('ostream', 'append'):
            with fits.open(self.temp('foobar.fits'), mode=mode) as h:
                pass
            assert os.path.exists(self.temp('foobar.fits'))
            os.remove(self.temp('foobar.fits'))

    def test_open_gzipped(self):
        with ignore_warnings():
            assert len(fits.open(self._make_gzip_file())) == 5

    def test_detect_gzipped(self):
        """Test detection of a gzip file when the extension is not .gz."""
        with ignore_warnings():
            assert len(fits.open(self._make_gzip_file('test0.fz'))) == 5

    def test_writeto_append_mode_gzip(self):
        """Regression test for
        https://github.com/spacetelescope/PyFITS/issues/33
        Check that a new GzipFile opened in append mode can be used to write
        out a new FITS file.
        """
        # Note: when opening a GzipFile the 'b+' is superfluous, but this was
        # still how the original test case looked
        # Note: with statement not supported on GzipFile in older Python
        # versions
        fileobj = gzip.GzipFile(self.temp('test.fits.gz'), 'ab+')
        h = fits.PrimaryHDU()
        try:
            h.writeto(fileobj)
        finally:
            fileobj.close()
        with fits.open(self.temp('test.fits.gz')) as hdul:
            assert hdul[0].header == h.header

    def test_open_bzipped(self):
        with ignore_warnings():
            assert len(fits.open(self._make_bzip2_file())) == 5

    def test_detect_bzipped(self):
        """Test detection of a bzip2 file when the extension is not .bz2."""
        with ignore_warnings():
            assert len(fits.open(self._make_bzip2_file('test0.xx'))) == 5

    def test_writeto_bzip2_fileobj(self):
        """Test writing to a bz2.BZ2File file like object"""
        fileobj = bz2.BZ2File(self.temp('test.fits.bz2'), 'w')
        h = fits.PrimaryHDU()
        try:
            h.writeto(fileobj)
        finally:
            fileobj.close()
        with fits.open(self.temp('test.fits.bz2')) as hdul:
            assert hdul[0].header == h.header

    def test_writeto_bzip2_filename(self):
        """Test writing to a bzip2 file by name"""
        filename = self.temp('testname.fits.bz2')
        h = fits.PrimaryHDU()
        h.writeto(filename)
        with fits.open(self.temp('testname.fits.bz2')) as hdul:
            assert hdul[0].header == h.header

    def test_open_zipped(self):
        # FIX: the original built the zip archive twice; reuse the one file
        # for both the filename-based and ZipFile-object-based opens.
        zf = self._make_zip_file()
        with ignore_warnings():
            assert len(fits.open(zf)) == 5
        with ignore_warnings():
            assert len(fits.open(zipfile.ZipFile(zf))) == 5

    def test_detect_zipped(self):
        """Test detection of a zip file when the extension is not .zip."""
        zf = self._make_zip_file(filename='test0.fz')
        with ignore_warnings():
            assert len(fits.open(zf)) == 5

    def test_open_zipped_writeable(self):
        """Opening zipped files in a writeable mode should fail."""
        zf = self._make_zip_file()
        assert_raises(IOError, fits.open, zf, 'update')
        assert_raises(IOError, fits.open, zf, 'append')
        zf = zipfile.ZipFile(zf, 'a')
        assert_raises(IOError, fits.open, zf, 'update')
        assert_raises(IOError, fits.open, zf, 'append')

    def test_open_multiple_member_zipfile(self):
        """
        Opening zip files containing more than one member files should fail
        as there's no obvious way to specify which file is the FITS file to
        read.
        """
        zfile = zipfile.ZipFile(self.temp('test0.zip'), 'w')
        zfile.write(self.data('test0.fits'))
        zfile.writestr('foo', 'bar')
        zfile.close()
        assert_raises(IOError, fits.open, zfile.filename)

    def test_read_open_file(self):
        """Read from an existing file object."""
        with open(self.data('test0.fits'), 'rb') as f:
            assert len(fits.open(f)) == 5

    def test_read_closed_file(self):
        """Read from an existing file object that's been closed."""
        # Deliberately closed: fits.open should reopen by the file's name.
        f = open(self.data('test0.fits'), 'rb')
        f.close()
        assert len(fits.open(f)) == 5

    def test_read_open_gzip_file(self):
        """Read from an open gzip file object."""
        gf = gzip.GzipFile(self._make_gzip_file())
        try:
            assert len(fits.open(gf)) == 5
        finally:
            gf.close()

    def test_open_gzip_file_for_writing(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195."""
        gf = self._make_gzip_file()
        with fits.open(gf, mode='update') as h:
            h[0].header['EXPFLAG'] = 'ABNORMAL'
        with fits.open(gf) as h:
            # Just to make sure the update worked; if updates work
            # normal writes should work too...
            assert h[0].header['EXPFLAG'] == 'ABNORMAL'

    def test_write_read_gzip_file(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/2794
        Ensure files written through gzip are readable.
        """
        data = np.arange(100)
        hdu = fits.PrimaryHDU(data=data)
        hdu.writeto(self.temp('test.fits.gz'))
        with open(self.temp('test.fits.gz'), 'rb') as f:
            assert f.read(3) == GZIP_MAGIC
        with fits.open(self.temp('test.fits.gz')) as hdul:
            assert np.all(hdul[0].data == data)

    def test_read_file_like_object(self):
        """Test reading a FITS file from a file-like object."""
        filelike = BytesIO()
        with open(self.data('test0.fits'), 'rb') as f:
            filelike.write(f.read())
        filelike.seek(0)
        with ignore_warnings():
            assert len(fits.open(filelike)) == 5

    def test_updated_file_permissions(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79
        Tests that when a FITS file is modified in update mode, the file
        permissions are preserved.
        """
        filename = self.temp('test.fits')
        hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
        hdul = fits.HDUList(hdul)
        hdul.writeto(filename)
        old_mode = os.stat(filename).st_mode
        hdul = fits.open(filename, mode='update')
        hdul.insert(1, fits.ImageHDU())
        hdul.flush()
        hdul.close()
        assert old_mode == os.stat(filename).st_mode

    def test_fileobj_mode_guessing(self):
        """Tests whether a file opened without a specified pyfits mode
        ('readonly', etc.) is opened in a mode appropriate for the given file
        object.
        """
        self.copy_file('test0.fits')
        # Opening in text mode should outright fail
        for mode in ('r', 'w', 'a'):
            with open(self.temp('test0.fits'), mode) as f:
                assert_raises(ValueError, fits.HDUList.fromfile, f)
        # Need to re-copy the file since opening it in 'w' mode blew it away
        self.copy_file('test0.fits')
        with open(self.temp('test0.fits'), 'rb') as f:
            with fits.HDUList.fromfile(f) as h:
                assert h.fileinfo(0)['filemode'] == 'readonly'
        for mode in ('wb', 'ab'):
            with open(self.temp('test0.fits'), mode) as f:
                with fits.HDUList.fromfile(f) as h:
                    # Basically opening empty files for output streaming
                    assert len(h) == 0
        # Need to re-copy the file since opening it in 'w' mode blew it away
        self.copy_file('test0.fits')
        with open(self.temp('test0.fits'), 'wb+') as f:
            with fits.HDUList.fromfile(f) as h:
                # wb+ still causes an existing file to be overwritten so there
                # are no HDUs
                assert len(h) == 0
        # Need to re-copy the file since opening it in 'w' mode blew it away
        self.copy_file('test0.fits')
        with open(self.temp('test0.fits'), 'rb+') as f:
            with fits.HDUList.fromfile(f) as h:
                assert h.fileinfo(0)['filemode'] == 'update'
        with open(self.temp('test0.fits'), 'ab+') as f:
            with fits.HDUList.fromfile(f) as h:
                assert h.fileinfo(0)['filemode'] == 'append'

    def test_mmap_unwriteable(self):
        """Regression test for https://github.com/astropy/astropy/issues/968
        Temporarily patches mmap.mmap to exhibit platform-specific bad
        behavior.
        """
        class MockMmap(mmap.mmap):
            def flush(self):
                raise mmap.error('flush is broken on this platform')
        old_mmap = mmap.mmap
        mmap.mmap = MockMmap
        # Force the mmap test to be rerun
        _File.__dict__['_mmap_available']._cache.clear()
        try:
            self.copy_file('test0.fits')
            with catch_warnings(record=True) as w:
                with fits.open(self.temp('test0.fits'), mode='update',
                               memmap=True) as h:
                    h[1].data[0, 0] = 999
                assert len(w) == 1
                assert 'mmap.flush is unavailable' in str(w[0].message)
            # Double check that writing without mmap still worked
            with fits.open(self.temp('test0.fits')) as h:
                assert h[1].data[0, 0] == 999
        finally:
            # Always restore the real mmap and clear the cached probe result
            mmap.mmap = old_mmap
            _File.__dict__['_mmap_available']._cache.clear()

    def test_mmap_closing(self):
        """
        Tests that the mmap reference is closed/removed when there aren't any
        HDU data references left.
        """
        # NOTE(review): ``pytest`` is not imported in this module (nose is
        # used elsewhere); this xfail would raise NameError if reached --
        # confirm against the test runner in use.
        if not _File._mmap_available:
            pytest.xfail('not expected to work on platforms without mmap '
                         'support')
        with fits.open(self.data('test0.fits'), memmap=True) as hdul:
            assert hdul._file._mmap is None
            hdul[1].data
            assert hdul._file._mmap is not None
            del hdul[1].data
            # Should be no more references to data in the file so close the
            # mmap
            assert hdul._file._mmap is None
            hdul[1].data
            hdul[2].data
            del hdul[1].data
            # hdul[2].data is still references so keep the mmap open
            assert hdul._file._mmap is not None
            del hdul[2].data
            assert hdul._file._mmap is None
        assert hdul._file._mmap is None
        with fits.open(self.data('test0.fits'), memmap=True) as hdul:
            hdul[1].data
        # When the only reference to the data is on the hdu object, and the
        # hdulist it belongs to has been closed, the mmap should be closed as
        # well
        assert hdul._file._mmap is None
        with fits.open(self.data('test0.fits'), memmap=True) as hdul:
            data = hdul[1].data
            # also make a copy
            data_copy = data.copy()
        # The HDUList is closed; in fact, get rid of it completely
        del hdul
        # The data array should still work though...
        assert np.all(data == data_copy)

    def test_uncloseable_file(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/2356
        Demonstrates that FITS files can still be read from "file-like" objects
        that don't have an obvious "open" or "closed" state.
        """
        class MyFileLike(object):
            def __init__(self, foobar):
                self._foobar = foobar
            def read(self, n):
                return self._foobar.read(n)
            def seek(self, offset, whence=os.SEEK_SET):
                self._foobar.seek(offset, whence)
            def tell(self):
                return self._foobar.tell()
        with open(self.data('test0.fits'), 'rb') as f:
            fileobj = MyFileLike(f)
            with fits.open(fileobj) as hdul1:
                with fits.open(self.data('test0.fits')) as hdul2:
                    assert hdul1.info(output=False) == hdul2.info(output=False)
                    for hdu1, hdu2 in zip(hdul1, hdul2):
                        assert hdu1.header == hdu2.header
                        if hdu1.data is not None and hdu2.data is not None:
                            assert np.all(hdu1.data == hdu2.data)

    if HAVE_STRINGIO:
        def test_write_stringio(self):
            """
            Regression test for https://github.com/astropy/astropy/issues/2463
            Only test against `StringIO.StringIO` on Python versions that have it.
            Note: `io.StringIO` is not supported for this purpose as it does not
            accept a bytes stream.
            """
            self._test_write_string_bytes_io(StringIO.StringIO())

    if HAVE_STRINGIO:
        def test_write_stringio_discontiguous(self):
            """
            Regression test related to
            https://github.com/astropy/astropy/issues/2794#issuecomment-55441539
            Demonstrates that writing an HDU containing a discontiguous Numpy
            array should work properly.
            """
            data = np.arange(100)[::3]
            hdu = fits.PrimaryHDU(data=data)
            fileobj = StringIO.StringIO()
            hdu.writeto(fileobj)
            fileobj.seek(0)
            with fits.open(fileobj) as h:
                assert np.all(h[0].data == data)

    def test_write_bytesio(self):
        """
        Regression test for https://github.com/astropy/astropy/issues/2463
        Test againt `io.BytesIO`.  `io.StringIO` is not supported.
        """
        self._test_write_string_bytes_io(io.BytesIO())

    if not sys.platform.startswith('win32'):
        def test_filename_with_colon(self):
            """
            Test reading and writing a file with a colon in the filename.
            Regression test for https://github.com/astropy/astropy/issues/3122
            """
            # Skip on Windows since colons in filenames makes NTFS sad.
            filename = 'APEXHET.2014-04-01T15:18:01.000.fits'
            hdu = fits.PrimaryHDU(data=np.arange(10))
            hdu.writeto(self.temp(filename))
            with fits.open(self.temp(filename)) as hdul:
                assert np.all(hdul[0].data == hdu.data)

    def _test_write_string_bytes_io(self, fileobj):
        """
        Implemented for both test_write_stringio and test_write_bytesio.
        """
        with fits.open(self.data('test0.fits')) as hdul:
            hdul.writeto(fileobj)
            hdul2 = fits.HDUList.fromstring(fileobj.getvalue())
            assert FITSDiff(hdul, hdul2).identical

    def _make_gzip_file(self, filename='test0.fits.gz'):
        # Helper: gzip-compress test0.fits into the temp dir; returns its path.
        gzfile = self.temp(filename)
        with open(self.data('test0.fits'), 'rb') as f:
            gz = gzip.open(gzfile, 'wb')
            gz.write(f.read())
            gz.close()
        return gzfile

    def _make_zip_file(self, mode='copyonwrite', filename='test0.fits.zip'):
        # Helper: zip test0.fits into the temp dir; returns the archive path.
        # (``mode`` is accepted for interface compatibility but unused.)
        zfile = zipfile.ZipFile(self.temp(filename), 'w')
        zfile.write(self.data('test0.fits'))
        zfile.close()
        return zfile.filename

    def _make_bzip2_file(self, filename='test0.fits.bz2'):
        # Helper: bzip2-compress test0.fits into the temp dir; returns its path.
        bzfile = self.temp(filename)
        with open(self.data('test0.fits'), 'rb') as f:
            bz = bz2.BZ2File(bzfile, 'w')
            bz.write(f.read())
            bz.close()
        return bzfile
class TestStreamingFunctions(PyfitsTestCase):
    """Test functionality of the StreamingHDU class."""

    def test_streaming_hdu(self):
        shdu = self._make_streaming_hdu(self.temp('new.fits'))
        assert isinstance(shdu.size, int)
        # 5x5 int32 array = 100 bytes of data
        assert shdu.size == 100

    def test_streaming_hdu_file_wrong_mode(self):
        """
        Test that streaming an HDU to a file opened in the wrong mode fails as
        expected (any writeable mode is acceptable; any read-only mode should
        fail).
        """
        # touch new.fits
        with open(self.temp('new.fits'), 'wb'):
            pass
        with open(self.temp('new.fits'), 'rb') as f:
            header = fits.Header()
            assert_raises(ValueError, fits.StreamingHDU, f, header)

    def test_streaming_hdu_write_file(self):
        """Test streaming an HDU to an open file object."""
        arr = np.zeros((5, 5), dtype=np.int32)
        with open(self.temp('new.fits'), 'ab+') as f:
            shdu = self._make_streaming_hdu(f)
            shdu.write(arr)
            assert shdu.writecomplete
            assert shdu.size == 100
        # FIX: close the result of fits.open (original left it open)
        with fits.open(self.temp('new.fits')) as hdul:
            assert len(hdul) == 1
            assert (hdul[0].data == arr).all()

    def test_streaming_hdu_write_file_like(self):
        """Test streaming an HDU to an open file-like object."""
        arr = np.zeros((5, 5), dtype=np.int32)
        # The file-like object underlying a StreamingHDU must be in binary mode
        sf = BytesIO()
        shdu = self._make_streaming_hdu(sf)
        shdu.write(arr)
        assert shdu.writecomplete
        assert shdu.size == 100
        sf.seek(0)
        with fits.open(sf) as hdul:
            assert len(hdul) == 1
            assert (hdul[0].data == arr).all()

    def test_streaming_hdu_append_extension(self):
        arr = np.zeros((5, 5), dtype=np.int32)
        with open(self.temp('new.fits'), 'ab+') as f:
            shdu = self._make_streaming_hdu(f)
            shdu.write(arr)
        # Doing this again should update the file with an extension
        with open(self.temp('new.fits'), 'ab+') as f:
            shdu = self._make_streaming_hdu(f)
            shdu.write(arr)

    def test_fix_invalid_extname(self):
        """An integer EXTNAME is invalid; 'fix' converts it to a string."""
        phdu = fits.PrimaryHDU()
        ihdu = fits.ImageHDU()
        ihdu.header['EXTNAME'] = 12345678
        hdul = fits.HDUList([phdu, ihdu])
        assert_raises(fits.VerifyError, hdul.writeto, self.temp('temp.fits'),
                      output_verify='exception')
        with CaptureStdio():
            hdul.writeto(self.temp('temp.fits'), output_verify='fix')
        # BUG FIX: the original opened the written file but never bound it,
        # asserting against the in-memory ``hdul`` instead of the file on
        # disk.  Check the reopened file, which is what the test intends.
        with fits.open(self.temp('temp.fits')) as hdul2:
            assert hdul2[1].name == '12345678'
            assert hdul2[1].header['EXTNAME'] == '12345678'

    def _make_streaming_hdu(self, fileobj):
        # Helper: build a StreamingHDU with a minimal 5x5 int32 image header.
        hd = fits.Header()
        hd['SIMPLE'] = (True, 'conforms to FITS standard')
        hd['BITPIX'] = (32, 'array data type')
        hd['NAXIS'] = (2, 'number of array dimensions')
        hd['NAXIS1'] = 5
        hd['NAXIS2'] = 5
        hd['EXTEND'] = True
        return fits.StreamingHDU(fileobj, hd)

    def test_blank_ignore(self):
        with fits.open(self.data('blank.fits'), ignore_blank=True) as f:
            assert f[0].data.flat[0] == 2

    def test_error_if_memmap_impossible(self):
        pth = self.data('blank.fits')
        assert_raises(ValueError,
                      lambda attr: getattr(fits.open(pth, memmap=True)[0],
                                           attr),
                      'data')
        # However, it should not fail if do_not_scale_image_data was used:
        # See https://github.com/astropy/astropy/issues/3766
        with fits.open(pth, memmap=True,
                       do_not_scale_image_data=True) as hdul:
            hdul[0].data  # Just make sure it doesn't crash
| {
"content_hash": "f71b03ce240aceacc1e4464857e45564",
"timestamp": "",
"source": "github",
"line_count": 1124,
"max_line_length": 84,
"avg_line_length": 35.94128113879004,
"alnum_prop": 0.5646319124709144,
"repo_name": "spacetelescope/PyFITS",
"id": "b6d5a4e56fe57b50ce12614c1569c8a7a554829e",
"size": "40398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfits/tests/test_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "40680"
},
{
"name": "Python",
"bytes": "1234849"
}
],
"symlink_target": ""
} |
"""Flags for the Azure Blob Storage interface."""
import gflags as flags
# Storage-account credentials required by the Azure test scripts; both
# default to None so callers must supply them explicitly.
_AZURE_STRING_FLAGS = (
    ('azure_account', 'The name of the storage account for Azure.'),
    ('azure_key', 'The key of the storage account for Azure.'),
)
for _flag_name, _flag_help in _AZURE_STRING_FLAGS:
    flags.DEFINE_string(_flag_name, None, _flag_help)
| {
"content_hash": "d4b876165e82bd56c193b0080632327c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 65,
"avg_line_length": 32.111111111111114,
"alnum_prop": 0.6366782006920415,
"repo_name": "xiaolihope/PerfKitBenchmarker-1.7.0",
"id": "e08850ad72fa19a2d220a171a7ed4de388b697a5",
"size": "900",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/scripts/object_storage_api_test_scripts/azure_flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1727478"
},
{
"name": "Shell",
"bytes": "23457"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from datetime import datetime
from mock import patch, Mock
from teuthology import suite
from teuthology.config import config
class TestSuiteOffline(object):
    """Tests for helpers in ``teuthology.suite`` that need no network
    access -- all external HTTP calls are mocked out.
    """
    def test_name_timestamp_passed(self):
        # An explicitly supplied timestamp must appear in the run name.
        stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
        name = suite.make_run_name('suite', 'ceph', 'kernel', 'flavor',
                                   'mtype', timestamp=stamp)
        assert str(stamp) in name
    def test_name_timestamp_not_passed(self):
        # Without an explicit timestamp "now" is used.  NOTE: can flake if
        # the clock second ticks over between these two statements.
        stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
        name = suite.make_run_name('suite', 'ceph', 'kernel', 'flavor',
                                   'mtype')
        assert str(stamp) in name
    def test_name_user(self):
        # The requesting user is used as the run-name prefix.
        name = suite.make_run_name('suite', 'ceph', 'kernel', 'flavor',
                                   'mtype', user='USER')
        assert name.startswith('USER-')
    def test_gitbuilder_url(self):
        ref_url = "http://{host}/ceph-deb-squeeze-x86_64-basic/".format(
            host=config.gitbuilder_host
        )
        assert suite.get_gitbuilder_url('ceph', 'squeeze', 'deb', 'x86_64',
                                        'basic') == ref_url
    def test_substitute_placeholders(self):
        """Placeholders in dict_templ are replaced by the input values
        without mutating the template itself."""
        input_dict = dict(
            suite='suite',
            suite_branch='suite_branch',
            ceph_branch='ceph_branch',
            ceph_hash='ceph_hash',
            teuthology_branch='teuthology_branch',
            machine_type='machine_type',
            distro='distro',
        )
        output_dict = suite.substitute_placeholders(suite.dict_templ,
                                                    input_dict)
        assert output_dict['suite'] == 'suite'
        # The shared template must still contain Placeholder objects.
        assert isinstance(suite.dict_templ['suite'], suite.Placeholder)
        assert isinstance(
            suite.dict_templ['overrides']['admin_socket']['branch'],
            suite.Placeholder)
    def test_null_placeholders_dropped(self):
        # Keys whose substituted value is None are omitted from the output.
        input_dict = dict(
            suite='suite',
            suite_branch='suite_branch',
            ceph_branch='ceph_branch',
            ceph_hash='ceph_hash',
            teuthology_branch='teuthology_branch',
            machine_type='machine_type',
            distro=None,
        )
        output_dict = suite.substitute_placeholders(suite.dict_templ,
                                                    input_dict)
        assert 'os_type' not in output_dict
    @patch('teuthology.suite.get_gitbuilder_url')
    @patch('requests.get')
    def test_get_hash_success(self, m_get, m_get_gitbuilder_url):
        # A 2xx response: the body is returned as the hash.
        m_get_gitbuilder_url.return_value = "http://baseurl.com"
        mock_resp = Mock()
        mock_resp.ok = True
        mock_resp.text = "the_hash"
        m_get.return_value = mock_resp
        result = suite.get_hash()
        m_get.assert_called_with("http://baseurl.com/ref/master/sha1")
        assert result == "the_hash"
    @patch('teuthology.suite.get_gitbuilder_url')
    @patch('requests.get')
    def test_get_hash_fail(self, m_get, m_get_gitbuilder_url):
        # A non-2xx response yields None rather than raising.
        m_get_gitbuilder_url.return_value = "http://baseurl.com"
        mock_resp = Mock()
        mock_resp.ok = False
        m_get.return_value = mock_resp
        result = suite.get_hash()
        assert result is None
    @patch('teuthology.suite.get_gitbuilder_url')
    @patch('requests.get')
    def test_package_version_for_hash(self, m_get, m_get_gitbuilder_url):
        m_get_gitbuilder_url.return_value = "http://baseurl.com"
        mock_resp = Mock()
        mock_resp.ok = True
        mock_resp.text = "the_version"
        m_get.return_value = mock_resp
        result = suite.package_version_for_hash("hash")
        m_get.assert_called_with("http://baseurl.com/sha1/hash/version")
        assert result == "the_version"
    @patch('requests.get')
    def test_get_branch_info(self, m_get):
        # Branch info is fetched from the GitHub refs API and returned
        # as parsed JSON.
        mock_resp = Mock()
        mock_resp.ok = True
        mock_resp.json.return_value = "some json"
        m_get.return_value = mock_resp
        result = suite.get_branch_info("teuthology", "master")
        m_get.assert_called_with(
            "https://api.github.com/repos/ceph/teuthology/git/refs/heads/master"
        )
        assert result == "some json"
    @patch('teuthology.suite.lock')
    def test_get_arch_fail(self, m_lock):
        # No locks available: get_arch still queries with count=1.
        m_lock.list_locks.return_value = False
        suite.get_arch('magna')
        m_lock.list_locks.assert_called_with(machine_type="magna", count=1)
    @patch('teuthology.suite.lock')
    def test_get_arch_success(self, m_lock):
        # The arch of the first matching lock is returned.
        m_lock.list_locks.return_value = [{"arch": "arch"}]
        result = suite.get_arch('magna')
        m_lock.list_locks.assert_called_with(
            machine_type="magna",
            count=1
        )
        assert result == "arch"
    def test_combine_path(self):
        result = suite.combine_path("/path/to/left", "right/side")
        assert result == "/path/to/left/right/side"
    def test_combine_path_no_right(self):
        # A None right-hand side leaves the left path unchanged.
        result = suite.combine_path("/path/to/left", None)
        assert result == "/path/to/left"
class TestFlavor(object):
    """Behavioral tests for suite.get_install_task_flavor()."""
    def test_get_install_task_flavor_bare(self):
        """A bare install task falls back to the 'basic' flavor."""
        job_config = {'tasks': [{'install': {}}]}
        assert suite.get_install_task_flavor(job_config) == 'basic'
    def test_get_install_task_flavor_simple(self):
        """An explicit flavor on the install task is returned as-is."""
        job_config = {'tasks': [{'install': {'flavor': 'notcmalloc'}}]}
        assert suite.get_install_task_flavor(job_config) == 'notcmalloc'
    def test_get_install_task_flavor_override_simple(self):
        """A top-level install override supplies the flavor."""
        job_config = {
            'tasks': [{'install': {}}],
            'overrides': {'install': {'flavor': 'notcmalloc'}},
        }
        assert suite.get_install_task_flavor(job_config) == 'notcmalloc'
    def test_get_install_task_flavor_override_project(self):
        """A project-scoped (ceph) install override is honored too."""
        job_config = {
            'tasks': [{'install': {}}],
            'overrides': {'install': {'ceph': {'flavor': 'notcmalloc'}}},
        }
        assert suite.get_install_task_flavor(job_config) == 'notcmalloc'
class TestMissingPackages(object):
    """
    Tests the functionality that checks to see if a
    scheduled job will have missing packages in gitbuilder.
    """
    def setup(self):
        # Baseline cache layout: sha1 -> os -> flavor -> version string.
        package_versions = dict(
            sha1=dict(
                ubuntu=dict(
                    basic="1.0",
                )
            )
        )
        self.pv = package_versions
    def test_os_in_package_versions(self):
        # A cached (sha1, os, flavor) entry is returned unchanged.
        assert self.pv == suite.get_package_versions(
            "sha1",
            "ubuntu",
            "basic",
            package_versions=self.pv
        )
    @patch("teuthology.suite.package_version_for_hash")
    def test_os_not_in_package_versions(self, m_package_versions_for_hash):
        # A cache miss triggers a gitbuilder lookup and extends the cache.
        m_package_versions_for_hash.return_value = "1.1"
        result = suite.get_package_versions(
            "sha1",
            "rhel",
            "basic",
            package_versions=self.pv
        )
        expected = deepcopy(self.pv)
        expected['sha1'].update(dict(rhel=dict(basic="1.1")))
        assert result == expected
    @patch("teuthology.suite.package_version_for_hash")
    def test_package_versions_not_found(self, m_package_versions_for_hash):
        # if gitbuilder returns a status that's not a 200, None is returned
        m_package_versions_for_hash.return_value = None
        result = suite.get_package_versions(
            "sha1",
            "rhel",
            "basic",
            package_versions=self.pv
        )
        # Cache is returned unchanged when the lookup fails.
        assert result == self.pv
    @patch("teuthology.suite.package_version_for_hash")
    def test_no_package_versions_kwarg(self, m_package_versions_for_hash):
        # Without a seed cache, a fresh one is built from the lookup.
        m_package_versions_for_hash.return_value = "1.0"
        result = suite.get_package_versions(
            "sha1",
            "ubuntu",
            "basic",
        )
        expected = deepcopy(self.pv)
        assert result == expected
    def test_distro_has_packages(self):
        result = suite.has_packages_for_distro(
            "sha1",
            "ubuntu",
            "basic",
            package_versions=self.pv,
        )
        assert result
    def test_distro_does_not_have_packages(self):
        result = suite.has_packages_for_distro(
            "sha1",
            "rhel",
            "basic",
            package_versions=self.pv,
        )
        assert not result
    @patch("teuthology.suite.get_package_versions")
    def test_has_packages_no_package_versions(self, m_get_package_versions):
        # Without a kwarg cache, has_packages_for_distro builds one itself.
        m_get_package_versions.return_value = self.pv
        result = suite.has_packages_for_distro(
            "sha1",
            "rhel",
            "basic",
        )
        assert not result
class TestDistroDefaults(object):
    """suite.get_distro_defaults() maps an OS name and machine type to an
    (arch, distro codename, package type) triple."""
    def _assert_defaults(self, distro, machine_type, expected):
        # Helper: compare the returned triple against the expectation.
        assert suite.get_distro_defaults(distro, machine_type) == expected
    def test_distro_defaults_saya(self):
        self._assert_defaults('ubuntu', 'saya', ('armv7l', 'saucy', 'deb'))
    def test_distro_defaults_plana(self):
        self._assert_defaults('ubuntu', 'plana', ('x86_64', 'trusty', 'deb'))
    def test_distro_defaults_debian(self):
        self._assert_defaults('debian', 'magna', ('x86_64', 'wheezy', 'deb'))
    def test_distro_defaults_centos(self):
        self._assert_defaults('centos', 'magna', ('x86_64', 'centos7', 'rpm'))
    def test_distro_defaults_fedora(self):
        self._assert_defaults('fedora', 'magna', ('x86_64', 'fedora20', 'rpm'))
    def test_distro_defaults_default(self):
        self._assert_defaults('rhel', 'magna', ('x86_64', 'centos7', 'rpm'))
def make_fake_fstools(fake_filesystem):
    """
    Build fake listdir(), isfile() and isdir() functions backed by a nested
    dict, to be used instead of os.listdir(), os.path.isfile() and
    os.path.isdir().

    In the dict, a key mapping to a dict represents a directory and a key
    mapping to None represents a file.  An example fake_filesystem value:
    >>> fake_fs = {
        'a_directory': {
            'another_directory': {
                'a_file': None,
                'another_file': None,
            },
            'random_file': None,
            'yet_another_directory': {
                'empty_directory': {},
            },
        },
    }
    >>> fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
    >>> fake_listdir('a_directory/yet_another_directory')
    ['empty_directory']
    >>> fake_isfile('a_directory/yet_another_directory')
    False

    :param fake_filesystem: A dict representing a filesystem layout
    :returns: The tuple (fake_listdir, fake_isfile, fake_isdir)
    """
    assert isinstance(fake_filesystem, dict)
    def fake_listdir(path, fsdict=False):
        """List the entries of *path*.  Raises OSError for missing or
        non-directory path components; returns [] for an empty dir."""
        if fsdict is False:
            fsdict = fake_filesystem
        remainder = path.strip('/') + '/'
        subdict = fsdict
        while '/' in remainder:
            next_dir, remainder = remainder.split('/', 1)
            if next_dir not in subdict:
                raise OSError(
                    '[Errno 2] No such file or directory: %s' % next_dir)
            subdict = subdict.get(next_dir)
            if not isinstance(subdict, dict):
                raise OSError('[Errno 20] Not a directory: %s' % next_dir)
        if subdict and not remainder:
            # list() so the result is a real list on Python 3 as well
            # (dict.keys() is a view there), matching os.listdir().
            return list(subdict.keys())
        return []
    def fake_isfile(path, fsdict=False):
        """Return True if *path* names a file (None leaf) in the fake fs.
        Raises OSError when any component is missing."""
        if fsdict is False:
            fsdict = fake_filesystem
        components = path.strip('/').split('/')
        subdict = fsdict
        for component in components:
            if component not in subdict:
                raise OSError(
                    '[Errno 2] No such file or directory: %s' % component)
            subdict = subdict.get(component)
        if subdict is None:
            return True
        else:
            return False
    def fake_isdir(path, fsdict=False):
        # Bug fix: forward fsdict instead of silently ignoring it.
        return not fake_isfile(path, fsdict)
    return fake_listdir, fake_isfile, fake_isdir
class TestBuildMatrix(object):
    """Tests for suite.build_matrix() driven by a fake in-memory filesystem
    (see make_fake_fstools).

    As demonstrated by the tests below, a '%' entry in a directory
    convolves (cartesian-products) its sibling subdirectories, while a '+'
    entry concatenates all their fragments into a single job.
    """
    def fragment_occurences(self, jobs, fragment):
        # What fraction of jobs contain fragment?
        count = 0
        for (description, fragment_list) in jobs:
            for item in fragment_list:
                if item.endswith(fragment):
                    count += 1
        return count / float(len(jobs))
    def test_concatenate_1x2x3(self):
        """'+' concatenates: 1+2+3 fragments collapse into one job."""
        fake_fs = {
            'd0_0': {
                '+': None,
                'd1_0': {
                    'd1_0_0.yaml': None,
                },
                'd1_1': {
                    'd1_1_0.yaml': None,
                    'd1_1_1.yaml': None,
                },
                'd1_2': {
                    'd1_2_0.yaml': None,
                    'd1_2_1.yaml': None,
                    'd1_2_2.yaml': None,
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('d0_0', fake_isfile, fake_isdir,
                                    fake_listdir)
        assert len(result) == 1
    def test_convolve_2x2(self):
        """'%' convolves: 2x2 -> 4 jobs; each fragment appears in half."""
        fake_fs = {
            'd0_0': {
                '%': None,
                'd1_0': {
                    'd1_0_0.yaml': None,
                    'd1_0_1.yaml': None,
                },
                'd1_1': {
                    'd1_1_0.yaml': None,
                    'd1_1_1.yaml': None,
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('d0_0', fake_isfile, fake_isdir,
                                    fake_listdir)
        assert len(result) == 4
        assert self.fragment_occurences(result, 'd1_1_1.yaml') == 0.5
    def test_convolve_2x2x2(self):
        """2x2x2 convolution -> 8 jobs."""
        fake_fs = {
            'd0_0': {
                '%': None,
                'd1_0': {
                    'd1_0_0.yaml': None,
                    'd1_0_1.yaml': None,
                },
                'd1_1': {
                    'd1_1_0.yaml': None,
                    'd1_1_1.yaml': None,
                },
                'd1_2': {
                    'd1_2_0.yaml': None,
                    'd1_2_1.yaml': None,
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('d0_0', fake_isfile, fake_isdir,
                                    fake_listdir)
        assert len(result) == 8
        assert self.fragment_occurences(result, 'd1_2_0.yaml') == 0.5
    def test_convolve_1x2x4(self):
        """1x2x4 convolution -> 8 jobs; each d1_2 fragment in a quarter."""
        fake_fs = {
            'd0_0': {
                '%': None,
                'd1_0': {
                    'd1_0_0.yaml': None,
                },
                'd1_1': {
                    'd1_1_0.yaml': None,
                    'd1_1_1.yaml': None,
                },
                'd1_2': {
                    'd1_2_0.yaml': None,
                    'd1_2_1.yaml': None,
                    'd1_2_2.yaml': None,
                    'd1_2_3.yaml': None,
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('d0_0', fake_isfile, fake_isdir,
                                    fake_listdir)
        assert len(result) == 8
        assert self.fragment_occurences(result, 'd1_2_2.yaml') == 0.25
    def test_convolve_with_concat(self):
        """A '+' directory nested in a '%' matrix keeps all of its
        fragments together in every generated job."""
        fake_fs = {
            'd0_0': {
                '%': None,
                'd1_0': {
                    'd1_0_0.yaml': None,
                },
                'd1_1': {
                    'd1_1_0.yaml': None,
                    'd1_1_1.yaml': None,
                },
                'd1_2': {
                    '+': None,
                    'd1_2_0.yaml': None,
                    'd1_2_1.yaml': None,
                    'd1_2_2.yaml': None,
                    'd1_2_3.yaml': None,
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('d0_0', fake_isfile, fake_isdir,
                                    fake_listdir)
        assert len(result) == 2
        for i in result:
            assert 'd0_0/d1_2/d1_2_0.yaml' in i[1]
            assert 'd0_0/d1_2/d1_2_1.yaml' in i[1]
            assert 'd0_0/d1_2/d1_2_2.yaml' in i[1]
            assert 'd0_0/d1_2/d1_2_3.yaml' in i[1]
    def test_emulate_teuthology_noceph(self):
        """Realistic suite layout: 1 cluster x 11 distros x 1 task."""
        fake_fs = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('teuthology/no-ceph', fake_isfile,
                                    fake_isdir, fake_listdir)
        assert len(result) == 11
        assert self.fragment_occurences(result, 'vps.yaml') == 1 / 11.0
    def test_empty_dirs(self):
        """Empty directories must not change the generated matrix."""
        fake_fs = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('teuthology/no-ceph', fake_isfile,
                                    fake_isdir, fake_listdir)
        # Same layout plus two empty directories.
        fake_fs2 = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'empty': {},
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                    'empty': {},
                },
            },
        }
        fake_listdir2, fake_isfile2, fake_isdir2 = make_fake_fstools(fake_fs2)
        result2 = suite.build_matrix('teuthology/no-ceph', fake_isfile2,
                                     fake_isdir2, fake_listdir2)
        assert len(result) == 11
        assert len(result2) == len(result)
    def test_disable_extension(self):
        """Non-.yaml files and '.disable'd entries are ignored."""
        fake_fs = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                },
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('teuthology/no-ceph', fake_isfile,
                                    fake_isdir, fake_listdir)
        # Same layout plus ignorable extras: wrong extensions and a
        # '.disable'd directory.
        fake_fs2 = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                        'forcefilevps_ubuntu14.04.yaml.disable': None,
                        'forcefilevps_ubuntu14.04.yaml.anotherextension': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                        'forcefilevps_ubuntu14.04notyaml': None,
                    },
                    'forcefilevps_ubuntu14.04notyaml': None,
                    'tasks.disable': {
                        'teuthology2.yaml': None,
                        'forcefilevps_ubuntu14.04notyaml': None,
                    },
                },
            },
        }
        fake_listdir2, fake_isfile2, fake_isdir2 = make_fake_fstools(fake_fs2)
        result2 = suite.build_matrix('teuthology/no-ceph', fake_isfile2,
                                     fake_isdir2, fake_listdir2)
        assert len(result) == 11
        assert len(result2) == len(result)
    def test_sort_order(self):
        # This test ensures that 'ceph' comes before 'ceph-thrash' when yaml
        # fragments are sorted.
        fake_fs = {
            'thrash': {
                '%': None,
                'ceph-thrash': {'default.yaml': None},
                'ceph': {'base.yaml': None},
                'clusters': {'mds-1active-1standby.yaml': None},
                'debug': {'mds_client.yaml': None},
                'fs': {'btrfs.yaml': None},
                'msgr-failures': {'none.yaml': None},
                'overrides': {'whitelist_wrongly_marked_down.yaml': None},
                'tasks': {'cfuse_workunit_suites_fsstress.yaml': None},
            },
        }
        fake_listdir, fake_isfile, fake_isdir = make_fake_fstools(fake_fs)
        result = suite.build_matrix('thrash', fake_isfile,
                                    fake_isdir, fake_listdir)
        assert len(result) == 1
        assert self.fragment_occurences(result, 'base.yaml') == 1
        fragments = result[0][1]
        assert fragments[0] == 'thrash/ceph/base.yaml'
        assert fragments[1] == 'thrash/ceph-thrash/default.yaml'
| {
"content_hash": "059ef08df94ad483bf24950ce8e3871c",
"timestamp": "",
"source": "github",
"line_count": 701,
"max_line_length": 80,
"avg_line_length": 36.07703281027104,
"alnum_prop": 0.4465005931198102,
"repo_name": "ivotron/teuthology",
"id": "1ef535bcd5216628a7d198124598823ae03220f4",
"size": "25290",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "teuthology/test/test_suite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "752112"
},
{
"name": "Shell",
"bytes": "10658"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: switch the created/last_modified
    # fields on Element, Page and Procedure to database-managed timestamps
    # (auto_now_add on create, auto_now on every save).
    dependencies = [
        ('api', '0005_auto_20150703_2228'),
    ]
    operations = [
        migrations.AlterField(
            model_name='element',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='element',
            name='last_modified',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='page',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='page',
            name='last_modified',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='procedure',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='procedure',
            name='last_modified',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| {
"content_hash": "e38ca530d05b8a39c178a0f619af13b2",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 58,
"avg_line_length": 27.976744186046513,
"alnum_prop": 0.5461346633416458,
"repo_name": "SanaMobile/sana.protocol_builder",
"id": "69dc53f62d3784e58bbd05c21248642406ee9a30",
"size": "1227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src-django/api/migrations/0006_auto_20150714_2302.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "49368"
},
{
"name": "HTML",
"bytes": "32640"
},
{
"name": "JavaScript",
"bytes": "254860"
},
{
"name": "Python",
"bytes": "192441"
},
{
"name": "Ruby",
"bytes": "8875"
},
{
"name": "Shell",
"bytes": "1680"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Schema migration: drop the denormalized base_* stat columns from
    # MonsterInstance, introduce the RuneBuild model (aggregated rune
    # stats per monster), and link each MonsterInstance to a default and
    # an RTA build.
    dependencies = [
        ('herders', '0011_runecraftinstance_quantity'),
    ]
    operations = [
        # Remove per-instance base stats (previously stored redundantly).
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_accuracy',
        ),
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_attack',
        ),
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_crit_damage',
        ),
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_crit_rate',
        ),
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_defense',
        ),
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_hp',
        ),
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_resistance',
        ),
        migrations.RemoveField(
            model_name='monsterinstance',
            name='base_speed',
        ),
        migrations.AlterField(
            model_name='monsterinstance',
            name='stars',
            field=models.IntegerField(choices=[(1, '1⭐'), (2, '2⭐'), (3, '3⭐'), (4, '4⭐'), (5, '5⭐'), (6, '6⭐')]),
        ),
        # New model: a named set of runes with its aggregated stat totals.
        migrations.CreateModel(
            name='RuneBuild',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(default='', max_length=200)),
                ('avg_efficiency', models.FloatField(default=0)),
                ('hp', models.IntegerField(default=0)),
                ('hp_pct', models.IntegerField(default=0)),
                ('attack', models.IntegerField(default=0)),
                ('attack_pct', models.IntegerField(default=0)),
                ('defense', models.IntegerField(default=0)),
                ('defense_pct', models.IntegerField(default=0)),
                ('speed', models.IntegerField(default=0)),
                ('crit_rate', models.IntegerField(default=0)),
                ('crit_damage', models.IntegerField(default=0)),
                ('resistance', models.IntegerField(default=0)),
                ('accuracy', models.IntegerField(default=0)),
                ('monster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.MonsterInstance')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner')),
                ('runes', models.ManyToManyField(to='herders.RuneInstance')),
            ],
        ),
        # SET_NULL so deleting a build does not delete the monster's row.
        migrations.AddField(
            model_name='monsterinstance',
            name='default_build',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='default_build', to='herders.RuneBuild'),
        ),
        migrations.AddField(
            model_name='monsterinstance',
            name='rta_build',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rta_build', to='herders.RuneBuild'),
        ),
    ]
| {
"content_hash": "078e658de1faa993e184923bc2d83012",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 147,
"avg_line_length": 40.02439024390244,
"alnum_prop": 0.5548446069469836,
"repo_name": "PeteAndersen/swarfarm",
"id": "8acbff048b027b11648a63e399f633af5b6bafa8",
"size": "3344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "herders/migrations/0012_auto_20200328_1741.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31891"
},
{
"name": "HTML",
"bytes": "352588"
},
{
"name": "JavaScript",
"bytes": "79075"
},
{
"name": "Python",
"bytes": "982216"
},
{
"name": "Shell",
"bytes": "3403"
}
],
"symlink_target": ""
} |
"""Auto-generated file, do not edit by hand. FI metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Short-number metadata for Finland (FI): the emergency number 112 and the
# 6-digit toll-free helpline 116111.  This file is generated -- the pattern
# strings themselves must not be hand-edited.
PHONE_METADATA_FI = PhoneMetadata(id='FI', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_number_pattern='\\d{3,6}', possible_length=(3, 6)),
    toll_free=PhoneNumberDesc(national_number_pattern='116111', possible_number_pattern='\\d{6}', example_number='116111', possible_length=(6,)),
    premium_rate=PhoneNumberDesc(),
    emergency=PhoneNumberDesc(national_number_pattern='112', possible_number_pattern='\\d{3,6}', example_number='112', possible_length=(3,)),
    short_code=PhoneNumberDesc(national_number_pattern='11(?:2|6111)', possible_number_pattern='\\d{3,6}', example_number='112', possible_length=(3, 6)),
    standard_rate=PhoneNumberDesc(),
    carrier_specific=PhoneNumberDesc(),
    short_data=True)
| {
"content_hash": "57cfe3227b5cb16a575b480b81eef1ff",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 153,
"avg_line_length": 77.5,
"alnum_prop": 0.7311827956989247,
"repo_name": "vicky2135/lucious",
"id": "b0a56318d48d160f77c9e4ce011323838da4a2d4",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_FI.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896683"
},
{
"name": "C++",
"bytes": "52230"
},
{
"name": "CSS",
"bytes": "1169533"
},
{
"name": "HTML",
"bytes": "1104983"
},
{
"name": "JavaScript",
"bytes": "1055140"
},
{
"name": "Makefile",
"bytes": "145238"
},
{
"name": "Python",
"bytes": "55993261"
},
{
"name": "Shell",
"bytes": "40487"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.core import mail
from django.contrib.auth.models import User
from sentry.models import Project, ProjectKey, Group, Event, Team, \
MessageFilterValue, MessageCountByMinute, FilterValue, PendingTeamMember
from sentry.testutils import TestCase
class ProjectTest(TestCase):
    """Tests for Project.merge_to(): all related rows must be moved onto
    the target project and the source project removed."""
    fixtures = ['tests/fixtures/views.json']
    def setUp(self):
        self.project = Project.objects.get(id=1)
    def test_migrate(self):
        project2 = Project.objects.create(name='Test')
        self.project.merge_to(project2)
        # Source project is gone and no related row was orphaned.
        self.assertFalse(Project.objects.filter(pk=1).exists())
        self.assertFalse(Group.objects.filter(project__isnull=True).exists())
        self.assertFalse(Event.objects.filter(project__isnull=True).exists())
        self.assertFalse(MessageFilterValue.objects.filter(project__isnull=True).exists())
        self.assertFalse(MessageCountByMinute.objects.filter(project__isnull=True).exists())
        self.assertFalse(FilterValue.objects.filter(project__isnull=True).exists())
        # Counts below match the fixture contents loaded above.
        self.assertEquals(project2.group_set.count(), 4)
        self.assertEquals(project2.event_set.count(), 10)
        self.assertEquals(project2.messagefiltervalue_set.count(), 0)
        self.assertEquals(project2.messagecountbyminute_set.count(), 0)
        self.assertEquals(project2.filtervalue_set.count(), 0)
class ProjectKeyTest(TestCase):
    """Tests for ProjectKey DSN rendering and automatic key creation."""
    fixtures = ['tests/fixtures/views.json']
    def test_get_dsn(self):
        key = ProjectKey(project_id=1, public_key='public', secret_key='secret')
        with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
            self.assertEquals(key.get_dsn(), 'http://public:secret@example.com/1')
    def test_get_dsn_with_ssl(self):
        # The DSN scheme follows the configured URL prefix.
        key = ProjectKey(project_id=1, public_key='public', secret_key='secret')
        with self.Settings(SENTRY_URL_PREFIX='https://example.com'):
            self.assertEquals(key.get_dsn(), 'https://public:secret@example.com/1')
    def test_get_dsn_with_port(self):
        # A non-default port in the prefix is preserved in the DSN.
        key = ProjectKey(project_id=1, public_key='public', secret_key='secret')
        with self.Settings(SENTRY_URL_PREFIX='http://example.com:81'):
            self.assertEquals(key.get_dsn(), 'http://public:secret@example.com:81/1')
    def test_key_is_created_for_project_with_existing_team(self):
        user = User.objects.create(username='admin')
        team = Team.objects.create(name='Test', slug='test', owner=user)
        project = Project.objects.create(name='Test', slug='test', owner=user, team=team)
        self.assertTrue(project.key_set.filter(user=user).exists())
    def test_key_is_created_for_project_with_new_team(self):
        # Even without an explicit team, project creation grants the owner
        # a key.
        user = User.objects.create(username='admin')
        project = Project.objects.create(name='Test', slug='test', owner=user)
        self.assertTrue(project.key_set.filter(user=user).exists())
class PendingTeamMemberTest(TestCase):
    """Tests for invite-token derivation and the invite email."""
    fixtures = ['tests/fixtures/views.json']
    def test_token_generation(self):
        member = PendingTeamMember(id=1, team_id=1, email='foo@example.com')
        with self.Settings(SENTRY_KEY='a'):
            self.assertEquals(member.token, 'f3f2aa3e57f4b936dfd4f42c38db003e')
    def test_token_generation_unicode_key(self):
        # A non-ASCII SENTRY_KEY must still produce a stable token.
        member = PendingTeamMember(id=1, team_id=1, email='foo@example.com')
        with self.Settings(SENTRY_KEY="\xfc]C\x8a\xd2\x93\x04\x00\x81\xeak\x94\x02H\x1d\xcc&P'q\x12\xa2\xc0\xf2v\x7f\xbb*lX"):
            self.assertEquals(member.token, 'df41d9dfd4ba25d745321e654e15b5d0')
    def test_send_invite_email(self):
        team = Team(name='test', slug='test', id=1)
        member = PendingTeamMember(id=1, team=team, email='foo@example.com')
        with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
            member.send_invite_email()
            # Exactly one message, addressed to the invitee.
            self.assertEquals(len(mail.outbox), 1)
            msg = mail.outbox[0]
            self.assertEquals(msg.to, ['foo@example.com'])
| {
"content_hash": "4b6d98a57bd13059d68f8f30fa390d03",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 126,
"avg_line_length": 44.49438202247191,
"alnum_prop": 0.6825757575757576,
"repo_name": "simmetria/sentry",
"id": "c4622bc0c05c3a70c1556af1a9cc682b19b97e08",
"size": "3977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/models/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
createdby: Darren Zhao Xie on 9/23/2018
module: wechat auto sending msg server
next plan: 1) send self msg
2) multiple wechat login
3) news update server once a day
"""
current_version = 0.0.0.2
| {
"content_hash": "093525ed8fa4b4ee84269eb66a07bb6e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 22.3,
"alnum_prop": 0.6547085201793722,
"repo_name": "mndarren/Code-Lib",
"id": "f11e5bc09509aaff9dda3016cfda06cc5ec52a93",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python_lib/wechat_project/v2/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "23754"
},
{
"name": "C",
"bytes": "6497"
},
{
"name": "C#",
"bytes": "1064"
},
{
"name": "C++",
"bytes": "34707"
},
{
"name": "CSS",
"bytes": "1262"
},
{
"name": "HTML",
"bytes": "1491"
},
{
"name": "Hack",
"bytes": "590"
},
{
"name": "Java",
"bytes": "33553"
},
{
"name": "Jinja",
"bytes": "466"
},
{
"name": "Makefile",
"bytes": "802"
},
{
"name": "PHP",
"bytes": "1119"
},
{
"name": "Perl",
"bytes": "11676"
},
{
"name": "PowerShell",
"bytes": "5628"
},
{
"name": "Python",
"bytes": "121450"
},
{
"name": "Shell",
"bytes": "18238"
},
{
"name": "TSQL",
"bytes": "4024"
},
{
"name": "Zeek",
"bytes": "7592"
}
],
"symlink_target": ""
} |
import warnings
from nose.tools import eq_
from wsgi_intercept.httplib2_intercept import install, uninstall
import wsgi_intercept
from wsgi_intercept import test_wsgi_app
import httplib2
from paste import lint
# Holds the wsgi_intercept.debuglevel value in effect before setup() so
# teardown() can restore it.
_saved_debuglevel = None
def prudent_wsgi_app():
    """Return the test WSGI app wrapped in paste.lint's WSGI-compliance
    checking middleware."""
    app = test_wsgi_app.create_fn()
    return lint.middleware(app)
def setup():
    """Module-level nose setup: make warnings fatal, raise the intercept
    debug level, and route HTTP requests for the fake domain to the
    lint-wrapped test app."""
    # Bug fix: without ``global`` the assignment below created a function
    # local, so the module-level _saved_debuglevel stayed None and
    # teardown() restored a stale value.
    global _saved_debuglevel
    warnings.simplefilter("error")
    _saved_debuglevel, wsgi_intercept.debuglevel = wsgi_intercept.debuglevel, 1
    install()
    wsgi_intercept.add_wsgi_intercept('some_hopefully_nonexistant_domain', 80, prudent_wsgi_app)
def test():
    """A request to the intercepted domain reaches the WSGI app and the
    app records a successful invocation."""
    http = httplib2.Http()
    resp, content = http.request('http://some_hopefully_nonexistant_domain:80/', 'GET')
    assert test_wsgi_app.success()
def test_quoting_issue11():
    # see http://code.google.com/p/wsgi-intercept/issues/detail?id=11
    # The intercept layer must pass PATH_INFO and QUERY_STRING through
    # without re-quoting or decoding them.
    http = httplib2.Http()
    inspected_env = {}
    def make_path_checking_app():
        # App factory (the form add_wsgi_intercept expects); the app
        # captures the WSGI environ values we want to inspect.
        def path_checking_app(environ, start_response):
            inspected_env ['QUERY_STRING'] = environ['QUERY_STRING']
            inspected_env ['PATH_INFO'] = environ['PATH_INFO']
            status = '200 OK'
            response_headers = [('Content-type','text/plain')]
            start_response(status, response_headers)
            return []
        return path_checking_app
    wsgi_intercept.add_wsgi_intercept('some_hopefully_nonexistant_domain', 80, make_path_checking_app)
    resp, content = http.request('http://some_hopefully_nonexistant_domain:80/spaced+words.html?word=something%20spaced', 'GET')
    assert ('QUERY_STRING' in inspected_env and 'PATH_INFO' in inspected_env), "path_checking_app() was never called?"
    # '+' in the path and '%20' in the query must arrive unchanged.
    eq_(inspected_env['PATH_INFO'], '/spaced+words.html')
    eq_(inspected_env['QUERY_STRING'], 'word=something%20spaced')
def teardown():
    """Module-level nose teardown: undo the warning filter, restore the
    saved debug level and remove the httplib2 intercept."""
    warnings.resetwarnings()
    wsgi_intercept.debuglevel = _saved_debuglevel
    uninstall()
if __name__ == '__main__':
    # Allow running this module directly (outside nose); teardown always
    # runs so the intercept is removed even if the test fails.
    setup()
    try:
        test()
    finally:
        teardown()
| {
"content_hash": "25b5919fefc6ac8878ba83bcec1aa04f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 128,
"avg_line_length": 36.53703703703704,
"alnum_prop": 0.6791687785098834,
"repo_name": "Zavteq/fixofx",
"id": "45735e7621aa9a3f1b42f41540a712c6f075f5b3",
"size": "1999",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "3rdparty/wsgi_intercept/test/test_wsgi_compliance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "206247"
}
],
"symlink_target": ""
} |
import json
import os
import numpy as np
from scipy import stats
from CLASSES import CLASSES_REVERSED
def count_classes(dir='data/dataset', simple_count=False):
    """
    Scan the ``.txt`` files in *dir* and return a dict mapping every class
    found to its number of occurrences, e.g. ``{"A": 150, "B": ...}``.

    :param dir: directory containing the annotation ``.txt`` files.
    :param simple_count: if True, only count the class letters appearing in
        the file names (cheaper than parsing file contents).
    """
    # Only annotation files; everything else in the directory is ignored.
    txt_filenames = [name for name in os.listdir(dir) if name.endswith('txt')]

    if simple_count:
        # File names are assumed to look like AJGH23.txt: every character
        # before the first dot counts as one class occurrence.
        tally = {}
        for name in txt_filenames:
            stem = name.split('.')[0]
            for symbol in stem:
                tally[symbol] = tally.get(symbol, 0) + 1
        return tally

    tally = {}
    for name in txt_filenames:
        with open("{0}/{1}".format(dir, name), 'r') as handle:
            # Each line starts with a numeric class id followed by a space.
            for line in handle.read().split("\n"):
                class_number = line.split(" ")[0]
                if class_number == "":
                    continue
                tally[class_number] = tally.get(class_number, 0) + 1

    # Translate the numeric class ids back into their class names.
    return {CLASSES_REVERSED[int(key)]: value for key, value in tally.items()}
def max_from_json(data):
    """
    Return the ``(key, value)`` pair with the largest value in a dict of
    ``key: value`` (Integer) pairs.

    Returns ``(None, -1)`` for an empty dict (or when every value is below
    -1, which cannot happen for the count data this module produces).
    """
    result = (None, -1)
    for key in data:
        # Bug fix: the original compared ``data[key] > max`` against the
        # *builtin* ``max`` function, so the best value was never tracked.
        if data[key] > result[1]:
            result = (key, data[key])
    return result
def sort_by_quantity(data):
    """
    Return the ``(class, count)`` pairs of *data* as a list in ascending
    count order.  Works with the output of :func:`count_classes`.
    """
    pairs = data.items()
    return sorted(pairs, key=lambda pair: pair[1])
def print_some_stats(data):
    """
    Print summary statistics (mean, variance, min/max, ...) of the class
    counts via :func:`scipy.stats.describe`.
    """
    counts = np.array(list(data.values()))
    print(stats.describe(counts))
# Script entry point: note this runs at import time (no __main__ guard), so
# importing this module triggers a full dataset scan.
data = count_classes()
print_some_stats(data)
print("Sorted:", sort_by_quantity(data))
print("Data: \n", json.dumps(data, indent=2, sort_keys=True))
| {
"content_hash": "778062a6a8df6398c9c7f80d835af5e1",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 114,
"avg_line_length": 30.346666666666668,
"alnum_prop": 0.5812829525483304,
"repo_name": "SetaSouto/license-plate-detection",
"id": "4ad34de6be9754cfffdb3e23b3536f549b490789",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statistics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28117"
}
],
"symlink_target": ""
} |
"""
Support for the DirecTV recievers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.directv/
"""
import voluptuous as vol
from homeassistant.components.media_player import (
MEDIA_TYPE_TVSHOW, MEDIA_TYPE_VIDEO, SUPPORT_PAUSE, SUPPORT_PLAY_MEDIA,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_STOP, PLATFORM_SCHEMA,
SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, STATE_OFF, STATE_PLAYING, CONF_PORT)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['directpy==0.1']

DEFAULT_NAME = 'DirecTV Receiver'
DEFAULT_PORT = 8080

# Bitmask of the media-player features this platform implements.
SUPPORT_DTV = SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
    SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_NEXT_TRACK | \
    SUPPORT_PREVIOUS_TRACK

# Hosts already set up; used to avoid adding the same receiver twice when it
# is both configured and discovered.
KNOWN_HOSTS = []

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the DirecTV platform."""
    hosts = []

    # Skip receivers we have already set up (e.g. discovered after being
    # manually configured).
    if discovery_info and discovery_info in KNOWN_HOSTS:
        return

    if discovery_info is not None:
        # NOTE(review): discovery_info appears to be (host, identifier) —
        # index 0 is used as the host, index 1 in the name; confirm against
        # the discovery component.
        hosts.append([
            'DirecTV_' + discovery_info[1],
            discovery_info[0],
            DEFAULT_PORT
        ])
    elif CONF_HOST in config:
        hosts.append([
            config.get(CONF_NAME), config.get(CONF_HOST), config.get(CONF_PORT)
        ])

    dtvs = []

    for host in hosts:
        dtvs.append(DirecTvDevice(*host))
        KNOWN_HOSTS.append(host)

    add_devices(dtvs)

    return True
class DirecTvDevice(MediaPlayerDevice):
    """Representation of a DirecTV receiver on the network."""

    # pylint: disable=abstract-method
    # pylint: disable=too-many-public-methods
    def __init__(self, name, host, port):
        """Initialize the device."""
        from DirectPy import DIRECTV
        self.dtv = DIRECTV(host, port)
        self._name = name
        self._is_standby = True
        # Dict describing the currently tuned program; None while in standby.
        self._current = None

    def update(self):
        """Retrieve latest state (standby flag and tuned program)."""
        self._is_standby = self.dtv.get_standby()
        if self._is_standby:
            self._current = None
        else:
            self._current = self.dtv.get_tuned()

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    # MediaPlayerDevice properties and methods
    @property
    def state(self):
        """Return the state of the device."""
        if self._is_standby:
            return STATE_OFF
        # haven't determined a way to see if the content is paused
        return STATE_PLAYING

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        if self._is_standby:
            return None
        return self._current['programId']

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        if self._is_standby:
            return None
        return self._current['duration']

    @property
    def media_title(self):
        """Title of current playing media."""
        if self._is_standby:
            return None
        return self._current['title']

    @property
    def media_series_title(self):
        """Title of current episode of TV show, or None when not a show."""
        if self._is_standby:
            return None
        return self._current.get('episodeTitle')

    @property
    def supported_media_commands(self):
        """Flag of media commands that are supported."""
        return SUPPORT_DTV

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        # Bug fix: guard against standby — ``self._current`` is None then,
        # and ``'episodeTitle' in None`` raises TypeError.  Every sibling
        # property already has this guard.
        if self._is_standby:
            return None
        if 'episodeTitle' in self._current:
            return MEDIA_TYPE_TVSHOW
        return MEDIA_TYPE_VIDEO

    @property
    def media_channel(self):
        """Channel of the current playing media, e.g. "KABC (7)"."""
        if self._is_standby:
            return None
        return "{} ({})".format(self._current['callsign'],
                                self._current['major'])

    def turn_on(self):
        """Turn on the receiver."""
        self.dtv.key_press('poweron')

    def turn_off(self):
        """Turn off the receiver."""
        self.dtv.key_press('poweroff')

    def media_play(self):
        """Send play command."""
        self.dtv.key_press('play')

    def media_pause(self):
        """Send pause command."""
        self.dtv.key_press('pause')

    def media_stop(self):
        """Send stop command."""
        self.dtv.key_press('stop')

    def media_previous_track(self):
        """Send rewind command."""
        self.dtv.key_press('rew')

    def media_next_track(self):
        """Send fast forward command."""
        self.dtv.key_press('ffwd')
| {
"content_hash": "26bd0f705ee641a4277bea984acb3577",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 79,
"avg_line_length": 27.864864864864863,
"alnum_prop": 0.5957322987390883,
"repo_name": "leoc/home-assistant",
"id": "0a53ffbbed60876ecccc1c8f777a0e42a937d145",
"size": "5155",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/directv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1366220"
},
{
"name": "Python",
"bytes": "3636900"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
} |
import unittest
from rocks.process import cover
class TestReadingStructures(unittest.TestCase):
    """Smoke test for rocks.process.cover code-coverage checking."""

    def test_when_compile_code_return_lines_pass(self):
        # NOTE(review): hard-coded local path — this only runs on a machine
        # where /mnt/e/some_code exists; consider a bundled fixture instead.
        cov = cover.check_code('/mnt/e/some_code')
        data = cov.get_data()
        assert data is not None
        assert data.line_counts() != {}
| {
"content_hash": "91a82b6cf856dd33245791a4de874f21",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 55,
"avg_line_length": 20.875,
"alnum_prop": 0.6377245508982036,
"repo_name": "jonesambrosi/rocks",
"id": "bd4ee18e95de7f900fe65a7cdda76f62a142f658",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_coverage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19766"
}
],
"symlink_target": ""
} |
import numpy as np
import imageio
from PIL import Image
from skimage import color
def imread(path, mode="RGB"):
    """Read the image at *path* into a numpy array (HxW layout)."""
    return np.array(imageio.imread(path, pilmode=mode))
def imresize(img, size, interp='bilinear'):
    """
    Resize an image.

    :param img: numpy array (or PIL image) holding RGB data.
    :param size: target size — (Must be H, W format !)
    :param interp: 'bilinear', 'bicubic'; anything else means nearest.
    :return: the resized image as a numpy array.
    """
    if interp == 'bilinear':
        interpolation = Image.BILINEAR
    elif interp == 'bicubic':
        interpolation = Image.BICUBIC
    else:
        interpolation = Image.NEAREST

    # PIL expects (W, H) while our callers pass (H, W).
    size = (size[1], size[0])

    # Bug fix: the original tested ``type(img) != Image`` where ``Image`` is
    # the PIL *module*, so the comparison was always true and a PIL image
    # passed in would crash in Image.fromarray.  Check the actual class.
    if not isinstance(img, Image.Image):
        img = Image.fromarray(img, mode='RGB')

    return np.array(img.resize(size, interpolation))
def imsave(path, img):
    """Write *img* to *path* using imageio."""
    imageio.imwrite(path, img)
def fromimage(img, mode='RGB'):
    """
    Convert *img* between the Lab and RGB colour spaces: Lab->RGB when
    ``mode == 'RGB'``, RGB->Lab otherwise.
    """
    converter = color.lab2rgb if mode == 'RGB' else color.rgb2lab
    return converter(img)
def toimage(arr, mode='RGB'):
    # Wrap a numpy array in a PIL Image with the given mode.
    return Image.fromarray(arr, mode)
| {
"content_hash": "536a0022fa845b7b4833b414aa2748fa",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 65,
"avg_line_length": 20.962264150943398,
"alnum_prop": 0.6120612061206121,
"repo_name": "titu1994/Neural-Style-Transfer",
"id": "615b7eed942dd309844c187abc997beb46603ac0",
"size": "1112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "script_helper/Script/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2186376"
},
{
"name": "Python",
"bytes": "277912"
}
],
"symlink_target": ""
} |
"""
sphinx.transforms
~~~~~~~~~~~~~~~~~
Docutils transforms used by Sphinx when reading documents.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from docutils import nodes
from docutils.utils import new_document, relative_path
from docutils.parsers.rst import Parser as RSTParser
from docutils.transforms import Transform
from docutils.transforms.parts import ContentsFilter
from sphinx import addnodes
from sphinx.locale import _, init as init_locale
from sphinx.util import split_index_msg
from sphinx.util.nodes import (
traverse_translatable_index, extract_messages, LITERAL_TYPE_NODES, IMAGE_TYPE_NODES,
)
from sphinx.util.osutil import ustrftime
from sphinx.util.i18n import find_catalog
from sphinx.util.pycompat import indent
from sphinx.domains.std import (
make_term_from_paragraph_node,
make_termnodes_from_paragraph_node,
)
# Substitution names that are resolved from config values when the document
# itself does not define them (handled by DefaultSubstitutions below).
default_substitutions = set([
    'version',
    'release',
    'today',
])
class DefaultSubstitutions(Transform):
    """
    Replace some substitutions if they aren't defined in the document.
    """
    # run before the default Substitutions
    default_priority = 210

    def apply(self):
        config = self.document.settings.env.config
        # only handle those not otherwise defined in the document
        to_handle = default_substitutions - set(self.document.substitution_defs)
        for ref in self.document.traverse(nodes.substitution_reference):
            refname = ref['refname']
            if refname in to_handle:
                # Substitution value comes from the config option of the
                # same name (version / release / today).
                text = config[refname]
                if refname == 'today' and not text:
                    # special handling: can also specify a strftime format
                    text = ustrftime(config.today_fmt or _('%B %d, %Y'))
                ref.replace_self(nodes.Text(text, text))
class MoveModuleTargets(Transform):
    """
    Move module targets that are the first thing in a section to the section
    title.

    XXX Python specific
    """
    default_priority = 210

    def apply(self):
        for node in self.document.traverse(nodes.target):
            if not node['ids']:
                continue
            if ('ismod' in node and
                    node.parent.__class__ is nodes.section and
                    # index 0 is the section title node
                    node.parent.index(node) == 1):
                # Prepend the target's ids to the section's ids, then drop
                # the now-redundant target node.
                node.parent['ids'][0:0] = node['ids']
                node.parent.remove(node)
class HandleCodeBlocks(Transform):
    """
    Several code block related transformations.
    """
    default_priority = 210

    def apply(self):
        # move doctest blocks out of blockquotes
        for node in self.document.traverse(nodes.block_quote):
            if all(isinstance(child, nodes.doctest_block) for child
                   in node.children):
                node.replace_self(node.children)
        # combine successive doctest blocks
        # NOTE: disabled upstream; kept for reference.
        # for node in self.document.traverse(nodes.doctest_block):
        #    if node not in node.parent.children:
        #        continue
        #    parindex = node.parent.index(node)
        #    while len(node.parent) > parindex+1 and \
        #            isinstance(node.parent[parindex+1], nodes.doctest_block):
        #        node[0] = nodes.Text(node[0] + '\n\n' +
        #                             node.parent[parindex+1][0])
        #        del node.parent[parindex+1]
class AutoNumbering(Transform):
    """
    Register IDs of tables, figures and literal_blocks to assign numbers.
    """
    default_priority = 210

    def apply(self):
        def has_child(node, cls):
            return any(isinstance(child, cls) for child in node)

        # Only captioned/titled elements get an implicit target (and hence a
        # number); the caption lives on the node itself for figures/tables
        # and on the parent for images/literal blocks.
        for node in self.document.traverse(nodes.Element):
            if isinstance(node, nodes.figure):
                if has_child(node, nodes.caption):
                    self.document.note_implicit_target(node)
            elif isinstance(node, nodes.image):
                if has_child(node.parent, nodes.caption):
                    self.document.note_implicit_target(node.parent)
            elif isinstance(node, nodes.table):
                if has_child(node, nodes.title):
                    self.document.note_implicit_target(node)
            elif isinstance(node, nodes.literal_block):
                if has_child(node.parent, nodes.caption):
                    self.document.note_implicit_target(node.parent)
class SortIds(Transform):
    """
    Sort section IDs so that the "id[0-9]+" one comes last.
    """
    default_priority = 261

    def apply(self):
        for node in self.document.traverse(nodes.section):
            # Auto-generated ids start with "id"; rotate the first one to
            # the end so an explicit/readable id is preferred for linking.
            if len(node['ids']) > 1 and node['ids'][0].startswith('id'):
                node['ids'] = node['ids'][1:] + [node['ids'][0]]
class CitationReferences(Transform):
    """
    Replace citation references by pending_xref nodes before the default
    docutils transform tries to resolve them.
    """
    default_priority = 619

    def apply(self):
        for citnode in self.document.traverse(nodes.citation_reference):
            cittext = citnode.astext()
            refnode = addnodes.pending_xref(cittext, reftype='citation',
                                            reftarget=cittext, refwarn=True,
                                            ids=citnode["ids"])
            # Keep the source line for warnings; fall back to the parent's.
            refnode.line = citnode.line or citnode.parent.line
            refnode += nodes.Text('[' + cittext + ']')
            citnode.parent.replace(citnode, refnode)
# Maps values of the ``gettext_additional_targets`` config option to the
# docutils/sphinx node classes they enable for translation.
TRANSLATABLE_NODES = {
    'literal-block': nodes.literal_block,
    'doctest-block': nodes.doctest_block,
    'raw': nodes.raw,
    'index': addnodes.index,
    'image': nodes.image,
}
class ExtraTranslatableNodes(Transform):
    """
    make nodes translatable
    """
    default_priority = 10

    def apply(self):
        # ``gettext_additional_targets`` selects which extra node types
        # (literal-block, raw, image, ...) are marked for translation.
        targets = self.document.settings.env.config.gettext_additional_targets
        target_nodes = [v for k, v in TRANSLATABLE_NODES.items() if k in targets]
        if not target_nodes:
            return

        def is_translatable_node(node):
            return isinstance(node, tuple(target_nodes))

        for node in self.document.traverse(is_translatable_node):
            node['translatable'] = True
class CustomLocaleReporter(object):
    """
    Replacement provider for ``document.reporter.get_source_and_line``.

    Translated reST fragments lose their original source line numbers; this
    class pins all reporting to the location of the original node so that
    warnings and errors point at the untranslated source.
    """
    def __init__(self, source, line):
        self.source = source
        self.line = line

    def set_reporter(self, document):
        # Monkey-patch the document's reporter in place.
        document.reporter.get_source_and_line = self.get_source_and_line

    def get_source_and_line(self, lineno=None):
        # ``lineno`` is accepted for signature compatibility but ignored.
        return self.source, self.line
class Locale(Transform):
    """
    Replace translatable nodes with their translated doctree.

    Works in two phases: phase 1 translates titles/terms and rewires the
    name->id mappings they anchor; phase 2 translates everything else while
    carefully preserving ids and reference targets from the original nodes.
    """
    default_priority = 20

    def apply(self):
        env = self.document.settings.env
        settings, source = self.document.settings, self.document['source']
        # XXX check if this is reliable
        assert source.startswith(env.srcdir)
        docname = path.splitext(relative_path(path.join(env.srcdir, 'dummy'),
                                              source))[0]
        textdomain = find_catalog(docname,
                                  self.document.settings.gettext_compact)

        # fetch translations
        dirs = [path.join(env.srcdir, directory)
                for directory in env.config.locale_dirs]
        catalog, has_catalog = init_locale(dirs, env.config.language,
                                           textdomain)
        if not has_catalog:
            return

        parser = RSTParser()

        # phase1: replace reference ids with translated names
        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg or not msgstr.strip():
                # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # dummy literal node will discard by 'patch = patch[0]'

            # literalblock need literal block notation to avoid it become
            # paragraph.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' '*3)

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue  # skip for now

            processed = False  # skip flag

            # update title(section) target name-id mapping
            if isinstance(node, nodes.title):
                section_node = node.parent
                new_name = nodes.fully_normalize_name(patch.astext())
                old_name = nodes.fully_normalize_name(node.astext())

                if old_name != new_name:
                    # if name would be changed, replace node names and
                    # document nameids mapping with new name.
                    names = section_node.setdefault('names', [])
                    names.append(new_name)
                    if old_name in names:
                        names.remove(old_name)

                    _id = self.document.nameids.get(old_name, None)
                    explicit = self.document.nametypes.get(old_name, None)

                    # * if explicit: _id is label. title node need another id.
                    # * if not explicit:
                    #
                    #   * if _id is None:
                    #
                    #     _id is None means:
                    #
                    #     1. _id was not provided yet.
                    #
                    #     2. _id was duplicated.
                    #
                    #        old_name entry still exists in nameids and
                    #        nametypes for another duplicated entry.
                    #
                    # * if _id is provided: bellow process
                    if _id:
                        if not explicit:
                            # _id was not duplicated.
                            # remove old_name entry from document ids database
                            # to reuse original _id.
                            self.document.nameids.pop(old_name, None)
                            self.document.nametypes.pop(old_name, None)
                            self.document.ids.pop(_id, None)

                        # re-entry with new named section node.
                        #
                        # Note: msgnode that is a second parameter of the
                        # `note_implicit_target` is not necessary here because
                        # section_node has been noted previously on rst parsing by
                        # `docutils.parsers.rst.states.RSTState.new_subsection()`
                        # and already has `system_message` if needed.
                        self.document.note_implicit_target(section_node)

                    # replace target's refname to new target name
                    def is_named_target(node):
                        return isinstance(node, nodes.target) and  \
                            node.get('refname') == old_name
                    for old_target in self.document.traverse(is_named_target):
                        old_target['refname'] = new_name

                    processed = True

            # glossary terms update refid
            if isinstance(node, nodes.term):
                gloss_entries = env.temp_data.setdefault('gloss_entries', set())
                ids = []
                termnodes = []
                for _id in node['names']:
                    if _id in gloss_entries:
                        gloss_entries.remove(_id)
                    _id, _, new_termnodes = \
                        make_termnodes_from_paragraph_node(env, patch, _id)
                    ids.append(_id)
                    termnodes.extend(new_termnodes)

                if termnodes and ids:
                    patch = make_term_from_paragraph_node(termnodes, ids)
                    node['ids'] = patch['ids']
                    node['names'] = patch['names']
                    processed = True

            # update leaves with processed nodes
            if processed:
                for child in patch.children:
                    child.parent = node
                node.children = patch.children
                node['translated'] = True

        # phase2: translation
        for node, msg in extract_messages(self.document):
            if node.get('translated', False):
                continue

            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg:  # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # dummy literal node will discard by 'patch = patch[0]'

            # literalblock need literal block notation to avoid it become
            # paragraph.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' '*3)

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(
                    patch,
                    (nodes.paragraph,) + LITERAL_TYPE_NODES + IMAGE_TYPE_NODES):
                continue  # skip for now

            # auto-numbered foot note reference should use original 'ids'.
            def is_autonumber_footnote_ref(node):
                return isinstance(node, nodes.footnote_reference) and \
                    node.get('auto') == 1

            def list_replace_or_append(lst, old, new):
                if old in lst:
                    lst[lst.index(old)] = new
                else:
                    lst.append(new)
            old_foot_refs = node.traverse(is_autonumber_footnote_ref)
            new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
            if len(old_foot_refs) != len(new_foot_refs):
                env.warn_node('inconsistent footnote references in '
                              'translated message', node)
            old_foot_namerefs = {}
            for r in old_foot_refs:
                old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
            for new in new_foot_refs:
                refname = new.get('refname')
                refs = old_foot_namerefs.get(refname, [])
                if not refs:
                    continue

                # Pair each translated reference with the next unconsumed
                # original of the same refname, inheriting its ids.
                old = refs.pop(0)
                new['ids'] = old['ids']
                for id in new['ids']:
                    self.document.ids[id] = new
                list_replace_or_append(
                    self.document.autofootnote_refs, old, new)
                if refname:
                    list_replace_or_append(
                        self.document.footnote_refs.setdefault(refname, []),
                        old, new)
                    list_replace_or_append(
                        self.document.refnames.setdefault(refname, []),
                        old, new)

            # reference should use new (translated) 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * use translated refname for section refname.
            # * inline reference "`Python <...>`_" has no 'refname'.
            def is_refnamed_ref(node):
                return isinstance(node, nodes.reference) and  \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_ref)
            new_refs = patch.traverse(is_refnamed_ref)
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            old_ref_names = [r['refname'] for r in old_refs]
            new_ref_names = [r['refname'] for r in new_refs]
            orphans = list(set(old_ref_names) - set(new_ref_names))
            for new in new_refs:
                if not self.document.has_name(new['refname']):
                    # Maybe refname is translated but target is not translated.
                    # Note: multiple translated refnames break link ordering.
                    if orphans:
                        new['refname'] = orphans.pop(0)
                    else:
                        # orphan refnames is already empty!
                        # reference number is same in new_refs and old_refs.
                        pass

                self.document.note_refname(new)

            # refnamed footnote and citation should use original 'ids'.
            def is_refnamed_footnote_ref(node):
                footnote_ref_classes = (nodes.footnote_reference,
                                        nodes.citation_reference)
                return isinstance(node, footnote_ref_classes) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_footnote_ref)
            new_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            for old in old_refs:
                refname_ids_map[old["refname"]] = old["ids"]
            for new in new_refs:
                refname = new["refname"]
                if refname in refname_ids_map:
                    new["ids"] = refname_ids_map[refname]

            # Original pending_xref['reftarget'] contain not-translated
            # target name, new pending_xref must use original one.
            # This code restricts to change ref-targets in the translation.
            old_refs = node.traverse(addnodes.pending_xref)
            new_refs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent term references in '
                              'translated message', node)

            def get_ref_key(node):
                case = node["refdomain"], node["reftype"]
                if case == ('std', 'term'):
                    return None
                else:
                    return (
                        node["refdomain"],
                        node["reftype"],
                        node['reftarget'],)

            for old in old_refs:
                key = get_ref_key(old)
                if key:
                    xref_reftarget_map[key] = old.attributes
            for new in new_refs:
                key = get_ref_key(new)
                # Copy attributes to keep original node behavior. Especially
                # copying 'reftarget', 'py:module', 'py:class' are needed.
                for k, v in xref_reftarget_map.get(key, {}).items():
                    # Note: This implementation overwrite all attributes.
                    # if some attributes `k` should not be overwritten,
                    # you should provide exclude list as:
                    # `if k not in EXCLUDE_LIST: new[k] = v`
                    new[k] = v

            # update leaves
            for child in patch.children:
                child.parent = node
            node.children = patch.children

            # for highlighting that expects .rawsource and .astext() are same.
            if isinstance(node, LITERAL_TYPE_NODES):
                node.rawsource = node.astext()

            if isinstance(node, IMAGE_TYPE_NODES):
                node.update_all_atts(patch)

            node['translated'] = True

        if 'index' in env.config.gettext_additional_targets:
            # Extract and translate messages for index entries.
            for node, entries in traverse_translatable_index(self.document):
                new_entries = []
                for type, msg, tid, main in entries:
                    msg_parts = split_index_msg(type, msg)
                    msgstr_parts = []
                    for part in msg_parts:
                        msgstr = catalog.gettext(part)
                        if not msgstr:
                            msgstr = part
                        msgstr_parts.append(msgstr)

                    new_entries.append((type, ';'.join(msgstr_parts), tid, main))

                node['raw_entries'] = entries
                node['entries'] = new_entries
class RemoveTranslatableInline(Transform):
    """
    Remove inline nodes used for translation as placeholders.
    """
    default_priority = 999

    def apply(self):
        from sphinx.builders.gettext import MessageCatalogBuilder
        env = self.document.settings.env
        builder = env.app.builder
        # The gettext builder is the one consumer of the placeholders —
        # keep them in that case.
        if isinstance(builder, MessageCatalogBuilder):
            return
        for inline in self.document.traverse(nodes.inline):
            if 'translatable' in inline:
                # Splice the children into the parent in the inline's place.
                inline.parent.remove(inline)
                inline.parent += inline.children
class SphinxContentsFilter(ContentsFilter):
    """
    Used with BuildEnvironment.add_toc_from() to discard cross-file links
    within table-of-contents link nodes.
    """
    def visit_pending_xref(self, node):
        # Keep only the literal text of the cross-reference in the ToC.
        text = node.astext()
        self.parent.append(nodes.literal(text, text))
        raise nodes.SkipNode

    def visit_image(self, node):
        raise nodes.SkipNode
| {
"content_hash": "bda98fc6c7ecab86939208017a94c245",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 88,
"avg_line_length": 39.72982456140351,
"alnum_prop": 0.5387706438223085,
"repo_name": "WhySoGeeky/DroidPot",
"id": "681b50e0571f4179d7d123d3ad1783ce1875cb5d",
"size": "22670",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/sphinx/transforms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "874"
},
{
"name": "C",
"bytes": "31005"
},
{
"name": "CSS",
"bytes": "791857"
},
{
"name": "HTML",
"bytes": "1896759"
},
{
"name": "JavaScript",
"bytes": "2509094"
},
{
"name": "Makefile",
"bytes": "2057"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "13513559"
},
{
"name": "Shell",
"bytes": "3886"
},
{
"name": "TeX",
"bytes": "57070"
}
],
"symlink_target": ""
} |
import os
import pytest
from pytzdata import set_directory, tz_path, TimezoneNotFound
# Directory containing a minimal tz database used by these tests (only ships
# Europe/Paris).
fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'tz')
def setup_module(module):
    """Start each run from a clean slate: no env override, default tz dir."""
    os.environ.pop('PYTZDATA_TZDATADIR', None)
    set_directory()
def teardown_module(module):
    """Leave the environment as we found it: drop the override, reset dir."""
    os.environ.pop('PYTZDATA_TZDATADIR', None)
    set_directory()
def test_set_directory():
    """set_directory(path) should point lookups at *path* until reset."""
    set_directory(fixtures_path)
    assert tz_path('Europe/Paris') == os.path.join(fixtures_path, 'Europe/Paris')
    # The fixtures tree does not contain America/New_York.
    with pytest.raises(TimezoneNotFound):
        tz_path('America/New_York')

    # Reset to the bundled zoneinfo and check lookups work again.
    set_directory()
    base = os.path.realpath(os.path.dirname(__file__))
    expected = os.path.realpath(
        os.path.join(base, '..', 'pytzdata', 'zoneinfo', 'America', 'New_York')
    )
    assert tz_path('America/New_York') == expected
def test_env_variable():
    """PYTZDATA_TZDATADIR should override the directory on set_directory()."""
    os.environ['PYTZDATA_TZDATADIR'] = fixtures_path
    set_directory()
    assert tz_path('Europe/Paris') == os.path.join(fixtures_path, 'Europe/Paris')
    # The fixtures tree does not contain America/New_York.
    with pytest.raises(TimezoneNotFound):
        tz_path('America/New_York')

    # Removing the variable and resetting restores the bundled zoneinfo.
    del os.environ['PYTZDATA_TZDATADIR']
    set_directory()
    base = os.path.realpath(os.path.dirname(__file__))
    expected = os.path.realpath(
        os.path.join(base, '..', 'pytzdata', 'zoneinfo', 'America', 'New_York')
    )
    assert tz_path('America/New_York') == expected
| {
"content_hash": "f486f0d660ace8cad1a97d105352a8aa",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 81,
"avg_line_length": 24.779661016949152,
"alnum_prop": 0.6538987688098495,
"repo_name": "sdispater/pytzdata",
"id": "430d2392cd27757c886a9649b754d3fdc98d02b1",
"size": "1487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_set_directory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "735"
},
{
"name": "Python",
"bytes": "26781"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from collections import namedtuple
from inspect import isclass
from django.utils.translation import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)


class _LOG(object):
    # Base marker class for the log-action classes below.  Subclasses define
    # ``id`` (stable, stored in the DB), ``format`` (translated display
    # string) and optional flags; ``action_class`` is the CSS class used when
    # rendering the entry (None = no styling).
    action_class = None
# --- Add-on lifecycle, authorship and status actions -----------------------
# ``keep = True`` marks entries that must never be purged from the log.

class CREATE_ADDON(_LOG):
    id = 1
    action_class = 'add'
    format = _(u'{addon} was created.')
    keep = True


class EDIT_PROPERTIES(_LOG):
    """ Expects: addon """
    id = 2
    action_class = 'edit'
    format = _(u'{addon} properties edited.')


class EDIT_DESCRIPTIONS(_LOG):
    id = 3
    action_class = 'edit'
    format = _(u'{addon} description edited.')


class EDIT_CATEGORIES(_LOG):
    id = 4
    action_class = 'edit'
    format = _(u'Categories edited for {addon}.')


class ADD_USER_WITH_ROLE(_LOG):
    id = 5
    action_class = 'add'
    format = _(u'{0.name} ({1}) added to {addon}.')
    keep = True


class REMOVE_USER_WITH_ROLE(_LOG):
    id = 6
    action_class = 'delete'
    # L10n: {0} is the user being removed, {1} is their role.
    format = _(u'{0.name} ({1}) removed from {addon}.')
    keep = True


class EDIT_CONTRIBUTIONS(_LOG):
    id = 7
    action_class = 'edit'
    format = _(u'Contributions for {addon}.')


class USER_DISABLE(_LOG):
    id = 8
    format = _(u'{addon} disabled.')
    keep = True


class USER_ENABLE(_LOG):
    id = 9
    format = _(u'{addon} enabled.')
    keep = True


class CHANGE_STATUS(_LOG):
    id = 12
    # L10n: {status} is the status
    format = _(u'{addon} status changed to {status}.')
    keep = True
# --- Version and file actions ----------------------------------------------

class ADD_VERSION(_LOG):
    id = 16
    action_class = 'add'
    format = _(u'{version} added to {addon}.')
    keep = True


class EDIT_VERSION(_LOG):
    id = 17
    action_class = 'edit'
    format = _(u'{version} edited for {addon}.')


class DELETE_VERSION(_LOG):
    id = 18
    action_class = 'delete'
    # Note, {0} is a string not a version since the version is deleted.
    # L10n: {0} is the version number
    format = _(u'Version {0} deleted from {addon}.')
    keep = True


class ADD_FILE_TO_VERSION(_LOG):
    id = 19
    action_class = 'add'
    format = _(u'File {0.name} added to {version} of {addon}.')


class DELETE_FILE_FROM_VERSION(_LOG):
    """
    Expecting: addon, filename, version
    Because the file is being deleted, filename and version
    should be strings and not the object.
    """
    id = 20
    action_class = 'delete'
    format = _(u'File {0} deleted from {version} of {addon}.')


# Review actions below additionally set ``review_email_user`` (notify the
# developer by email), ``review_queue`` (show in the review log) and
# ``reviewer_review_action`` (performed by a reviewer).

class APPROVE_VERSION(_LOG):
    id = 21
    action_class = 'approve'
    format = _(u'{addon} {version} approved.')
    short = _(u'Approved')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True


class PRELIMINARY_VERSION(_LOG):
    id = 42
    action_class = 'approve'
    format = _(u'{addon} {version} given preliminary review.')
    short = _(u'Preliminarily approved')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True
# --- Review-queue actions ---------------------------------------------------
# ``hide_developer = True`` keeps the entry out of the developer-facing log.

class REJECT_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 43
    action_class = 'reject'
    format = _(u'{addon} {version} rejected.')
    short = _(u'Rejected')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True


class RETAIN_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 22
    format = _(u'{addon} {version} retained.')
    short = _(u'Retained')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True


class ESCALATE_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 23
    format = _(u'{addon} {version} escalated.')
    short = _(u'Super review requested')
    keep = True
    review_email_user = True
    review_queue = True
    hide_developer = True


class REQUEST_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 24
    format = _(u'{addon} {version} review requested.')
    short = _(u'Review requested')
    keep = True
    review_email_user = True
    review_queue = True


# Obsolete now that we have pending rejections, kept for compatibility.
class REQUEST_INFORMATION(_LOG):
    id = 44
    format = _(u'{addon} {version} more information requested.')
    short = _(u'More information requested')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True


# Obsolete now that we've split the requests for admin review into separate
# actions for code/theme/content, but kept for compatibility with old history,
# and also to re-use the `sanitize` property.
class REQUEST_SUPER_REVIEW(_LOG):
    id = 45
    format = _(u'{addon} {version} super review requested.')
    short = _(u'Super review requested')
    keep = True
    review_queue = True
    sanitize = _("The addon has been flagged for Admin Review.  It's still "
                 "in our review queue, but it will need to be checked by one "
                 "of our admin reviewers. The review might take longer than "
                 "usual.")
    reviewer_review_action = True


class COMMENT_VERSION(_LOG):
    id = 49
    format = _(u'Comment on {addon} {version}.')
    short = _(u'Commented')
    keep = True
    review_queue = True
    hide_developer = True
    reviewer_review_action = True
# Activity log entry types for tags, collections, ratings and add-on edits.
# Each class is registered via the LOGS namespace scan at the bottom of this
# module; `id` values must be unique (asserted there). Flag attributes are
# read with hasattr() to build the LOG_* id lists defined after the classes.
class ADD_TAG(_LOG):
    id = 25
    action_class = 'tag'
    format = _(u'{tag} added to {addon}.')
class REMOVE_TAG(_LOG):
    id = 26
    action_class = 'tag'
    format = _(u'{tag} removed from {addon}.')
class ADD_TO_COLLECTION(_LOG):
    id = 27
    action_class = 'collection'
    format = _(u'{addon} added to {collection}.')
class REMOVE_FROM_COLLECTION(_LOG):
    id = 28
    action_class = 'collection'
    format = _(u'{addon} removed from {collection}.')
class ADD_RATING(_LOG):
    id = 29
    action_class = 'review'
    format = _(u'{rating} for {addon} written.')
# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
    id = 31
    action_class = 'edit'
    # L10n: {0} is a category name.
    format = _(u'{addon} featured in {0}.')
class REMOVE_RECOMMENDED_CATEGORY(_LOG):
    id = 32
    action_class = 'edit'
    # L10n: {0} is a category name.
    format = _(u'{addon} no longer featured in {0}.')
class ADD_RECOMMENDED(_LOG):
    id = 33
    format = _(u'{addon} is now featured.')
    keep = True
class REMOVE_RECOMMENDED(_LOG):
    id = 34
    format = _(u'{addon} is no longer featured.')
    keep = True
class ADD_APPVERSION(_LOG):
    id = 35
    action_class = 'add'
    # L10n: {0} is the application, {1} is the version of the app
    format = _(u'{0} {1} added.')
class CHANGE_USER_WITH_ROLE(_LOG):
    """ Expects: author.user, role, addon """
    id = 36
    # L10n: {0} is a user, {1} is their role
    format = _(u'{0.name} role changed to {1} for {addon}.')
    keep = True
class CHANGE_LICENSE(_LOG):
    """ Expects: license, addon """
    id = 37
    action_class = 'edit'
    format = _(u'{addon} is now licensed under {0}.')
class CHANGE_POLICY(_LOG):
    id = 38
    action_class = 'edit'
    format = _(u'{addon} policy changed.')
class CHANGE_ICON(_LOG):
    id = 39
    action_class = 'edit'
    format = _(u'{addon} icon changed.')
class APPROVE_RATING(_LOG):
    id = 40
    action_class = 'approve'
    format = _(u'{rating} for {addon} approved.')
    # Alternative wording shown to reviewers (entries with `reviewer_event`
    # are collected into LOG_RATING_MODERATION at module bottom).
    reviewer_format = _(u'{user} approved {rating} for {addon}.')
    keep = True
    reviewer_event = True
class DELETE_RATING(_LOG):
    """Requires rating.id and add-on objects."""
    id = 41
    action_class = 'review'
    format = _(u'Review {rating} for {addon} deleted.')
    reviewer_format = _(u'{user} deleted {rating} for {addon}.')
    keep = True
    reviewer_event = True
class MAX_APPVERSION_UPDATED(_LOG):
    id = 46
    format = _(u'Application max version for {version} updated.')
class BULK_VALIDATION_EMAILED(_LOG):
    id = 47
    format = _(u'Authors emailed about compatibility of {version}.')
class BULK_VALIDATION_USER_EMAILED(_LOG):
    id = 130
    format = _(u'Email sent to Author about add-on compatibility.')
class CHANGE_PASSWORD(_LOG):
    id = 48
    format = _(u'Password changed.')
class APPROVE_VERSION_WAITING(_LOG):
    id = 53
    action_class = 'approve'
    format = _(u'{addon} {version} approved but waiting to be made public.')
    short = _(u'Approved but waiting')
    keep = True
    review_email_user = True
    review_queue = True
class USER_EDITED(_LOG):
    id = 60
    format = _(u'Account updated.')
# Free-form entries: the whole message is supplied at log time, so the
# format strings are deliberately not wrapped in _() for translation.
class CUSTOM_TEXT(_LOG):
    id = 98
    format = '{0}'
class CUSTOM_HTML(_LOG):
    id = 99
    format = '{0}'
# Admin-only and signing-related activity log entry types. Classes with
# `admin_event = True` are collected into LOG_ADMINS at module bottom and,
# through that list, also into LOG_HIDE_DEVELOPER. `id` values must be
# unique (asserted at module bottom).
class OBJECT_ADDED(_LOG):
    id = 100
    format = _(u'Created: {0}.')
    admin_event = True
class OBJECT_EDITED(_LOG):
    id = 101
    format = _(u'Edited field: {2} set to: {0}.')
    admin_event = True
class OBJECT_DELETED(_LOG):
    id = 102
    format = _(u'Deleted: {1}.')
    admin_event = True
class ADMIN_USER_EDITED(_LOG):
    id = 103
    format = _(u'User {user} edited, reason: {1}')
    admin_event = True
class ADMIN_USER_ANONYMIZED(_LOG):
    id = 104
    format = _(u'User {user} anonymized.')
    keep = True
    admin_event = True
class ADMIN_USER_RESTRICTED(_LOG):
    id = 105
    format = _(u'User {user} restricted.')
    keep = True
    admin_event = True
class ADMIN_VIEWED_LOG(_LOG):
    id = 106
    format = _(u'Admin {0} viewed activity log for {user}.')
    admin_event = True
class EDIT_RATING(_LOG):
    id = 107
    action_class = 'review'
    format = _(u'{rating} for {addon} updated.')
class THEME_REVIEW(_LOG):
    id = 108
    action_class = 'review'
    format = _(u'{addon} reviewed.')
    keep = True
class ADMIN_USER_BANNED(_LOG):
    id = 109
    format = _(u'User {user} banned.')
    keep = True
    admin_event = True
class ADMIN_USER_PICTURE_DELETED(_LOG):
    id = 110
    format = _(u'User {user} picture deleted.')
    admin_event = True
class GROUP_USER_ADDED(_LOG):
    id = 120
    action_class = 'access'
    format = _(u'User {0.name} added to {group}.')
    keep = True
    admin_event = True
class GROUP_USER_REMOVED(_LOG):
    id = 121
    action_class = 'access'
    format = _(u'User {0.name} removed from {group}.')
    keep = True
    admin_event = True
class ADDON_UNLISTED(_LOG):
    id = 128
    format = _(u'{addon} unlisted.')
    keep = True
class BETA_SIGNED(_LOG):
    id = 131
    format = _(u'{file} was signed.')
    keep = True
# Obsolete, we don't care about validation results on beta files.
class BETA_SIGNED_VALIDATION_FAILED(_LOG):
    id = 132
    format = _(u'{file} was signed.')
    keep = True
class DELETE_ADDON(_LOG):
    id = 133
    action_class = 'delete'
    # L10n: {0} is the add-on id, {1} is the add-on GUID.
    format = _(u'Addon id {0} with GUID {1} has been deleted')
    keep = True
class EXPERIMENT_SIGNED(_LOG):
    id = 134
    format = _(u'{file} was signed.')
    keep = True
class UNLISTED_SIGNED(_LOG):
    id = 135
    format = _(u'{file} was signed.')
    keep = True
# Obsolete, we don't care about validation results on unlisted files anymore.
class UNLISTED_SIGNED_VALIDATION_FAILED(_LOG):
    id = 136
    format = _(u'{file} was signed.')
    keep = True
# Obsolete, we don't care about validation results on unlisted files anymore,
# and the distinction for sideloading add-ons is gone as well.
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_PASSED(_LOG):
    id = 137
    format = _(u'{file} was signed.')
    keep = True
# Obsolete, we don't care about validation results on unlisted files anymore,
# and the distinction for sideloading add-ons is gone as well.
class UNLISTED_SIDELOAD_SIGNED_VALIDATION_FAILED(_LOG):
    id = 138
    format = _(u'{file} was signed.')
    keep = True
class PRELIMINARY_ADDON_MIGRATED(_LOG):
    id = 139
    format = _(u'{addon} migrated from preliminary.')
    keep = True
    review_queue = True
# Review-queue, blocklist and throttling activity log entry types. Each
# class is registered via the LOGS namespace scan at the bottom of this
# module; `id` values must be unique (asserted there). Flag attributes
# (`keep`, `review_queue`, `review_email_user`, `reviewer_review_action`,
# `hide_developer`, `admin_event`) are read with hasattr() to build the
# LOG_* id lists defined after the classes.
class DEVELOPER_REPLY_VERSION(_LOG):
    id = 140
    format = _(u'Reply by developer on {addon} {version}.')
    short = _(u'Developer Reply')
    keep = True
    review_queue = True
class REVIEWER_REPLY_VERSION(_LOG):
    id = 141
    format = _(u'Reply by reviewer on {addon} {version}.')
    short = _(u'Reviewer Reply')
    keep = True
    review_queue = True
class APPROVAL_NOTES_CHANGED(_LOG):
    id = 142
    format = _(u'Approval notes changed for {addon} {version}.')
    short = _(u'Approval notes changed')
    keep = True
    review_queue = True
class SOURCE_CODE_UPLOADED(_LOG):
    id = 143
    format = _(u'Source code uploaded for {addon} {version}.')
    short = _(u'Source code uploaded')
    keep = True
    review_queue = True
class CONFIRM_AUTO_APPROVED(_LOG):
    id = 144
    format = _(u'Auto-Approval confirmed for {addon} {version}.')
    short = _(u'Auto-Approval confirmed')
    keep = True
    reviewer_review_action = True
    review_queue = True
    hide_developer = True
class ENABLE_VERSION(_LOG):
    id = 145
    format = _(u'{addon} {version} re-enabled.')
class DISABLE_VERSION(_LOG):
    id = 146
    format = _(u'{addon} {version} disabled.')
class APPROVE_CONTENT(_LOG):
    id = 147
    format = _(u'{addon} {version} content approved.')
    short = _(u'Content approved')
    keep = True
    reviewer_review_action = True
    review_queue = True
    hide_developer = True
class REJECT_CONTENT(_LOG):
    id = 148
    action_class = 'reject'
    format = _(u'{addon} {version} content rejected.')
    short = _(u'Content rejected')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True
class ADMIN_ALTER_INFO_REQUEST(_LOG):
    id = 149
    format = _(u'{addon} information request altered or removed by admin.')
    short = _(u'Information request altered')
    keep = True
    reviewer_review_action = True
    review_queue = True
class DEVELOPER_CLEAR_INFO_REQUEST(_LOG):
    id = 150
    format = _(u'Information request cleared by developer on '
               u'{addon} {version}.')
    short = _(u'Information request removed')
    keep = True
    review_queue = True
# The three REQUEST_ADMIN_REVIEW_* entries reuse the sanitized wording
# defined on REQUEST_SUPER_REVIEW earlier in this module.
class REQUEST_ADMIN_REVIEW_CODE(_LOG):
    id = 151
    format = _(u'{addon} {version} admin add-on-review requested.')
    short = _(u'Admin add-on-review requested')
    keep = True
    review_queue = True
    reviewer_review_action = True
    sanitize = REQUEST_SUPER_REVIEW.sanitize
class REQUEST_ADMIN_REVIEW_CONTENT(_LOG):
    id = 152
    format = _(u'{addon} {version} admin content-review requested.')
    short = _(u'Admin content-review requested')
    keep = True
    review_queue = True
    reviewer_review_action = True
    sanitize = REQUEST_SUPER_REVIEW.sanitize
class REQUEST_ADMIN_REVIEW_THEME(_LOG):
    id = 153
    format = _(u'{addon} {version} admin theme-review requested.')
    short = _(u'Admin theme-review requested')
    keep = True
    review_queue = True
    reviewer_review_action = True
    sanitize = REQUEST_SUPER_REVIEW.sanitize
class CREATE_STATICTHEME_FROM_PERSONA(_LOG):
    id = 154
    action_class = 'add'
    format = _(u'{addon} was migrated from a lightweight theme.')
    keep = True
class ADMIN_API_KEY_RESET(_LOG):
    id = 155
    format = _(u'User {user} api key reset.')
    admin_event = True
class BLOCKLIST_BLOCK_ADDED(_LOG):
    id = 156
    keep = True
    action_class = 'add'
    hide_developer = True
    format = _('Block for {0} added to Blocklist.')
    short = _('Block added')
class BLOCKLIST_BLOCK_EDITED(_LOG):
    id = 157
    keep = True
    action_class = 'edit'
    hide_developer = True
    format = _('Block for {0} edited in Blocklist.')
    short = _('Block edited')
class BLOCKLIST_BLOCK_DELETED(_LOG):
    id = 158
    keep = True
    action_class = 'delete'
    hide_developer = True
    format = _('Block for {0} deleted from Blocklist.')
    short = _('Block deleted')
class DENIED_GUID_ADDED(_LOG):
    id = 159
    keep = True
    action_class = 'add'
    hide_developer = True
    format = _('GUID for {addon} added to DeniedGuid.')
class DENIED_GUID_DELETED(_LOG):
    id = 160
    keep = True
    action_class = 'delete'
    hide_developer = True
    format = _('GUID for {addon} removed from DeniedGuid.')
class BLOCKLIST_SIGNOFF(_LOG):
    id = 161
    keep = True
    hide_developer = True
    format = _('Block {1} action for {0} signed off.')
    short = _('Block action signoff')
class ADMIN_USER_SESSION_RESET(_LOG):
    id = 162
    format = _('User {user} session(s) reset.')
    admin_event = True
class THROTTLED(_LOG):
    id = 163
    format = _('User {user} throttled for scope "{0}"')
    admin_event = True
class REJECT_CONTENT_DELAYED(_LOG):
    id = 164
    action_class = 'reject'
    format = _('{addon} {version} content reject scheduled.')
    short = _('Content reject scheduled')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True
class REJECT_VERSION_DELAYED(_LOG):
    # takes add-on, version, reviewtype
    id = 165
    action_class = 'reject'
    format = _('{addon} {version} reject scheduled.')
    short = _('Rejection scheduled')
    keep = True
    review_email_user = True
    review_queue = True
    reviewer_review_action = True
# Collect every _LOG subclass defined above by scanning this module's
# namespace. Must run after all the class definitions.
LOGS = [x for x in vars().values()
        if isclass(x) and issubclass(x, _LOG) and x != _LOG]
# Make sure there's no duplicate IDs.
assert len(LOGS) == len(set(log.id for log in LOGS))
# Lookup table from numeric id to entry class.
LOG_BY_ID = dict((log.id, log) for log in LOGS)
# Namedtuple exposing every entry as an attribute, e.g. LOG.APPROVE_VERSION.
LOG = namedtuple('LogTuple', [log.__name__ for log in LOGS])(
    *[log for log in LOGS])
# The LOG_* lists below group entry ids by the presence of a flag attribute
# on the class (hasattr -- the flag's value is never inspected).
LOG_ADMINS = [
    log.id for log in LOGS if hasattr(log, 'admin_event')]
LOG_KEEP = [
    log.id for log in LOGS if hasattr(log, 'keep')]
LOG_RATING_MODERATION = [
    log.id for log in LOGS if hasattr(log, 'reviewer_event')]
LOG_REVIEW_QUEUE = [
    log.id for log in LOGS if hasattr(log, 'review_queue')]
LOG_REVIEWER_REVIEW_ACTION = [
    log.id for log in LOGS if hasattr(log, 'reviewer_review_action')]
# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [
    log.id for log in LOGS if hasattr(log, 'review_email_user')]
# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [
    log.id for log in LOGS
    if (getattr(log, 'hide_developer', False) or log.id in LOG_ADMINS)]
# Review Queue logs to show to developer (i.e. hiding admin/private)
LOG_REVIEW_QUEUE_DEVELOPER = list(
    set(LOG_REVIEW_QUEUE) - set(LOG_HIDE_DEVELOPER))
| {
"content_hash": "636dfcc670910a7c429ce960b3922a9b",
"timestamp": "",
"source": "github",
"line_count": 782,
"max_line_length": 78,
"avg_line_length": 23.558823529411764,
"alnum_prop": 0.6192802475166911,
"repo_name": "eviljeff/olympia",
"id": "6e10122a60a00e8e818e903ab05f00fd10f55513",
"size": "18531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/constants/activity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "251925"
},
{
"name": "Dockerfile",
"bytes": "4063"
},
{
"name": "HTML",
"bytes": "314372"
},
{
"name": "JavaScript",
"bytes": "865804"
},
{
"name": "Less",
"bytes": "307222"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6146705"
},
{
"name": "Shell",
"bytes": "8000"
},
{
"name": "Smarty",
"bytes": "1413"
}
],
"symlink_target": ""
} |
from gppylib.operations.backup_utils import escapeDoubleQuoteInSQLString
class RepairMissingExtraneous:
    """Builds the repair SQL used to delete catalog rows that gpcheckcat
    flagged as missing on some segments or extraneous on others.
    """

    def __init__(self, catalog_table_obj, issues, pk_name):
        """
        :param catalog_table_obj: catalog table metadata object; its
            getTableName() result is double-quote-escaped for use in SQL.
        :param issues: gpcheckcat issue tuples; each starts with the oid and
            ends with (issue_type, '{seg_ids}') -- see
            get_segment_to_oid_mapping for examples.
        :param pk_name: primary-key column override, or None to use 'oid'.
        """
        self.catalog_table_obj = catalog_table_obj
        catalog_name = self.catalog_table_obj.getTableName()
        self._escaped_catalog_name = escapeDoubleQuoteInSQLString(catalog_name)
        self._issues = issues
        self._pk_name = pk_name

    def _generate_delete_sql_for_oid(self, pk_name, oids):
        """Return one DELETE statement removing all ``oids`` by ``pk_name``."""
        escaped_pk_name = escapeDoubleQuoteInSQLString(pk_name)
        delete_sql = 'BEGIN;set allow_system_table_mods="dml";delete from {0} where {1} in ({2});COMMIT;'
        return delete_sql.format(self._escaped_catalog_name, escaped_pk_name,
                                 ','.join(str(oid) for oid in oids))

    def _generate_delete_sql_for_pkeys(self, pk_names):
        """Return one transaction containing a DELETE per issue, matching on
        every primary-key column of the catalog table.
        """
        delete_sql = 'BEGIN;set allow_system_table_mods="dml";'
        for issue in self._issues:
            delete_issue_sql = 'delete from {0} where '
            for pk, issue_col in zip(pk_names, issue):
                # Join predicates with AND; terminate the last one with ';'.
                operator = " and " if pk != pk_names[-1] else ";"
                add_on = "{pk} = '{col}'{operator}".format(pk=pk,
                                                           col=str(issue_col),
                                                           operator=operator)
                delete_issue_sql += add_on
            delete_issue_sql = delete_issue_sql.format(self._escaped_catalog_name)
            delete_sql += delete_issue_sql
        delete_sql += 'COMMIT;'
        return delete_sql

    def get_delete_sql(self, oids):
        """Return repair SQL: a single oid-based DELETE when the table has
        consistent oids across segments, otherwise per-issue DELETEs over the
        full primary key.
        """
        if self.catalog_table_obj.tableHasConsistentOids():
            pk_name = 'oid' if self._pk_name is None else self._pk_name
            return self._generate_delete_sql_for_oid(pk_name=pk_name, oids=oids)
        pk_names = tuple(self.catalog_table_obj.getPrimaryKey())
        return self._generate_delete_sql_for_pkeys(pk_names=pk_names)

    def get_segment_to_oid_mapping(self, all_seg_ids):
        """Return {segment_id: set(oids)} of rows to delete per segment, or
        None when there are no issues.

        Issues look like either of::

            [(49401, "extra", '{1,2}'), ...]
            [(49401, 'cmax', "extra", '{1,2}'), ...]
        """
        if not self._issues:
            return
        all_seg_ids = set([str(seg_id) for seg_id in all_seg_ids])
        oids_to_segment_mapping = {}
        for issue in self._issues:
            oid = issue[0]
            issue_type = issue[-2]
            seg_ids = issue[-1].strip('{}').split(',')
            # If an oid is missing from some segment(s), it is considered to
            # be extraneous on all the other segments/master, so invert the
            # segment set.
            if issue_type == "missing":
                seg_ids = all_seg_ids - set(seg_ids)
            for seg_id in seg_ids:
                # dict.has_key() (used previously) was removed in Python 3;
                # setdefault has the same semantics on both Python 2 and 3.
                oids_to_segment_mapping.setdefault(int(seg_id), set()).add(oid)
        return oids_to_segment_mapping
| {
"content_hash": "68bd897da46ae0e6f7d65729a2ed8861",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 113,
"avg_line_length": 41.37837837837838,
"alnum_prop": 0.5525800130633572,
"repo_name": "lintzc/gpdb",
"id": "f6ea0d61dc975ed2bb92dbf70d020426ea03e993",
"size": "3084",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gpcheckcat_modules/repair_missing_extraneous.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11028"
},
{
"name": "C",
"bytes": "35144943"
},
{
"name": "C++",
"bytes": "3731160"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "Cucumber",
"bytes": "829167"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Groff",
"bytes": "631842"
},
{
"name": "HTML",
"bytes": "169455"
},
{
"name": "Java",
"bytes": "307541"
},
{
"name": "Lex",
"bytes": "196276"
},
{
"name": "M4",
"bytes": "78510"
},
{
"name": "Makefile",
"bytes": "431523"
},
{
"name": "Objective-C",
"bytes": "22149"
},
{
"name": "PLSQL",
"bytes": "190501"
},
{
"name": "PLpgSQL",
"bytes": "8131027"
},
{
"name": "Perl",
"bytes": "3933982"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "9229659"
},
{
"name": "Ruby",
"bytes": "21343"
},
{
"name": "SQLPL",
"bytes": "1860160"
},
{
"name": "Shell",
"bytes": "484246"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "486035"
}
],
"symlink_target": ""
} |
import os
import re
import time
import unittest
from django.conf import settings
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from hypermap.aggregator.elasticsearch_client import ESHypermap
from hypermap.aggregator.solr import SolrHypermap
# URL of a remote Selenium hub; when unset, setUp drives a local Firefox.
SELENIUM_HUB_URL = os.environ.get("SELENIUM_HUB_URL", None)
BROWSER_HYPERMAP_URL = os.environ.get("BROWSER_HYPERMAP_URL",
                                      "http://localhost")
BROWSER_SEARCH_URL = "{0}/_elastic".format(BROWSER_HYPERMAP_URL)
# TODO: Uncomment this when a fix for ES 6.0 with nginx is found.
# BROWSER_SEARCH_URL = "{0}/_elastic".format(BROWSER_HYPERMAP_URL)
# NOTE(review): this deliberately overrides the "_elastic" assignment above
# (see TODO); the proxied URL is dead until the nginx/ES 6.0 fix lands.
BROWSER_SEARCH_URL = "http://elasticsearch:9200"
BROWSER_MAPLOOM_URL = "{0}/_maploom/".format(BROWSER_HYPERMAP_URL)
# Seconds to wait for celery to finish harvesting before asserting results.
WAIT_FOR_CELERY_JOB_PERIOD = int(
    os.environ.get("WAIT_FOR_CELERY_JOB_PERIOD", 30))
# REGISTRY_SEARCH_URL is "<backend>+<url>", e.g. "solr+http://solr:8983".
SEARCH_TYPE = settings.REGISTRY_SEARCH_URL.split('+')[0]
SEARCH_URL = settings.REGISTRY_SEARCH_URL.split('+')[1]
SEARCH_TYPE_SOLR = 'solr'
SEARCH_TYPE_ES = 'elasticsearch'
catalog_test_slug = 'hypermap'
class TestBrowser(unittest.TestCase):
def setUp(self):
if not SELENIUM_HUB_URL:
# run test on firefox of this machine.
self.driver = webdriver.Firefox()
else:
# run test on stand alone node machine in docker: selenium-firefox
self.driver = webdriver.Remote(
command_executor=SELENIUM_HUB_URL,
desired_capabilities=DesiredCapabilities.FIREFOX
)
self.driver.implicitly_wait(30)
self.base_url = BROWSER_HYPERMAP_URL
self.verificationErrors = []
self.accept_next_alert = True
print '> clearing SEARCH_URL={0}'.format(SEARCH_URL)
if SEARCH_TYPE == SEARCH_TYPE_SOLR:
self.solr = SolrHypermap()
# delete solr documents
# add the schema
print '> updating schema'.format(SEARCH_URL)
self.solr.update_schema(catalog=catalog_test_slug)
self.solr.clear_solr(catalog=catalog_test_slug)
self.search_engine_endpoint = '{0}/solr/{1}/select'.format(
SEARCH_URL, catalog_test_slug
)
elif SEARCH_TYPE == SEARCH_TYPE_ES:
es = ESHypermap()
# delete ES documents
es.clear_es()
self.search_engine_endpoint = '{0}/{1}/_search'.format(
SEARCH_URL, catalog_test_slug
)
else:
raise Exception("SEARCH_TYPE not valid=%s" % SEARCH_TYPE)
    def test_browser(self):
        """End-to-end scenario: log into the admin, upload an endpoint list,
        wait for celery to harvest it, then verify the service/layers appear
        in the admin, the registry UI and the search index, exercise the
        check/remove-check actions, and finally clear the search index.
        """
        ENDPOINT_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                     "mesonet.agron.iastate.edu.txt")
        print ""
        print ">>> with env:"
        print "REGISTRY_SKIP_CELERY: %s" % settings.REGISTRY_SKIP_CELERY
        print "REGISTRY_LIMIT_LAYERS: %s" % settings.REGISTRY_LIMIT_LAYERS
        print "REGISTRY_CHECK_PERIOD: %s" % settings.REGISTRY_CHECK_PERIOD
        print ""
        print "SELENIUM_HUB_URL: %s" % SELENIUM_HUB_URL
        print "BROWSER_HYPERMAP_URL: %s" % BROWSER_HYPERMAP_URL
        print "BROWSER_SEARCH_URL: %s" % BROWSER_SEARCH_URL
        print "BROWSER_MAPLOOM_URL: %s" % BROWSER_MAPLOOM_URL
        print "WAIT_FOR_CELERY_JOB_PERIOD: %s" % WAIT_FOR_CELERY_JOB_PERIOD
        print "ENDPOINT FILE: %s" % ENDPOINT_FILE
        print ""
        print "Starting..."
        # Stage 1: log into the Django admin.
        driver = self.driver
        time.sleep(3)
        driver.get(self.base_url + "/admin/login/?next=/admin/")
        print driver.current_url
        driver.find_element_by_id("id_password").clear()
        driver.find_element_by_id("id_password").send_keys("admin")
        driver.find_element_by_id("id_username").clear()
        driver.find_element_by_id("id_username").send_keys("admin")
        driver.find_element_by_css_selector("input[type=\"submit\"]").click()
        print driver.current_url
        driver.find_element_by_link_text("Periodic tasks").click()
        print driver.current_url
        print "> assert 3 periodic tasks. means beat is alive."
        self.assertEqual("3 periodic tasks",
                         driver.find_element_by_css_selector(
                             "p.paginator").text)
        # Stage 2: upload the endpoint list and let celery harvest it.
        driver.find_element_by_link_text("Home").click()
        print driver.current_url
        driver.find_element_by_link_text("Endpoint lists").click()
        print driver.current_url
        driver.find_element_by_link_text("Add endpoint list").click()
        print driver.current_url
        print "> uploading Endpoint List..."
        driver.find_element_by_id("id_upload").clear()
        driver.find_element_by_id("id_upload").send_keys(ENDPOINT_FILE)
        driver.find_element_by_name("_save").click()
        print driver.current_url
        print "> waiting {0} seconds for celery do the job....".format(
            WAIT_FOR_CELERY_JOB_PERIOD
        )
        time.sleep(WAIT_FOR_CELERY_JOB_PERIOD)
        driver.find_element_by_link_text("Aggregator").click()
        time.sleep(1)
        print driver.current_url
        driver.find_element_by_link_text("Endpoints").click()
        print driver.current_url
        print "> assert Endpoint created."
        time.sleep(1)
        self.assertEqual(
            "http://mesonet.agron.iastate.edu/cgi-bin/wms/us/wwa.cgi",
            driver.find_element_by_link_text(
                "http://mesonet.agron.iastate.edu/cgi-bin/wms/us/wwa.cgi").text)
        driver.find_element_by_link_text(
            "http://mesonet.agron.iastate.edu/cgi-bin/wms/us/wwa.cgi").click()
        # self.assertEqual("1 service/s created", driver.find_element_by_id("id_message").text)
        driver.find_element_by_link_text("Endpoints").click()
        print driver.current_url
        time.sleep(1)
        driver.find_element_by_link_text("Aggregator").click()
        print driver.current_url
        time.sleep(1)
        # Stage 3: verify the harvested service and its layers in the admin.
        driver.find_element_by_link_text("Services").click()
        print driver.current_url
        print "> assert 1 Service created."
        time.sleep(1)
        self.assertEqual("1 service", driver.find_element_by_css_selector(
            "p.paginator").text)
        self.assertEqual(
            "http://mesonet.agron.iastate.edu/cgi-bin/wms/us/wwa.cgi",
            driver.find_element_by_css_selector("td.field-url").text)
        driver.find_element_by_xpath(
            '//*[@id="result_list"]/tbody/tr/th/a').click()
        print driver.current_url
        print "> assert Service details."
        time.sleep(1)
        self.assertEqual("IEM NWS Warnings WMS Service",
                         driver.find_element_by_id(
                             "id_title").get_attribute("value"))
        driver.find_element_by_link_text("Services").click()
        print driver.current_url
        driver.find_element_by_link_text("Aggregator").click()
        print driver.current_url
        driver.find_element_by_link_text("Layers").click()
        print driver.current_url
        print "> assert 3 layers created."
        time.sleep(1)
        self.assertEqual("3 layers", driver.find_element_by_css_selector(
            "p.paginator").text)
        # Stage 4: verify check counts in the registry UI (polling, since
        # celery runs asynchronously) and layer counts in the search index.
        driver.get(self.base_url + "/registry/")
        print driver.current_url
        print "> go to /registry/."
        for i in range(1, 11):
            print "> try assert checks count > 0. (%i of 10)" % i
            try:
                self.assertNotEqual("0", driver.find_element_by_xpath(
                    "//td[4]").text)
                print "> found"
                break
            except AssertionError as e:
                print "> wait and reload page"
                time.sleep(10)
                driver.get(self.base_url + "/registry/")
        try:
            self.assertNotEqual("0",
                                driver.find_element_by_xpath("//td[4]").text)
        except AssertionError as e:
            self.verificationErrors.append(str(e))
        driver.get("{0}/hypermap/_count".format(BROWSER_SEARCH_URL))
        print driver.current_url
        time.sleep(2)
        for i in range(1, 11):
            print "> assert layers indexed are 3. (%i of 10)" % i
            try:
                self.assertRegexpMatches(
                    driver.find_element_by_css_selector("pre").text,
                    "^\\{\"count\":3[\\s\\S]*$")
                print "> found"
                break
            except AssertionError:
                print "> wait and reload page"
                time.sleep(10)
                driver.refresh()
        self.assertRegexpMatches(
            driver.find_element_by_css_selector("pre").text,
            "^\\{\"count\":3[\\s\\S]*$")
        # Stage 5: exercise remove-check / trigger-check on the service.
        driver.get(self.base_url + "/registry/")
        print driver.current_url
        driver.find_element_by_link_text(
            "IEM NWS Warnings WMS Service").click()
        print driver.current_url
        print "> remove checks."
        driver.find_element_by_name("remove").click()
        print driver.current_url
        driver.find_element_by_link_text("Home").click()
        print driver.current_url
        print "> assert checks = 0."
        self.assertEqual("0", driver.find_element_by_xpath("//td[4]").text)
        driver.find_element_by_link_text(
            "IEM NWS Warnings WMS Service").click()
        print driver.current_url
        print "> trigger check."
        driver.find_element_by_name("check").click()
        print driver.current_url
        driver.find_element_by_link_text("Home").click()
        print driver.current_url
        for i in range(1, 11):
            try:
                print "> assert checks = 1. (%i of 10)" % i
                self.assertTrue(
                    int(driver.find_element_by_xpath("//td[4]").text) > 0)
                print "> found"
                break
            except AssertionError:
                print "> wait and reload page"
                time.sleep(10)
                driver.refresh()
        # Stage 6: same remove-check / trigger-check cycle on a single layer.
        driver.find_element_by_link_text(
            "IEM NWS Warnings WMS Service").click()
        print driver.current_url
        driver.find_element_by_link_text("wwa").click()
        print driver.current_url
        print "> remove checks from Layer."
        driver.find_element_by_name("remove").click()
        print driver.current_url
        print "> assert text [No checks performed so far]."
        self.assertEqual("No checks performed so far",
                         driver.find_element_by_xpath("//tr[11]/td[2]").text)
        print "> check Layer."
        driver.find_element_by_name("check").click()
        print driver.current_url
        for i in range(1, 11):
            try:
                print "> assert text [Total Checks: N>0]. (%i of 10)" % i
                src = driver.page_source
                text_found_TOTAL_CHECKS_LTE_1 = re.search(
                    r'Total Checks: (1|2|3|4|5|6|7)', src)
                self.assertNotEqual(text_found_TOTAL_CHECKS_LTE_1, None)
                print "> found"
                break
            except AssertionError:
                print "> wait and reload page"
                time.sleep(10)
                driver.get(driver.current_url)
        src = driver.page_source
        text_found_TOTAL_CHECKS_LTE_1 = re.search(
            r'Total Checks: (1|2|3|4|5|6|7)', src)
        self.assertNotEqual(text_found_TOTAL_CHECKS_LTE_1, None)
        # Stage 7: clear the search index and verify the count changed.
        driver.find_element_by_link_text("Home").click()
        print driver.current_url
        driver.find_element_by_link_text("Monitor").click()
        print driver.current_url
        print "> clean Search index and wait"
        driver.find_element_by_name("clear_index").click()
        print driver.current_url
        time.sleep(5)
        driver.get("{0}/hypermap/_count".format(BROWSER_SEARCH_URL))
        print driver.current_url
        print "> assert count != 3 layers"
        try:
            self.assertNotRegexpMatches(
                driver.find_element_by_css_selector("pre").text,
                "^\\{\"count\":3[\\s\\S]*$")
        except AssertionError as e:
            self.verificationErrors.append(str(e))
        driver.get(self.base_url + "/registry/")
        print driver.current_url
        print "> finish hypermap page"
        print ""
        # TODO: activate this to test maploom, now dat app looks very buggy.
        """
        print ">> start maploom"
        driver.get(BROWSER_MAPLOOM_URL)
        print driver.current_url
        print ">> open registry modal"
        driver.find_element_by_xpath(
            "//div[@id='pulldown-content']/div[2]/div/div").click()
        print ">> assert Hypermap catalog"
        time.sleep(10)
        self.assertEqual("Hypermap",
                         driver.find_element_by_xpath(
                             "//div[@id='explore']/div/nav/div/form/div/div[2]/select").text)
        print ">> assert [Showing 3 of 3 - Page 1 / 1]"
        self.assertEqual("Showing 3 of 3 - Page 1 / 1".lower(),
                         driver.find_element_by_css_selector(
                             "span.text-muted.ng-binding").text.lower())
        driver.find_element_by_id("text_search_input_exp").clear()
        print ">> search IEM"
        driver.find_element_by_id("text_search_input_exp").send_keys("IEM")
        driver.find_element_by_id("text_search_btn").click()
        time.sleep(10)
        print ">> assert [Showing 1 of 1 - Page 1 / 1]"
        self.assertEqual("Showing 1 of 1 - Page 1 / 1".lower(),
                         driver.find_element_by_css_selector(
                             "span.text-muted.ng-binding").text.lower())
        print ">> click reset"
        driver.find_element_by_name("button").click()
        time.sleep(10)
        print ">> assert [Showing 3 of 3 - Page 1 / 1]"
        self.assertEqual("Showing 3 of 3 - Page 1 / 1".lower(),
                         driver.find_element_by_css_selector(
                             "span.text-muted.ng-binding").text.lower())
        print ">> click on 3 layers to select"
        driver.find_element_by_css_selector("td.ellipsis.ng-binding").click()
        driver.find_element_by_xpath(
            "//div[@id='registry-layers']/div/div/div/div[2]/div[2]/div/table/tbody/tr[3]/td").click()
        driver.find_element_by_xpath(
            "//div[@id='registry-layers']/div/div/div/div[2]/div[2]/div/table/tbody/tr[4]/td").click()
        print ">> click on 3 layers to unselect"
        driver.find_element_by_css_selector("td.ellipsis.ng-binding").click()
        driver.find_element_by_xpath(
            "//div[@id='registry-layers']/div/div/div/div[2]/div[2]/div/table/tbody/tr[3]/td").click()
        driver.find_element_by_xpath(
            "//div[@id='registry-layers']/div/div/div/div[2]/div[2]/div/table/tbody/tr[4]/td").click()
        """
def is_element_present(self, how, what):
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException as e:
print e
return False
return True
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException as e:
print e
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
    def tearDown(self):
        """Quit the browser, then fail the test if any soft assertion errors
        were collected in ``verificationErrors`` during the run."""
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
| {
"content_hash": "c79cddc300129db6c0a6cae9fafdd167",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 102,
"avg_line_length": 41.527415143603136,
"alnum_prop": 0.5749764225086451,
"repo_name": "cga-harvard/HHypermap",
"id": "34174802485aa5b624043379052efd08ed17c7de",
"size": "15929",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hypermap/tests/test_end_to_end_selenium_firefox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93364"
},
{
"name": "HTML",
"bytes": "50409"
},
{
"name": "JavaScript",
"bytes": "2298247"
},
{
"name": "Makefile",
"bytes": "3713"
},
{
"name": "Python",
"bytes": "334877"
},
{
"name": "Shell",
"bytes": "750"
}
],
"symlink_target": ""
} |
import eventful
from django.conf import settings
def get_events():
    """Return the list of upcoming events near Dover, NH from Eventful.

    Docs: https://api.eventful.com/docs/events/search
    TODO: sort by date and then show some actual details.
    """
    api = eventful.API(settings.EVENTFUL_API_KEY)
    response = api.call('/events/search', l='Dover, NH')
    # The search response nests results as events -> event; guard against a
    # null/absent 'events' member (previously this chained .get() calls and
    # raised AttributeError on None when there were no matches).
    events = response.get('events') or {}
    return events.get('event')
"content_hash": "a98a1193a7e23f9dafc28562352ea35c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 79,
"avg_line_length": 29.727272727272727,
"alnum_prop": 0.6819571865443425,
"repo_name": "tclancy/whiteboard",
"id": "3eb712e730c1061ab1f4c59f7cc37f82b33201ed",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whiteboard/dashboard/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "642"
},
{
"name": "Python",
"bytes": "4593"
}
],
"symlink_target": ""
} |
import threading
import websocket
import json
import logging
logging.basicConfig()
class SocketHandler(threading.Thread):
    """Thread bridging a pair of queues to a WebSocket connection.

    Messages put on ``send_q`` are serialized to JSON and written to the
    socket; frames received from the socket are parsed from JSON and put
    on ``recv_q``.
    """
    def __init__(self, host, send_q, recv_q, debug=False, sentinel=None):
        """
        :param host: hostname/IP to connect to; port 8899 is hard-coded.
        :param send_q: queue of outgoing message dicts.
        :param recv_q: queue that decoded incoming messages are pushed to.
        :param debug: when True, print connection/Tx activity.
        :param sentinel: object that, when pulled off ``send_q``, makes the
            thread close the socket and exit.  NOTE(review): with the
            default of None, putting None on ``send_q`` also triggers
            shutdown, because run() compares with ``is``.
        """
        super(SocketHandler, self).__init__()
        self.debug = debug
        self.send_q = send_q
        self.recv_q = recv_q
        self._sentinel = sentinel
        # Becomes True once on_open fires; run() sends nothing until then.
        self.ready = False
        def on_message(ws, message):
            # Decode incoming JSON frames and hand them to the consumer.
            self.recv_q.put(json.loads(message))
            print(message)
        def on_error(ws, error):
            print(error)
        def on_close(ws):
            # TODO: implement reconnection strategy
            pass
        def on_open(ws):
            self.ready = True;
        self.ws = websocket.WebSocketApp('ws://%s:8899/websocket' % host,
                                         on_message = on_message,
                                         on_error = on_error,
                                         on_close = on_close,
                                         on_open = on_open)
        # Run the WebSocket handler in its own thread
        def run_ws():
            self.ws.run_forever()
        threading.Thread(target=run_ws).start()
        if (self.debug):
            print('opened socket to %s:%d' % (host, 8899))
    def run(self):
        # NOTE(review): this loop busy-polls qsize() instead of blocking on
        # send_q.get(), so the thread spins at full CPU while idle or before
        # the socket opens -- consider a blocking get or a short sleep.
        while True:
            # Pull new messages to send off the queue
            if self.send_q.qsize() > 0 and self.ready == True:
                msg = self.send_q.get()
                # Check if we're being told to shut down
                if msg is self._sentinel:
                    self.ws.close()
                    break
                if self.debug: print("Tx: " + json.dumps(msg))
                msg_to_send = json.dumps(msg) + "\r\n"
                # Send the message
                self.ws.send(msg_to_send)
                self.send_q.task_done()
| {
"content_hash": "a0fa870c133dd7bfdda1a27b90a3ba44",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 71,
"avg_line_length": 26.310344827586206,
"alnum_prop": 0.6009174311926605,
"repo_name": "mirobot/mirobot-py",
"id": "f2bb91c3edff047b0ea414f1592f4906bfef1c61",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mirobot/socket_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7097"
}
],
"symlink_target": ""
} |
import jwt
from django.utils.translation import gettext_lazy as _
from jwt import InvalidAlgorithmError, InvalidTokenError, algorithms
from .exceptions import TokenBackendError
from .utils import format_lazy
# JWT signing algorithms this backend accepts: HMAC (HS*, symmetric secret)
# and RSA (RS*, asymmetric key pair).  The RS* entries require the
# `cryptography` package — enforced in TokenBackend._validate_algorithm.
ALLOWED_ALGORITHMS = (
    'HS256',
    'HS384',
    'HS512',
    'RS256',
    'RS384',
    'RS512',
)
class TokenBackend:
    """Thin wrapper around PyJWT that signs and verifies tokens.

    Translates PyJWT errors into ``TokenBackendError`` and injects/validates
    the optional ``aud`` and ``iss`` claims.
    """

    def __init__(self, algorithm, signing_key=None, verifying_key=None, audience=None, issuer=None):
        self._validate_algorithm(algorithm)
        self.algorithm = algorithm
        self.signing_key = signing_key
        self.audience = audience
        self.issuer = issuer
        # HMAC algorithms are symmetric: the signing secret also verifies.
        self.verifying_key = signing_key if algorithm.startswith('HS') else verifying_key

    def _validate_algorithm(self, algorithm):
        """
        Ensure that the nominated algorithm is recognized, and that cryptography is installed for those
        algorithms that require it
        """
        if algorithm not in ALLOWED_ALGORITHMS:
            raise TokenBackendError(format_lazy(_("Unrecognized algorithm type '{}'"), algorithm))
        needs_crypto = algorithm in algorithms.requires_cryptography
        if needs_crypto and not algorithms.has_crypto:
            raise TokenBackendError(format_lazy(_("You must have cryptography installed to use {}."), algorithm))

    def encode(self, payload):
        """
        Returns an encoded token for the given payload dictionary.
        """
        claims = payload.copy()
        if self.audience is not None:
            claims['aud'] = self.audience
        if self.issuer is not None:
            claims['iss'] = self.issuer
        token = jwt.encode(claims, self.signing_key, algorithm=self.algorithm)
        # PyJWT <= 1.7.1 returns bytes, PyJWT >= 2.0.0a1 returns str;
        # normalise so callers always see text.
        return token.decode('utf-8') if isinstance(token, bytes) else token

    def decode(self, token, verify=True):
        """
        Performs a validation of the given token and returns its payload
        dictionary.
        Raises a `TokenBackendError` if the token is malformed, if its
        signature check fails, or if its 'exp' claim indicates it has expired.
        """
        decode_options = {
            'verify_aud': self.audience is not None,
            "verify_signature": verify,
        }
        try:
            return jwt.decode(
                token,
                self.verifying_key,
                algorithms=[self.algorithm],
                verify=verify,
                audience=self.audience,
                issuer=self.issuer,
                options=decode_options,
            )
        except InvalidAlgorithmError as ex:
            raise TokenBackendError(_('Invalid algorithm specified')) from ex
        except InvalidTokenError:
            raise TokenBackendError(_('Token is invalid or expired'))
| {
"content_hash": "a92f0fcfaf81549bbe25f84cc3a309d8",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 113,
"avg_line_length": 35.94736842105263,
"alnum_prop": 0.6314055636896047,
"repo_name": "davesque/django-rest-framework-simplejwt",
"id": "de7128ad318ee0474ca37140f09d244038639d41",
"size": "2732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework_simplejwt/backends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1311"
},
{
"name": "Python",
"bytes": "122235"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
} |
import proto # type: ignore
from google.cloud.aiplatform_v1.types import accelerator_type as gca_accelerator_type
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"MachineSpec",
"DedicatedResources",
"AutomaticResources",
"BatchDedicatedResources",
"ResourcesConsumed",
"DiskSpec",
"NfsMount",
"AutoscalingMetricSpec",
},
)
class MachineSpec(proto.Message):
    r"""Specification of a single machine.
    Attributes:
        machine_type (str):
            Immutable. The type of the machine.
            See the `list of machine types supported for
            prediction <https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types>`__
            See the `list of machine types supported for custom
            training <https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types>`__.
            For
            [DeployedModel][google.cloud.aiplatform.v1.DeployedModel]
            this field is optional, and the default value is
            ``n1-standard-2``. For
            [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]
            or as part of
            [WorkerPoolSpec][google.cloud.aiplatform.v1.WorkerPoolSpec]
            this field is required.
        accelerator_type (google.cloud.aiplatform_v1.types.AcceleratorType):
            Immutable. The type of accelerator(s) that may be attached
            to the machine as per
            [accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count].
        accelerator_count (int):
            The number of accelerators to attach to the
            machine.
    """
    # Generated proto-plus field declarations; the `number=` values are the
    # wire-format tags for this message and must not be changed.
    machine_type = proto.Field(
        proto.STRING,
        number=1,
    )
    accelerator_type = proto.Field(
        proto.ENUM,
        number=2,
        enum=gca_accelerator_type.AcceleratorType,
    )
    accelerator_count = proto.Field(
        proto.INT32,
        number=3,
    )
class DedicatedResources(proto.Message):
    r"""A description of resources that are dedicated to a
    DeployedModel, and that need a higher degree of manual
    configuration.
    Attributes:
        machine_spec (google.cloud.aiplatform_v1.types.MachineSpec):
            Required. Immutable. The specification of a
            single machine used by the prediction.
        min_replica_count (int):
            Required. Immutable. The minimum number of
            machine replicas this DeployedModel will be
            always deployed on. This value must be greater
            than or equal to 1.
            If traffic against the DeployedModel increases,
            it may dynamically be deployed onto more
            replicas, and as traffic decreases, some of
            these extra replicas may be freed.
        max_replica_count (int):
            Immutable. The maximum number of replicas this DeployedModel
            may be deployed on when the traffic against it increases. If
            the requested value is too large, the deployment will error,
            but if deployment succeeds then the ability to scale the
            model to that many replicas is guaranteed (barring service
            outages). If traffic against the DeployedModel increases
            beyond what its replicas at maximum may handle, a portion of
            the traffic will be dropped. If this value is not provided,
            will use
            [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count]
            as the default value.
            The value of this field impacts the charge against Vertex
            CPU and GPU quotas. Specifically, you will be charged for
            (max_replica_count \* number of cores in the selected
            machine type) and (max_replica_count \* number of GPUs per
            replica in the selected machine type).
        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]):
            Immutable. The metric specifications that overrides a
            resource utilization metric (CPU utilization, accelerator's
            duty cycle, and so on) target value (default to 60 if not
            set). At most one entry is allowed per metric.
            If
            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
            is above 0, the autoscaling will be based on both CPU
            utilization and accelerator's duty cycle metrics and scale
            up when either metrics exceeds its target value while scale
            down if both metrics are under their target value. The
            default target value is 60 for both metrics.
            If
            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
            is 0, the autoscaling will be based on CPU utilization
            metric only with default target value 60 if not explicitly
            set.
            For example, in the case of Online Prediction, if you want
            to override target CPU utilization to 80, you should set
            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1.AutoscalingMetricSpec.metric_name]
            to
            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
            and
            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1.AutoscalingMetricSpec.target]
            to ``80``.
    """
    # Generated proto-plus field declarations; `number=` values are wire tags.
    machine_spec = proto.Field(
        proto.MESSAGE,
        number=1,
        message="MachineSpec",
    )
    min_replica_count = proto.Field(
        proto.INT32,
        number=2,
    )
    max_replica_count = proto.Field(
        proto.INT32,
        number=3,
    )
    autoscaling_metric_specs = proto.RepeatedField(
        proto.MESSAGE,
        number=4,
        message="AutoscalingMetricSpec",
    )
class AutomaticResources(proto.Message):
    r"""A description of resources that to large degree are decided
    by Vertex AI, and require only a modest additional
    configuration. Each Model supporting these resources documents
    its specific guidelines.
    Attributes:
        min_replica_count (int):
            Immutable. The minimum number of replicas this DeployedModel
            will be always deployed on. If traffic against it increases,
            it may dynamically be deployed onto more replicas up to
            [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
            and as traffic decreases, some of these extra replicas may
            be freed. If the requested value is too large, the
            deployment will error.
        max_replica_count (int):
            Immutable. The maximum number of replicas
            this DeployedModel may be deployed on when the
            traffic against it increases. If the requested
            value is too large, the deployment will error,
            but if deployment succeeds then the ability to
            scale the model to that many replicas is
            guaranteed (barring service outages). If traffic
            against the DeployedModel increases beyond what
            its replicas at maximum may handle, a portion of
            the traffic will be dropped. If this value is
            not provided, a no upper bound for scaling under
            heavy traffic will be assume, though Vertex AI
            may be unable to scale beyond certain replica
            number.
    """
    # Generated proto-plus field declarations; `number=` values are wire tags.
    min_replica_count = proto.Field(
        proto.INT32,
        number=1,
    )
    max_replica_count = proto.Field(
        proto.INT32,
        number=2,
    )
class BatchDedicatedResources(proto.Message):
    r"""A description of resources that are used for performing batch
    operations, are dedicated to a Model, and need manual
    configuration.
    Attributes:
        machine_spec (google.cloud.aiplatform_v1.types.MachineSpec):
            Required. Immutable. The specification of a
            single machine.
        starting_replica_count (int):
            Immutable. The number of machine replicas used at the start
            of the batch operation. If not set, Vertex AI decides
            starting number, not greater than
            [max_replica_count][google.cloud.aiplatform.v1.BatchDedicatedResources.max_replica_count]
        max_replica_count (int):
            Immutable. The maximum number of machine
            replicas the batch operation may be scaled to.
            The default value is 10.
    """
    # Generated proto-plus field declarations; `number=` values are wire tags.
    machine_spec = proto.Field(
        proto.MESSAGE,
        number=1,
        message="MachineSpec",
    )
    starting_replica_count = proto.Field(
        proto.INT32,
        number=2,
    )
    max_replica_count = proto.Field(
        proto.INT32,
        number=3,
    )
class ResourcesConsumed(proto.Message):
    r"""Statistics information about resource consumption.
    Attributes:
        replica_hours (float):
            Output only. The number of replica hours
            used. Note that many replicas may run in
            parallel, and additionally any given work may be
            queued for some time. Therefore this value is
            not strictly related to wall time.
    """
    # Generated proto-plus field declaration; `number=` is the wire tag.
    replica_hours = proto.Field(
        proto.DOUBLE,
        number=1,
    )
class DiskSpec(proto.Message):
    r"""Represents the spec of disk options.
    Attributes:
        boot_disk_type (str):
            Type of the boot disk (default is "pd-ssd").
            Valid values: "pd-ssd" (Persistent Disk Solid
            State Drive) or "pd-standard" (Persistent Disk
            Hard Disk Drive).
        boot_disk_size_gb (int):
            Size in GB of the boot disk (default is
            100GB).
    """
    # Generated proto-plus field declarations; `number=` values are wire tags.
    boot_disk_type = proto.Field(
        proto.STRING,
        number=1,
    )
    boot_disk_size_gb = proto.Field(
        proto.INT32,
        number=2,
    )
class NfsMount(proto.Message):
    r"""Represents a mount configuration for Network File System
    (NFS) to mount.
    Attributes:
        server (str):
            Required. IP address of the NFS server.
        path (str):
            Required. Source path exported from NFS server. Has to start
            with '/', and combined with the ip address, it indicates the
            source mount path in the form of ``server:path``
        mount_point (str):
            Required. Destination mount path. The NFS will be mounted
            for the user under /mnt/nfs/<mount_point>
    """
    # Generated proto-plus field declarations; `number=` values are wire tags.
    server = proto.Field(
        proto.STRING,
        number=1,
    )
    path = proto.Field(
        proto.STRING,
        number=2,
    )
    mount_point = proto.Field(
        proto.STRING,
        number=3,
    )
class AutoscalingMetricSpec(proto.Message):
    r"""The metric specification that defines the target resource
    utilization (CPU utilization, accelerator's duty cycle, and so
    on) for calculating the desired replica count.
    Attributes:
        metric_name (str):
            Required. The resource metric name. Supported metrics:
            - For Online Prediction:
            - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle``
            - ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
        target (int):
            The target resource utilization in percentage
            (1% - 100%) for the given metric; once the real
            usage deviates from the target by a certain
            percentage, the machine replicas change. The
            default value is 60 (representing 60%) if not
            provided.
    """
    # Generated proto-plus field declarations; `number=` values are wire tags.
    metric_name = proto.Field(
        proto.STRING,
        number=1,
    )
    target = proto.Field(
        proto.INT32,
        number=2,
    )
# Re-export exactly the message names declared in the proto-plus manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "5a6aeaf32df101f89d58e1dadf748a82",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 112,
"avg_line_length": 35.82831325301205,
"alnum_prop": 0.6291719209751997,
"repo_name": "googleapis/python-aiplatform",
"id": "23a5bb218777c51af75af42ba83e9220ef5c3fa8",
"size": "12495",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/aiplatform_v1/types/machine_resources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen dailysecondtotalcount.seconds to a 64-bit integer column."""

    dependencies = [
        ("statistics", "0010_existing_case_timings_count"),
    ]

    operations = [
        migrations.AlterField(
            model_name="dailysecondtotalcount",
            name="seconds",
            field=models.BigIntegerField(),
        ),
    ]
| {
"content_hash": "7229471244918465be20f8475b51efad",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 113,
"avg_line_length": 28,
"alnum_prop": 0.7291666666666666,
"repo_name": "rapidpro/casepro",
"id": "3d6fcb05317a307a561a8440518a86e59ef334a0",
"size": "409",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "casepro/statistics/migrations/0011_auto_20170605_0657.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "196784"
},
{
"name": "HTML",
"bytes": "10550"
},
{
"name": "Haml",
"bytes": "98371"
},
{
"name": "Less",
"bytes": "3180"
},
{
"name": "PLpgSQL",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "914926"
},
{
"name": "Shell",
"bytes": "816"
}
],
"symlink_target": ""
} |
import unittest
class GpTestCase(unittest.TestCase):
    """TestCase base that manages a batch of mock patchers per test.

    ``apply_patches`` starts every patcher and records the resulting mocks;
    ``tearDown`` stops them.  Class-level counters track how many times each
    happened so ``tearDownClass`` can flag subclasses that applied patches
    but forgot to call this tearDown.
    """

    def __init__(self, methodName='runTest'):
        super(GpTestCase, self).__init__(methodName)
        self.patches = []
        self.mock_objs = []
        # Reset the class-level bookkeeping counters used by tearDownClass.
        self.__class__.apply_patches_counter = 0
        self.__class__.tear_down_counter = 0

    def apply_patches(self, patches):
        """Start every patcher in *patches* and remember the started mocks.

        Raises Exception if patches were already applied without an
        intervening tearDown().
        """
        if self.patches:
            raise Exception('Test class is already patched!')
        self.patches = patches
        self.mock_objs = [p.start() for p in self.patches]
        self.__class__.apply_patches_counter += 1

    def get_mock_from_apply_patch(self, mock_name):
        ''' Return None if there is no existing object
        mock name prints out the last "namespace"
        for example "os.path.exists", mock_name will be "exists"
        '''
        for mock_obj in self.mock_objs:
            if mock_name == mock_obj._mock_name:
                return mock_obj
        return None

    # if you have a tearDown() in your test class,
    # be sure to call this using super(<child class name>, self).tearDown()
    def tearDown(self):
        # Plain loop instead of the old side-effect-only list comprehension,
        # which built and discarded a throwaway list.
        for p in self.patches:
            p.stop()
        self.mock_objs = []
        self.__class__.tear_down_counter += 1

    @classmethod
    def setUpClass(cls):
        cls.apply_patches_counter = 0
        cls.tear_down_counter = 0

    @classmethod
    def tearDownClass(cls):
        # A mismatch means some test started patchers that were never stopped.
        if cls.apply_patches_counter > 0 and cls.apply_patches_counter != cls.tear_down_counter:
            raise Exception("Unequal call for apply patches: %s, teardown: %s. "
                            "You probably need to add a super(<child class>, "
                            "self).tearDown() in your tearDown()" % (cls.apply_patches_counter,
                                                                     cls.tear_down_counter))
def add_setup(setup=None, teardown=None):
    """decorate test functions to add additional setup/teardown contexts

    Runs *setup* (if given) before the test and *teardown* (if given) after
    it; both receive the test instance (``self``).
    """
    import functools  # local import to keep this module's top-level imports unchanged

    def decorate_function(test):
        # functools.wraps preserves the test's __name__/__doc__, so unittest
        # reporting shows the real test name instead of 'wrapper'.
        @functools.wraps(test)
        def wrapper(self):
            if setup:
                setup(self)
            test(self)
            if teardown:
                teardown(self)
        return wrapper
    return decorate_function
# hide unittest dependencies here
def run_tests():
    """Run the calling module's tests via unittest's CLI entry point (verbose, buffered output)."""
    unittest.main(verbosity=2, buffer=True)
# Re-exported so callers can use @skip without importing unittest themselves.
skip = unittest.skip
class FakeCursor:
    """Minimal DB-API-cursor stand-in that serves a canned list of rows.

    Supports iteration, fetchall(), rowcount, and a no-op close().
    """

    def __init__(self, my_list=None):
        self.list = []
        self.rowcount = 0
        if my_list:
            self.set_result_for_testing(my_list)

    def set_result_for_testing(self, result_list):
        """Replace the canned rows and keep rowcount in sync."""
        self.list = result_list
        self.rowcount = len(result_list)

    def fetchall(self):
        """Return every canned row, like a real cursor's fetchall()."""
        return self.list

    def __iter__(self):
        return iter(self.list)

    def close(self):
        # Nothing to release for a fake cursor.
        pass
| {
"content_hash": "1dbbf1e1e45c3dd1357c7da13ca96b9a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 96,
"avg_line_length": 31.227272727272727,
"alnum_prop": 0.5749636098981077,
"repo_name": "rvs/gpdb",
"id": "ddfee30407fe010ab5efc5ba6a760fa5efe12471",
"size": "2748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gppylib/test/unit/gp_unittest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35013613"
},
{
"name": "C++",
"bytes": "3833252"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "715430"
},
{
"name": "HTML",
"bytes": "169634"
},
{
"name": "Java",
"bytes": "268348"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "102006"
},
{
"name": "Makefile",
"bytes": "420136"
},
{
"name": "PLSQL",
"bytes": "261269"
},
{
"name": "PLpgSQL",
"bytes": "5477026"
},
{
"name": "Perl",
"bytes": "3831299"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "8653837"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3824391"
},
{
"name": "Shell",
"bytes": "527804"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488001"
}
],
"symlink_target": ""
} |
import unittest
import json
from guitar.configurator import Configurator
class TestConfigurator(unittest.TestCase):
    """Drives Configurator's question/answer flow end-to-end against the
    canned config in ``config_path`` and checks the patches it emits."""

    config_path = 'tests/test_config_json.txt'
    maxDiff = None

    def _drive_configurator(self, answers):
        """Build a Configurator from config_path, feed it *answers* in order,
        and return the patches it produces.

        Extracted from the two tests below, which previously duplicated this
        entire driver loop.
        """
        with open(self.config_path, 'r') as f:
            config = json.loads(f.read())
        file_paths = {
            'settings': 'dummy',
            'urls': 'dummy2',
            'installed_apps': 'dummy',
        }
        configurator = Configurator(config, file_paths)
        answers = list(answers)  # copy so the caller's list is untouched
        for question in configurator:
            answer = answers.pop(0)
            self.assertTrue(question['title'])
            question_answers = question.get('answers', [])
            for _answer in question_answers:
                if _answer['key'] == answer:
                    question.answer(answer)
                    # NOTE: preserved from the original — this only skips to
                    # the next candidate answer; an 'input' question whose key
                    # matched would still be answered again below.
                    continue
            if question['type'] == 'input':
                question.answer(answer)
        return configurator.get_patches()

    def test_configurator(self):
        patches = self._drive_configurator([1, 'my_db_name', 'my_db_user', '^test_url/', None])
        patches_expect = {
            u'installed_apps': {
                'file_path': 'dummy',
                'patch': {'item_to_add': u'foo.bar', 'after': u'django.contrib.sessions', 'before': None}
            },
            u'urls': {
                'file_path': 'dummy2',
                'patch': {'item_to_add': u"url(r'^test_url/', include('foo.urls'))", 'after': None, 'before': None}
            },
            u'settings': {
                'file_path': 'dummy',
                'patch': {
                    'item_to_add': u"DATABASES = {\n    'default': {\n        'ENGINE': 'postgresql',\n        'NAME': '',\n        'USER': 'my_db_name',\n        'PASSWORD': 'my_db_user',\n        'HOST': '',\n        'PORT': '',\n    }\n}\n",
                    'after': None,
                    'before': None
                }
            }
        }
        self.assertDictEqual(patches_expect, patches)

    def test_skip_questions(self):
        patches = self._drive_configurator([3, '^test_url/', None])
        patches_expect = {
            u'installed_apps': {
                'file_path': 'dummy',
                'patch': {
                    'item_to_add': u'foo.bar',
                    'after': u'django.contrib.sessions',
                    'before': None
                }
            },
            u'urls': {
                'file_path': 'dummy2',
                'patch': {
                    'item_to_add': u"url(r'^test_url/', include('foo.urls'))",
                    'after': None,
                    'before': None
                }
            },
            u'settings': {
                'file_path': 'dummy',
                'patch': {
                    'item_to_add': u"DATABASES = {\n    'default': {\n        'ENGINE': 'sqlite3',\n        'NAME': '',\n        'USER': '',\n        'PASSWORD': '',\n        'HOST': '',\n        'PORT': '',\n    }\n}\n",
                    'after': None,
                    'before': None
                }
            }
        }
        self.assertDictEqual(patches_expect, patches)
| {
"content_hash": "cfc5e88e58e580128ea3e4cc43797acb",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 244,
"avg_line_length": 31.614754098360656,
"alnum_prop": 0.4366087632875292,
"repo_name": "django-stars/guitar",
"id": "0eef2d8c3ba659ff6014158eb4c551afda70df02",
"size": "3857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guitar-package/guitar/tests/configurator_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28912"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import copy
import os
import re
import sys
from io import BytesIO
from itertools import chain
from pprint import pformat
from django.conf import settings
from django.core import signing
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import ImmutableList, MultiValueDict
from django.utils.encoding import (
escape_uri_path, force_bytes, force_str, force_text, iri_to_uri,
)
from django.utils.six.moves.urllib.parse import (
parse_qsl, quote, urlencode, urljoin, urlsplit,
)
# Sentinel default for get_signed_cookie(): distinguishes "no default given"
# (re-raise) from an explicit default of None.
RAISE_ERROR = object()
# Accepts a hostname (letters/digits/dots/hyphens) or a bracketed IPv6
# literal, optionally followed by a :port suffix.
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
    """Raised in place of an IOError while reading the request body stream."""
    pass
# Raised by HttpRequest.body when the underlying stream was already consumed.
class RawPostDataException(Exception):
    """
    You cannot access raw_post_data from a request that has
    multipart/* POST data if it has been accessed via POST,
    FILES, etc..
    """
    pass
class HttpRequest(object):
    """A basic HTTP request."""
    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    # Lazily populated from settings.FILE_UPLOAD_HANDLERS on first access
    # (see the upload_handlers property below).
    _upload_handlers = []
    def __init__(self):
        # WARNING: The `WSGIRequest` subclass doesn't call `super`.
        # Any variable assignment made here should also happen in
        # `WSGIRequest.__init__()`.
        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.COOKIES = {}
        self.META = {}
        self.FILES = MultiValueDict()
        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        self._post_parse_error = False
    def __repr__(self):
        # Fall back to the bare class name for an uninitialized request.
        if self.method is None or not self.get_full_path():
            return force_str('<%s>' % self.__class__.__name__)
        return force_str(
            '<%s: %s %r>' % (self.__class__.__name__, self.method, force_str(self.get_full_path()))
        )
    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
                'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)
        # There is no hostname validation when DEBUG=True
        if settings.DEBUG:
            return host
        domain, port = split_domain_port(host)
        if domain and validate_host(domain, settings.ALLOWED_HOSTS):
            return host
        else:
            msg = "Invalid HTTP_HOST header: %r." % host
            if domain:
                msg += " You may need to add %r to ALLOWED_HOSTS." % domain
            else:
                msg += " The domain name provided is not valid according to RFC 1034/1035."
            raise DisallowedHost(msg)
    def get_full_path(self):
        """Return the request path plus the query string, percent-escaped."""
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (
            escape_uri_path(self.path),
            ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
        )
    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempts to return a signed cookie. If the signature fails or the
        cookie has expired, raises an exception... unless you provide the
        default argument in which case that value will be returned instead.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value
    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, the absolute URI is
        built on ``request.get_full_path()``. Anyway, if the location is
        absolute, it is simply converted to an RFC 3987 compliant URI and
        returned and if location is relative or is scheme-relative (i.e.,
        ``//example.com/``), it is urljoined to a base URL constructed from the
        request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
                                                           host=self.get_host(),
                                                           path=self.path)
            # Join the constructed URL with the provided location, which will
            # allow the provided ``location`` to apply query strings to the
            # base path as well as override the host, if it begins with //
            location = urljoin(current_uri, location)
        return iri_to_uri(location)
    def _get_scheme(self):
        # Hook for subclasses; per the comment in `scheme` below, subclasses
        # are expected to override this with the real transport scheme.
        return 'https' if os.environ.get("HTTPS") == "on" else 'http'
    @property
    def scheme(self):
        # First, check the SECURE_PROXY_SSL_HEADER setting.
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            if self.META.get(header, None) == value:
                return 'https'
        # Failing that, fall back to _get_scheme(), which is a hook for
        # subclasses to implement.
        return self._get_scheme()
    def is_secure(self):
        """True when the request scheme is https."""
        return self.scheme == 'https'
    def is_ajax(self):
        """True when the X-Requested-With header marks this as an XMLHttpRequest."""
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
    @property
    def encoding(self):
        return self._encoding
    @encoding.setter
    def encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post
    def _initialize_handlers(self):
        # Instantiate the upload handler classes named in settings.
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]
    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers
    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        # Once _files exists the upload was already parsed; replacing the
        # handlers at that point could not have any effect.
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers
    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()
    @property
    def body(self):
        # The raw request body, read once and cached in _body; subsequent
        # reads are served from an in-memory BytesIO copy.
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")
            try:
                self._body = self.read()
            except IOError as e:
                six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
            self._stream = BytesIO(self._body)
        return self._body
    def _mark_post_parse_error(self):
        # Leave empty-but-valid POST/FILES so error reporting can still
        # access them without re-triggering the parse.
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True
    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return
        if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
    def close(self):
        # Close every uploaded file object collected during multipart parsing.
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()
    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.
    def read(self, *args, **kwargs):
        # Stream IOErrors are re-raised as UnreadablePostError, preserving
        # the original traceback.
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
    def readline(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
    def xreadlines(self):
        # Yield lines until EOF; also serves as the iterator protocol below.
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines
    def readlines(self):
        return list(iter(self))
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict which represents a query string.
    A QueryDict can be used to represent GET or POST data. It subclasses
    MultiValueDict since keys in such data can be repeated, for instance
    in the data from a form with a <select multiple> field.
    By default QueryDicts are immutable, though the copy() method
    will always return a mutable copy.
    Both keys and values set on this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string=None, mutable=False, encoding=None):
        """
        Parse ``query_string`` (str or bytes) into key/value pairs.

        Blank values are kept (``?a=&b=1`` yields both keys). The instance
        is immutable unless ``mutable=True`` is passed.
        """
        super(QueryDict, self).__init__()
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        if six.PY3:
            if isinstance(query_string, bytes):
                # query_string normally contains URL-encoded data, a subset of ASCII.
                try:
                    query_string = query_string.decode(encoding)
                except UnicodeDecodeError:
                    # ... but some user agents are misbehaving :-(
                    query_string = query_string.decode('iso-8859-1')
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True,
                                        encoding=encoding):
                self.appendlist(key, value)
        else:
            # Python 2: parse_qsl has no encoding argument, so decode the
            # parsed byte values manually, falling back to Latin-1.
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True):
                try:
                    value = value.decode(encoding)
                except UnicodeDecodeError:
                    value = value.decode('iso-8859-1')
                self.appendlist(force_text(key, encoding, errors='replace'),
                                value)
        # Only unlock mutation after the initial parse is complete.
        self._mutable = mutable
    @property
    def encoding(self):
        # Lazily fall back to the site-wide default charset.
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    @encoding.setter
    def encoding(self, value):
        self._encoding = value
    def _assert_mutable(self):
        # Guard used by every mutating method below.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)
    def __copy__(self):
        # Copies are always mutable, regardless of the source.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in six.iterlists(self):
            result.setlist(key, value)
        return result
    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in six.iterlists(self):
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super(QueryDict, self).setlist(key, list_)
    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super(QueryDict, self).setlistdefault(key, default_list)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).appendlist(key, value)
    def pop(self, key, *args):
        self._assert_mutable()
        return super(QueryDict, self).pop(key, *args)
    def popitem(self):
        self._assert_mutable()
        return super(QueryDict, self).popitem()
    def clear(self):
        self._assert_mutable()
        super(QueryDict, self).clear()
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super(QueryDict, self).setdefault(key, default)
    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.
        :arg safe: Used to specify characters which do not require quoting, for
            example::
            >>> q = QueryDict('', mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
        """
        output = []
        if safe:
            # Quote manually so the caller's safe characters survive.
            safe = force_bytes(safe, self.encoding)
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = force_bytes(k, self.encoding)
            output.extend(encode(k, force_bytes(v, self.encoding))
                          for v in list_)
        return '&'.join(output)
def build_request_repr(request, path_override=None, GET_override=None,
                       POST_override=None, COOKIES_override=None,
                       META_override=None):
    """
    Builds and returns the request's representation string. The request's
    attributes may be overridden by pre-processed values.

    Since this is called as part of error handling, it must be robust
    against potentially malformed input: any attribute that cannot be
    fetched or formatted is rendered as '<could not parse>'.
    """
    def safe_pformat(override, get_attr):
        # Format the override (if given) or the lazily-fetched attribute,
        # swallowing any error raised by malformed request data. The
        # attribute is fetched via a callable so that failures in the
        # property access itself are also caught.
        try:
            return pformat(override) if override is not None else pformat(get_attr())
        except Exception:
            return '<could not parse>'

    get = safe_pformat(GET_override, lambda: request.GET)
    if request._post_parse_error:
        # POST parsing already failed once; do not trigger a re-parse here.
        post = '<could not parse>'
    else:
        post = safe_pformat(POST_override, lambda: request.POST)
    cookies = safe_pformat(COOKIES_override, lambda: request.COOKIES)
    meta = safe_pformat(META_override, lambda: request.META)
    path = path_override if path_override is not None else request.path
    return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                     (request.__class__.__name__,
                      path,
                      six.text_type(get),
                      six.text_type(post),
                      six.text_type(cookies),
                      six.text_type(meta)))
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Convert a bytestring to unicode using the given encoding.

    Illegally encoded input characters are replaced with the Unicode
    replacement character (U+FFFD). Any non-bytes object is returned
    unchanged.
    """
    if not isinstance(s, bytes):
        return s
    return six.text_type(s, encoding, 'replace')
def split_domain_port(host):
    """
    Return a (domain, port) tuple from a given host.

    The returned domain is lower-cased. If the host fails basic validation
    both parts are returned as empty strings.
    """
    host = host.lower()
    if not host_validation_re.match(host):
        return '', ''
    if host.endswith(']'):
        # Bracketed IPv6 literal without an explicit port.
        return host, ''
    domain, sep, port = host.rpartition(':')
    # With no colon present, rpartition puts everything into `port`.
    return (domain, port) if sep else (host, '')
def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.

    Check that the host looks valid and matches a host or host pattern in
    the given list of ``allowed_hosts``. A pattern beginning with a period
    matches a domain and all of its subdomains (``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and
    anything else must match exactly.

    Note: this function assumes that the given host is lower-cased and has
    already had the port, if any, stripped off.

    Return ``True`` for a valid host, ``False`` otherwise.
    """
    # Normalize away the single trailing dot of a fully-qualified name.
    if host.endswith('.'):
        host = host[:-1]
    for raw_pattern in allowed_hosts:
        pattern = raw_pattern.lower()
        if pattern == '*' or pattern == host:
            return True
        if pattern.startswith('.') and (host.endswith(pattern) or
                                        host == pattern[1:]):
            return True
    return False
| {
"content_hash": "827fac365fedacb0fffd982d9671a5fa",
"timestamp": "",
"source": "github",
"line_count": 581,
"max_line_length": 110,
"avg_line_length": 37.63855421686747,
"alnum_prop": 0.56841046277666,
"repo_name": "diego-d5000/MisValesMd",
"id": "d28da556b24c3710ce7c5088e8e4cbb0b92f0941",
"size": "21868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/django/http/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115465"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1415583"
},
{
"name": "JavaScript",
"bytes": "1381588"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "8107650"
},
{
"name": "Shell",
"bytes": "11786"
}
],
"symlink_target": ""
} |
"""
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with the same response
headers exactly like the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key, get_max_age, has_vary_header, learn_cache_key,
patch_response_headers,
)
from django.utils.deprecation import MiddlewareMixin
class UpdateCacheMiddleware(MiddlewareMixin):
    """
    Response-phase half of the two-part caching middleware: stores
    cacheable responses in the configured cache.

    Must be the first piece of middleware in MIDDLEWARE so that it runs
    last during the response phase (response middleware is processed
    bottom-up).
    """
    def __init__(self, get_response=None):
        self.get_response = get_response
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.page_timeout = None
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = caches[self.cache_alias]

    def _should_update_cache(self, request, response):
        # FetchFromCacheMiddleware sets this flag during the request phase.
        return getattr(request, '_cache_update_cache', False)

    def process_response(self, request, response):
        """Store the response in the cache when it is cacheable."""
        if not self._should_update_cache(request, response):
            # The fetch middleware decided this request must not refresh
            # the cache (or never saw the request at all).
            return response
        if response.streaming or response.status_code not in (200, 304):
            return response
        # A cookie set in response to a cookie-less request may be
        # user-specific (and security sensitive); never share it via the
        # cache.
        if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
            return response
        # Respect an explicit 'Cache-Control: private'.
        if 'private' in response.get('Cache-Control', ()):
            return response
        # Timeout precedence: explicit page timeout, then the response's
        # "max-age", then the site-wide default.
        timeout = self.page_timeout
        if timeout is None:
            max_age = get_max_age(response)
            if max_age == 0:
                # max-age=0 explicitly forbids caching.
                return response
            timeout = self.cache_timeout if max_age is None else max_age
        patch_response_headers(response, timeout)
        if timeout and response.status_code == 200:
            cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
            if hasattr(response, 'render') and callable(response.render):
                # TemplateResponse: defer the cache write until the
                # response has actually been rendered.
                response.add_post_render_callback(
                    lambda rendered: self.cache.set(cache_key, rendered, timeout)
                )
            else:
                self.cache.set(cache_key, response, timeout)
        return response
class FetchFromCacheMiddleware(MiddlewareMixin):
    """
    Request-phase half of the two-part caching middleware: serves pages
    from the cache when possible.

    Must be the last piece of middleware in MIDDLEWARE so that it runs
    last during the request phase (request middleware is processed
    top-down).
    """
    def __init__(self, get_response=None):
        self.get_response = get_response
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = caches[self.cache_alias]

    def process_request(self, request):
        """
        Return the cached response for this request if one exists,
        otherwise None — flagging on the request whether the eventual
        response should be written back to the cache.
        """
        if request.method not in ('GET', 'HEAD'):
            # Unsafe methods are never served from (or stored in) the cache.
            request._cache_update_cache = False
            return None
        # Probe for a response cached under the GET method first.
        key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
        if key is None:
            # No header information was ever cached for this URL; the page
            # must be rebuilt (and then cached).
            request._cache_update_cache = True
            return None
        response = self.cache.get(key)
        if response is None and request.method == 'HEAD':
            # Miss on the GET entry: fall back to a response cached
            # specifically for HEAD.
            key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
            response = self.cache.get(key)
        if response is None:
            request._cache_update_cache = True
            return None
        # Cache hit: serve it and skip the update phase.
        request._cache_update_cache = False
        return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
    """
    Single-class cache middleware providing basic behavior for simple
    sites. Also the hook point for the cache decorator, which is built
    with the decorator-from-middleware utility.
    """
    def __init__(self, get_response=None, cache_timeout=None, page_timeout=None, **kwargs):
        self.get_response = get_response
        # Each keyword option has three states: absent (use the middleware
        # default from settings), present but None (use the built-in
        # fallback), or an explicit value.
        if 'key_prefix' in kwargs:
            key_prefix = kwargs['key_prefix']
            if key_prefix is None:
                key_prefix = ''
        else:
            key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.key_prefix = key_prefix

        if 'cache_alias' in kwargs:
            cache_alias = kwargs['cache_alias']
            if cache_alias is None:
                cache_alias = DEFAULT_CACHE_ALIAS
        else:
            cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache_alias = cache_alias

        self.cache_timeout = (settings.CACHE_MIDDLEWARE_SECONDS
                              if cache_timeout is None else cache_timeout)
        self.page_timeout = page_timeout
        self.cache = caches[self.cache_alias]
| {
"content_hash": "b97f68035de5ab34d87f0776d4c003c6",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 102,
"avg_line_length": 41.07216494845361,
"alnum_prop": 0.6708082329317269,
"repo_name": "simonw/django",
"id": "9705270b59277333c5ac460ca2abc2029789bb65",
"size": "7968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/middleware/cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
import time
import mock
import shutil
import tempfile
import unittest
from hashlib import md5
from six.moves import urllib
from textwrap import dedent
from swift.common import swob
from swift.common.middleware import copy
from swift.common.storage_policy import POLICIES
from swift.common.swob import Request, HTTPException
from swift.common.utils import closing_if_possible
from test.unit import patch_policies, debug_logger, FakeMemcache, FakeRing
from test.unit.common.middleware.helpers import FakeSwift
from test.unit.proxy.controllers.test_obj import set_http_connect, \
PatchedObjControllerApp
class TestCopyConstraints(unittest.TestCase):
    """Parsing/validation of the X-Copy-From and Destination headers."""

    def test_validate_copy_from(self):
        # Leading slash is optional; slashes inside the object name are kept.
        for header, expected in (
                ('c/o2', ('c', 'o2')),
                ('c/subdir/o2', ('c', 'subdir/o2')),
                ('/c/o2', ('c', 'o2'))):
            req = Request.blank('/v/a/c/o',
                                headers={'x-copy-from': header})
            self.assertEqual(copy._check_copy_from_header(req), expected)

    def test_validate_bad_copy_from(self):
        req = Request.blank('/v/a/c/o',
                            headers={'x-copy-from': 'bad_object'})
        self.assertRaises(HTTPException,
                          copy._check_copy_from_header, req)

    def test_validate_destination(self):
        for header, expected in (
                ('c/o2', ('c', 'o2')),
                ('c/subdir/o2', ('c', 'subdir/o2')),
                ('/c/o2', ('c', 'o2'))):
            req = Request.blank('/v/a/c/o',
                                headers={'destination': header})
            self.assertEqual(copy._check_destination_header(req), expected)

    def test_validate_bad_destination(self):
        req = Request.blank('/v/a/c/o',
                            headers={'destination': 'bad_object'})
        self.assertRaises(HTTPException,
                          copy._check_destination_header, req)
class TestServerSideCopyMiddleware(unittest.TestCase):
    def setUp(self):
        # Stand up the copy middleware over a fake Swift backend, with the
        # legacy object_post_as_copy behavior enabled.
        self.app = FakeSwift()
        self.ssc = copy.filter_factory({
            'object_post_as_copy': 'yes',
        })(self.app)
        self.ssc.logger = self.app.logger
    def tearDown(self):
        # Every request body opened against the fake backend must have
        # been closed by the middleware.
        self.assertEqual(self.app.unclosed_requests, {})
    def call_app(self, req, app=None, expect_exception=False):
        # Drive `req` through `app` (default: the bare fake backend),
        # recording every request that reaches swift.authorize in
        # self.authorized. Returns (status, headers, body[, exception]).
        if app is None:
            app = self.app
        self.authorized = []
        def authorize(req):
            self.authorized.append(req)
        if 'swift.authorize' not in req.environ:
            req.environ['swift.authorize'] = authorize
        req.headers.setdefault("User-Agent", "Bruce Wayne")
        # WSGI start_response captures status and headers by closure.
        status = [None]
        headers = [None]
        def start_response(s, h, ei=None):
            status[0] = s
            headers[0] = h
        body_iter = app(req.environ, start_response)
        body = ''
        caught_exc = None
        try:
            # appease the close-checker
            with closing_if_possible(body_iter):
                for chunk in body_iter:
                    body += chunk
        except Exception as exc:
            if expect_exception:
                caught_exc = exc
            else:
                raise
        if expect_exception:
            return status[0], headers[0], body, caught_exc
        else:
            return status[0], headers[0], body
    def call_ssc(self, req, **kwargs):
        # Shortcut: drive the request through the server-side-copy stack.
        return self.call_app(req, app=self.ssc, **kwargs)
def assertRequestEqual(self, req, other):
self.assertEqual(req.method, other.method)
self.assertEqual(req.path, other.path)
    def test_no_object_in_path_pass_through(self):
        # Container-level requests are not copy candidates: passed through.
        self.app.register('PUT', '/v1/a/c', swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c', method='PUT')
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_object_pass_through_methods(self):
        # Non-copy object methods pass straight through the middleware.
        for method in ['DELETE', 'GET', 'HEAD', 'REPLICATE']:
            self.app.register(method, '/v1/a/c/o', swob.HTTPOk, {})
            req = Request.blank('/v1/a/c/o', method=method)
            status, headers, body = self.call_ssc(req)
            self.assertEqual(status, '200 OK')
            self.assertEqual(len(self.authorized), 1)
            self.assertRequestEqual(req, self.authorized[0])
            self.assertNotIn('swift.orig_req_method', req.environ)
    def test_POST_as_COPY_simple(self):
        # With object_post_as_copy on, a POST becomes GET + PUT and the
        # PUT's 200-family status is reported back as 202 Accepted.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {})
        req = Request.blank('/v1/a/c/o', method='POST')
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '202 Accepted')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
        # For basic test cases, assert orig_req_method behavior
        self.assertEqual(req.environ['swift.orig_req_method'], 'POST')
    def test_POST_as_COPY_201_return_202(self):
        # The internal PUT's 201 Created is translated to 202 Accepted,
        # matching what a plain POST would have returned.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c/o', method='POST')
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '202 Accepted')
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_POST_delete_at(self):
        # The X-Delete-At header must be carried over onto the internal PUT.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {})
        t = str(int(time.time() + 100))
        req = Request.blank('/v1/a/c/o', method='POST',
                            headers={'Content-Type': 'foo/bar',
                                     'X-Delete-At': t})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '202 Accepted')
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[1]
        self.assertEqual('PUT', method)
        self.assertTrue('X-Delete-At' in req_headers)
        self.assertEqual(req_headers['X-Delete-At'], str(t))
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_POST_as_COPY_static_large_object(self):
        # The SLO marker must NOT be forwarded to the internal PUT, or the
        # manifest would be re-interpreted.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                          {'X-Static-Large-Object': True}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPAccepted, {})
        req = Request.blank('/v1/a/c/o', method='POST',
                            headers={})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '202 Accepted')
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[1]
        self.assertEqual('PUT', method)
        self.assertNotIn('X-Static-Large-Object', req_headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_POST_as_COPY_dynamic_large_object_manifest(self):
        # A client-supplied X-Object-Manifest replaces the original one on
        # the internal PUT.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                          {'X-Object-Manifest': 'orig_manifest'}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c/o', method='POST',
                            headers={'X-Object-Manifest': 'new_manifest'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '202 Accepted')
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[1]
        self.assertEqual('PUT', method)
        self.assertEqual('new_manifest', req_headers['x-object-manifest'])
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_POST_as_COPY_dynamic_large_object_no_manifest(self):
        # Without a client-supplied manifest header, the original
        # X-Object-Manifest is dropped from the internal PUT.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                          {'X-Object-Manifest': 'orig_manifest'}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c/o', method='POST',
                            headers={})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '202 Accepted')
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[1]
        self.assertEqual('PUT', method)
        self.assertNotIn('X-Object-Manifest', req_headers)
        self.assertEqual(len(self.authorized), 1)
        self.assertRequestEqual(req, self.authorized[0])
    def test_basic_put_with_x_copy_from(self):
        # PUT with X-Copy-From: both source GET and destination PUT are
        # authorized, and the response advertises X-Copied-From.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a/c/o2', swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c/o2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c/o'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertTrue(('X-Copied-From', 'c/o') in headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a/c/o', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
        self.assertEqual(self.app.swift_sources[0], 'SSC')
        self.assertEqual(self.app.swift_sources[1], 'SSC')
        # For basic test cases, assert orig_req_method behavior
        self.assertNotIn('swift.orig_req_method', req.environ)
    def test_static_large_object_manifest(self):
        # Copying an SLO with ?multipart-manifest=get fetches the raw
        # manifest and re-PUTs it with ?multipart-manifest=put, dropping
        # the SLO marker and Etag headers.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                          {'X-Static-Large-Object': 'True',
                           'Etag': 'should not be sent'}, 'passed')
        self.app.register('PUT', '/v1/a/c/o2?multipart-manifest=put',
                          swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c/o2?multipart-manifest=get',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c/o'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertTrue(('X-Copied-From', 'c/o') in headers)
        self.assertEqual(2, len(self.app.calls))
        self.assertEqual('GET', self.app.calls[0][0])
        get_path, qs = self.app.calls[0][1].split('?')
        params = urllib.parse.parse_qs(qs)
        self.assertDictEqual(
            {'format': ['raw'], 'multipart-manifest': ['get']}, params)
        self.assertEqual(get_path, '/v1/a/c/o')
        self.assertEqual(self.app.calls[1],
                         ('PUT', '/v1/a/c/o2?multipart-manifest=put'))
        req_headers = self.app.headers[1]
        self.assertNotIn('X-Static-Large-Object', req_headers)
        self.assertNotIn('Etag', req_headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a/c/o', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
    def test_static_large_object(self):
        # Plain copy of an SLO (no multipart-manifest param) copies the
        # large object's content; SLO marker and Etag are not forwarded.
        self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                          {'X-Static-Large-Object': 'True',
                           'Etag': 'should not be sent'}, 'passed')
        self.app.register('PUT', '/v1/a/c/o2',
                          swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c/o2',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c/o'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertTrue(('X-Copied-From', 'c/o') in headers)
        self.assertEqual(self.app.calls, [
            ('GET', '/v1/a/c/o'),
            ('PUT', '/v1/a/c/o2')])
        req_headers = self.app.headers[1]
        self.assertNotIn('X-Static-Large-Object', req_headers)
        self.assertNotIn('Etag', req_headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a/c/o', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a/c/o2', self.authorized[1].path)
    def test_basic_put_with_x_copy_from_across_container(self):
        # Source and destination may live in different containers.
        self.app.register('GET', '/v1/a/c1/o1', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a/c2/o2', swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c1/o1'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertTrue(('X-Copied-From', 'c1/o1') in headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a/c1/o1', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a/c2/o2', self.authorized[1].path)
    def test_basic_put_with_x_copy_from_across_container_and_account(self):
        # X-Copy-From-Account enables cross-account copies; the response
        # reports both X-Copied-From and X-Copied-From-Account.
        self.app.register('GET', '/v1/a1/c1/o1', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {},
                          'passed')
        req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c1/o1',
                                     'X-Copy-From-Account': 'a1'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertTrue(('X-Copied-From', 'c1/o1') in headers)
        self.assertTrue(('X-Copied-From-Account', 'a1') in headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a1/c1/o1', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path)
    def test_copy_non_zero_content_length(self):
        # A copy PUT must have a zero Content-Length; anything else is a
        # client error.
        req = Request.blank('/v1/a/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '10',
                                     'X-Copy-From': 'c1/o1'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '400 Bad Request')
    def test_copy_non_zero_content_length_with_account(self):
        # Same zero-Content-Length rule applies to cross-account copies.
        req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '10',
                                     'X-Copy-From': 'c1/o1',
                                     'X-Copy-From-Account': 'a1'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '400 Bad Request')
    def test_copy_with_slashes_in_x_copy_from(self):
        # Only the first slash separates container from object; the rest
        # belongs to the object name.
        self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
        req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c/o/o2'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertTrue(('X-Copied-From', 'c/o/o2') in headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a/c/o', self.authorized[1].path)
    def test_copy_with_slashes_in_x_copy_from_and_account(self):
        # Slashes in the source object name work for cross-account copies too.
        self.app.register('GET', '/v1/a1/c1/o/o1', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a2/c2/o2', swob.HTTPCreated, {})
        req = Request.blank('/v1/a2/c2/o2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c1/o/o1',
                                     'X-Copy-From-Account': 'a1'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        self.assertTrue(('X-Copied-From', 'c1/o/o1') in headers)
        self.assertTrue(('X-Copied-From-Account', 'a1') in headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a1/c1/o/o1', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a2/c2/o2', self.authorized[1].path)
    def test_copy_with_spaces_in_x_copy_from(self):
        # %20 in X-Copy-From is decoded for the backend GET but echoed
        # back percent-encoded in X-Copied-From.
        self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
        # space in source path
        req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c/o%20o2'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('GET', method)
        self.assertEqual('/v1/a/c/o o2', path)
        self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a/c/o', self.authorized[1].path)
    def test_copy_with_spaces_in_x_copy_from_and_account(self):
        # Percent-encoded spaces in the source also work cross-account.
        self.app.register('GET', '/v1/a/c/o o2', swob.HTTPOk, {}, 'passed')
        self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
        # space in source path
        req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'Content-Length': '0',
                                     'X-Copy-From': 'c/o%20o2',
                                     'X-Copy-From-Account': 'a'})
        status, headers, body = self.call_ssc(req)
        self.assertEqual(status, '201 Created')
        calls = self.app.calls_with_headers
        method, path, req_headers = calls[0]
        self.assertEqual('GET', method)
        self.assertEqual('/v1/a/c/o o2', path)
        self.assertTrue(('X-Copied-From', 'c/o%20o2') in headers)
        self.assertTrue(('X-Copied-From-Account', 'a') in headers)
        self.assertEqual(len(self.authorized), 2)
        self.assertEqual('GET', self.authorized[0].method)
        self.assertEqual('/v1/a/c/o%20o2', self.authorized[0].path)
        self.assertEqual('PUT', self.authorized[1].method)
        self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_leading_slash_in_x_copy_from(self):
    """A leading slash in X-Copy-From is accepted and normalized away."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    # repeat tests with leading /
    req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Content-Length': '0',
                                 'X-Copy-From': '/c/o'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[0]
    self.assertEqual('GET', method)
    self.assertEqual('/v1/a/c/o', path)
    # X-Copied-From is reported without the leading slash
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_leading_slash_in_x_copy_from_and_account(self):
    """Leading slash in X-Copy-From works with X-Copy-From-Account too."""
    # repeat tests with leading /
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Content-Length': '0',
                                 'X-Copy-From': '/c/o',
                                 'X-Copy-From-Account': 'a'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[0]
    self.assertEqual('GET', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertIn(('X-Copied-From-Account', 'a'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_leading_slash_and_slashes_in_x_copy_from(self):
    """Slashes inside the source object name are preserved in the copy."""
    self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Content-Length': '0',
                                 'X-Copy-From': '/c/o/o2'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[0]
    self.assertEqual('GET', method)
    self.assertEqual('/v1/a/c/o/o2', path)
    self.assertIn(('X-Copied-From', 'c/o/o2'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_leading_slash_and_slashes_in_x_copy_from_acct(self):
    """Slashes inside the source object name work cross-account as well."""
    self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Content-Length': '0',
                                 'X-Copy-From': '/c/o/o2',
                                 'X-Copy-From-Account': 'a'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[0]
    self.assertEqual('GET', method)
    self.assertEqual('/v1/a/c/o/o2', path)
    self.assertIn(('X-Copied-From', 'c/o/o2'), headers)
    self.assertIn(('X-Copied-From-Account', 'a'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_copy_with_no_object_in_x_copy_from(self):
    """An X-Copy-From naming only a container is rejected with 412."""
    put_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0', 'X-Copy-From': '/c'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('412 Precondition Failed', resp_status)
def test_copy_with_no_object_in_x_copy_from_and_account(self):
    """Container-only X-Copy-From is rejected even with an account given."""
    put_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0',
                 'X-Copy-From': '/c',
                 'X-Copy-From-Account': 'a'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('412 Precondition Failed', resp_status)
def test_copy_with_bad_x_copy_from_account(self):
    """A malformed X-Copy-From-Account value is rejected with 412."""
    put_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0',
                 'X-Copy-From': '/c/o',
                 'X-Copy-From-Account': '/i/am/bad'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('412 Precondition Failed', resp_status)
def test_copy_server_error_reading_source(self):
    """A 503 from the source GET is passed through to the client."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
    put_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0', 'X-Copy-From': '/c/o'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('503 Service Unavailable', resp_status)
def test_copy_server_error_reading_source_and_account(self):
    """Cross-account copy surfaces a 503 source error; only GET was authorized."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
    put_req = Request.blank(
        '/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0',
                 'X-Copy-From': '/c/o',
                 'X-Copy-From-Account': 'a'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('503 Service Unavailable', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_copy_not_found_reading_source(self):
    """A 404 from the source GET is passed through; only GET was authorized."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
    put_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0', 'X-Copy-From': '/c/o'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('404 Not Found', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_copy_not_found_reading_source_and_account(self):
    """Cross-account copy surfaces a 404 source error; only GET was authorized."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
    put_req = Request.blank(
        '/v1/a1/c1/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0',
                 'X-Copy-From': '/c/o',
                 'X-Copy-From-Account': 'a'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('404 Not Found', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_copy_with_object_metadata(self):
    """User metadata supplied on the copy request is applied to the PUT."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Content-Length': '0',
                                 'X-Copy-From': '/c/o',
                                 'X-Object-Meta-Ours': 'okay'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
    self.assertIn(('X-Object-Meta-Ours', 'okay'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_with_object_metadata_and_account(self):
    """User metadata on a cross-account copy is applied to the PUT."""
    self.app.register('GET', '/v1/a1/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Content-Length': '0',
                                 'X-Copy-From': '/c/o',
                                 'X-Object-Meta-Ours': 'okay',
                                 'X-Copy-From-Account': 'a1'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
    self.assertIn(('X-Object-Meta-Ours', 'okay'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a1/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_copy_source_larger_than_max_file_size(self):
    """Copy fails with 413 when the source body exceeds MAX_FILE_SIZE."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
    put_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
        headers={'Content-Length': '0', 'X-Copy-From': '/c/o'})
    with mock.patch('swift.common.middleware.copy.MAX_FILE_SIZE', 1):
        resp_status, _resp_headers, _resp_body = self.call_ssc(put_req)
    self.assertEqual('413 Request Entity Too Large', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_basic_COPY(self):
    """Basic COPY turns into GET + PUT, forwarding the source etag."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
        'etag': 'is sent'}, 'passed')
    self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
    req = Request.blank(
        '/v1/a/c/o', method='COPY',
        headers={'Content-Length': 0,
                 'Destination': 'c/o-copy'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o-copy', self.authorized[1].path)
    self.assertEqual(self.app.calls, [
        ('GET', '/v1/a/c/o'),
        ('PUT', '/v1/a/c/o-copy')])
    self.assertIn('etag', self.app.headers[1])
    self.assertEqual(self.app.headers[1]['etag'], 'is sent')
    # For basic test cases, assert orig_req_method behavior
    self.assertEqual(req.environ['swift.orig_req_method'], 'COPY')
def test_basic_DLO(self):
    """Copying a DLO copies the segments: manifest header and etag dropped."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
        'x-object-manifest': 'some/path',
        'etag': 'is not sent'}, 'passed')
    self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
    req = Request.blank(
        '/v1/a/c/o', method='COPY',
        headers={'Content-Length': 0,
                 'Destination': 'c/o-copy'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertEqual(self.app.calls, [
        ('GET', '/v1/a/c/o'),
        ('PUT', '/v1/a/c/o-copy')])
    self.assertNotIn('x-object-manifest', self.app.headers[1])
    self.assertNotIn('etag', self.app.headers[1])
def test_basic_DLO_manifest(self):
    """COPY with multipart-manifest=get copies the manifest itself."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {
        'x-object-manifest': 'some/path',
        'etag': 'is sent'}, 'passed')
    self.app.register('PUT', '/v1/a/c/o-copy', swob.HTTPCreated, {})
    req = Request.blank(
        '/v1/a/c/o?multipart-manifest=get', method='COPY',
        headers={'Content-Length': 0,
                 'Destination': 'c/o-copy'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertEqual(2, len(self.app.calls))
    self.assertEqual('GET', self.app.calls[0][0])
    get_path, qs = self.app.calls[0][1].split('?')
    params = urllib.parse.parse_qs(qs)
    # the source GET must request the raw manifest
    self.assertDictEqual(
        {'format': ['raw'], 'multipart-manifest': ['get']}, params)
    self.assertEqual(get_path, '/v1/a/c/o')
    self.assertEqual(self.app.calls[1], ('PUT', '/v1/a/c/o-copy'))
    self.assertIn('x-object-manifest', self.app.headers[1])
    self.assertEqual(self.app.headers[1]['x-object-manifest'], 'some/path')
    self.assertIn('etag', self.app.headers[1])
    self.assertEqual(self.app.headers[1]['etag'], 'is sent')
def test_COPY_source_metadata(self):
    # Verify that COPY and PUT-with-X-Copy-From propagate source object
    # metadata (sysmeta, user meta, transient sysmeta, container-update
    # override headers, x-delete-at) to the destination, and that a
    # ranged COPY drops the override headers and does not forward etag.
    source_headers = {
        'x-object-sysmeta-test1': 'copy me',
        'x-object-meta-test2': 'copy me too',
        'x-object-transient-sysmeta-test3': 'ditto',
        'x-object-sysmeta-container-update-override-etag': 'etag val',
        'x-object-sysmeta-container-update-override-size': 'size val',
        'x-object-sysmeta-container-update-override-foo': 'bar',
        'x-delete-at': 'delete-at-time'}

    get_resp_headers = source_headers.copy()
    get_resp_headers['etag'] = 'source etag'
    self.app.register(
        'GET', '/v1/a/c/o', swob.HTTPOk,
        headers=get_resp_headers, body='passed')

    def verify_headers(expected_headers, unexpected_headers,
                       actual_headers):
        # Every expected header must appear with its expected value
        # (pop so duplicates/missing are both caught) and no header
        # from unexpected_headers may appear at all.
        for k, v in actual_headers:
            if k.lower() in expected_headers:
                expected_val = expected_headers.pop(k.lower())
                self.assertEqual(expected_val, v)
            self.assertNotIn(k.lower(), unexpected_headers)
        # all expected headers were found and consumed
        self.assertFalse(expected_headers)

    # use a COPY request
    self.app.register('PUT', '/v1/a/c/o-copy0', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', method='COPY',
                        headers={'Content-Length': 0,
                                 'Destination': 'c/o-copy0'})
    status, resp_headers, body = self.call_ssc(req)
    self.assertEqual('201 Created', status)
    verify_headers(source_headers.copy(), [], resp_headers)
    method, path, put_headers = self.app.calls_with_headers[-1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o-copy0', path)
    verify_headers(source_headers.copy(), [], put_headers.items())
    # a full-object copy forwards the source etag on the PUT
    self.assertIn('etag', put_headers)
    self.assertEqual(put_headers['etag'], 'source etag')
    req = Request.blank('/v1/a/c/o-copy0', method='GET')
    status, resp_headers, body = self.call_ssc(req)
    self.assertEqual('200 OK', status)
    verify_headers(source_headers.copy(), [], resp_headers)

    # use a COPY request with a Range header
    self.app.register('PUT', '/v1/a/c/o-copy1', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', method='COPY',
                        headers={'Content-Length': 0,
                                 'Destination': 'c/o-copy1',
                                 'Range': 'bytes=1-2'})
    status, resp_headers, body = self.call_ssc(req)
    expected_headers = source_headers.copy()
    # a ranged copy creates a different object, so the container
    # update override headers must not be copied across
    unexpected_headers = (
        'x-object-sysmeta-container-update-override-etag',
        'x-object-sysmeta-container-update-override-size',
        'x-object-sysmeta-container-update-override-foo')
    for h in unexpected_headers:
        expected_headers.pop(h)
    self.assertEqual('201 Created', status)
    verify_headers(expected_headers, unexpected_headers, resp_headers)
    method, path, put_headers = self.app.calls_with_headers[-1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o-copy1', path)
    verify_headers(
        expected_headers, unexpected_headers, put_headers.items())
    # etag should not be copied with a Range request
    self.assertNotIn('etag', put_headers)
    req = Request.blank('/v1/a/c/o-copy1', method='GET')
    status, resp_headers, body = self.call_ssc(req)
    self.assertEqual('200 OK', status)
    verify_headers(expected_headers, unexpected_headers, resp_headers)

    # use a PUT with x-copy-from
    self.app.register('PUT', '/v1/a/c/o-copy2', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o-copy2', method='PUT',
                        headers={'Content-Length': 0,
                                 'X-Copy-From': 'c/o'})
    status, resp_headers, body = self.call_ssc(req)
    self.assertEqual('201 Created', status)
    verify_headers(source_headers.copy(), [], resp_headers)
    method, path, put_headers = self.app.calls_with_headers[-1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o-copy2', path)
    verify_headers(source_headers.copy(), [], put_headers.items())
    self.assertIn('etag', put_headers)
    self.assertEqual(put_headers['etag'], 'source etag')
    req = Request.blank('/v1/a/c/o-copy2', method='GET')
    status, resp_headers, body = self.call_ssc(req)
    self.assertEqual('200 OK', status)
    verify_headers(source_headers.copy(), [], resp_headers)

    # copy to same path as source
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', method='PUT',
                        headers={'Content-Length': 0,
                                 'X-Copy-From': 'c/o'})
    status, resp_headers, body = self.call_ssc(req)
    self.assertEqual('201 Created', status)
    verify_headers(source_headers.copy(), [], resp_headers)
    method, path, put_headers = self.app.calls_with_headers[-1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o', path)
    verify_headers(source_headers.copy(), [], put_headers.items())
    self.assertIn('etag', put_headers)
    self.assertEqual(put_headers['etag'], 'source etag')
def test_COPY_no_destination_header(self):
    """COPY without a Destination header fails 412 before any auth check."""
    copy_req = Request.blank(
        '/v1/a/c/o', method='COPY', headers={'Content-Length': 0})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('412 Precondition Failed', resp_status)
    self.assertEqual(0, len(self.authorized))
def test_basic_COPY_account(self):
    """COPY with Destination-Account writes into the other account."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a1/c1/o2', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': 'c1/o2',
                                 'Destination-Account': 'a1'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[0]
    self.assertEqual('GET', method)
    self.assertEqual('/v1/a/c/o', path)
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a1/c1/o2', path)
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertIn(('X-Copied-From-Account', 'a'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o2', self.authorized[1].path)
def test_COPY_across_containers(self):
    """COPY may target a different container in the same account."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c2/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': 'c2/o'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c2/o', self.authorized[1].path)
def test_COPY_source_with_slashes_in_name(self):
    """Slashes in the source object name are preserved by COPY."""
    self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o/o2',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': 'c/o'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertIn(('X-Copied-From', 'c/o/o2'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_source_with_slashes_in_name(self):
    """Slashes in the source name work with a Destination-Account too."""
    self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o/o2',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': 'c1/o',
                                 'Destination-Account': 'a1'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a1/c1/o', path)
    self.assertIn(('X-Copied-From', 'c/o/o2'), headers)
    self.assertIn(('X-Copied-From-Account', 'a'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_destination_leading_slash(self):
    """A leading slash in Destination is accepted and normalized away."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c/o'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_destination_leading_slash(self):
    """Leading slash in Destination works with Destination-Account too."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c1/o',
                                 'Destination-Account': 'a1'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a1/c1/o', path)
    self.assertIn(('X-Copied-From', 'c/o'), headers)
    self.assertIn(('X-Copied-From-Account', 'a'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_source_with_slashes_destination_leading_slash(self):
    """Slashes in the source name combine with a leading-slash Destination."""
    self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o/o2',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c/o'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertIn(('X-Copied-From', 'c/o/o2'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_source_with_slashes_destination_leading_slash(self):
    """Cross-account COPY: slashed source name + leading-slash Destination."""
    self.app.register('GET', '/v1/a/c/o/o2', swob.HTTPOk, {}, 'passed')
    self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o/o2',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c1/o',
                                 'Destination-Account': 'a1'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a1/c1/o', path)
    self.assertIn(('X-Copied-From', 'c/o/o2'), headers)
    self.assertIn(('X-Copied-From-Account', 'a'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o/o2', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_no_object_in_destination(self):
    """A Destination without a container/object split is rejected with 412."""
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': 'c_o'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('412 Precondition Failed', resp_status)
def test_COPY_account_no_object_in_destination(self):
    """Object-less Destination is rejected even with Destination-Account."""
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': 'c_o',
                 'Destination-Account': 'a1'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('412 Precondition Failed', resp_status)
def test_COPY_account_bad_destination_account(self):
    """A malformed Destination-Account value is rejected with 412."""
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c/o',
                 'Destination-Account': '/i/am/bad'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('412 Precondition Failed', resp_status)
def test_COPY_server_error_reading_source(self):
    """A 503 on the source GET aborts COPY; only GET was authorized."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c/o'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('503 Service Unavailable', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_account_server_error_reading_source(self):
    """Cross-account COPY surfaces a 503 source error before the PUT."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPServiceUnavailable, {})
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c1/o',
                 'Destination-Account': 'a1'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('503 Service Unavailable', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_not_found_reading_source(self):
    """A 404 on the source GET aborts COPY; only GET was authorized."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c/o'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('404 Not Found', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_account_not_found_reading_source(self):
    """Cross-account COPY surfaces a 404 source error before the PUT."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPNotFound, {})
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c1/o',
                 'Destination-Account': 'a1'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('404 Not Found', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_with_metadata(self):
    """User metadata on the COPY request is applied to the destination PUT."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed")
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c/o',
                                 'X-Object-Meta-Ours': 'okay'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
    self.assertIn(('X-Object-Meta-Ours', 'okay'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_with_metadata(self):
    """User metadata on a cross-account COPY is applied to the PUT."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "passed")
    self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c1/o',
                                 'X-Object-Meta-Ours': 'okay',
                                 'Destination-Account': 'a1'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a1/c1/o', path)
    self.assertEqual(req_headers['X-Object-Meta-Ours'], 'okay')
    self.assertIn(('X-Object-Meta-Ours', 'okay'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_source_zero_content_length(self):
    """COPY of a source with no body yields 413 after authorizing the GET."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c/o'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('413 Request Entity Too Large', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_source_larger_than_max_file_size(self):
    """COPY fails with 413 when the source body exceeds MAX_FILE_SIZE."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c/o'})
    with mock.patch('swift.common.middleware.copy.MAX_FILE_SIZE', 1):
        resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('413 Request Entity Too Large', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_account_source_zero_content_length(self):
    """Cross-account COPY of a body-less source also yields 413."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c/o',
                 'Destination-Account': 'a1'})
    resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('413 Request Entity Too Large', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_account_source_larger_than_max_file_size(self):
    """Cross-account COPY fails with 413 when the source is too large."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, "largebody")
    copy_req = Request.blank(
        '/v1/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
        headers={'Destination': '/c1/o',
                 'Destination-Account': 'a1'})
    with mock.patch('swift.common.middleware.copy.MAX_FILE_SIZE', 1):
        resp_status, _resp_headers, _resp_body = self.call_ssc(copy_req)
    self.assertEqual('413 Request Entity Too Large', resp_status)
    self.assertEqual(1, len(self.authorized))
    auth_req = self.authorized[0]
    self.assertEqual('GET', auth_req.method)
    self.assertEqual('/v1/a/c/o', auth_req.path)
def test_COPY_newest(self):
    """The source's Last-Modified is echoed as X-Copied-From-Last-Modified."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                      {'Last-Modified': '123'}, "passed")
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c/o'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    self.assertIn(('X-Copied-From-Last-Modified', '123'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a/c/o', self.authorized[1].path)
def test_COPY_account_newest(self):
    """Cross-account COPY also reports X-Copied-From-Last-Modified."""
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                      {'Last-Modified': '123'}, "passed")
    self.app.register('PUT', '/v1/a1/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'COPY'},
                        headers={'Destination': '/c1/o',
                                 'Destination-Account': 'a1'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    self.assertIn(('X-Copied-From-Last-Modified', '123'), headers)
    self.assertEqual(len(self.authorized), 2)
    self.assertEqual('GET', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    self.assertEqual('PUT', self.authorized[1].method)
    self.assertEqual('/v1/a1/c1/o', self.authorized[1].path)
def test_COPY_in_OPTIONS_response(self):
    """The middleware appends COPY to the Allow header in OPTIONS replies."""
    self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk,
                      {'Allow': 'GET, PUT'})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'OPTIONS'}, headers={})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '200 OK')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[0]
    self.assertEqual('OPTIONS', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertIn(('Allow', 'GET, PUT, COPY'), headers)
    self.assertEqual(len(self.authorized), 1)
    self.assertEqual('OPTIONS', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
    # For basic test cases, assert orig_req_method behavior
    self.assertNotIn('swift.orig_req_method', req.environ)
def test_COPY_in_OPTIONS_response_CORS(self):
    """COPY is appended to Access-Control-Allow-Methods as well as Allow."""
    self.app.register('OPTIONS', '/v1/a/c/o', swob.HTTPOk,
                      {'Allow': 'GET, PUT',
                       'Access-Control-Allow-Methods': 'GET, PUT'})
    req = Request.blank('/v1/a/c/o',
                        environ={'REQUEST_METHOD': 'OPTIONS'}, headers={})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '200 OK')
    calls = self.app.calls_with_headers
    method, path, req_headers = calls[0]
    self.assertEqual('OPTIONS', method)
    self.assertEqual('/v1/a/c/o', path)
    self.assertIn(('Allow', 'GET, PUT, COPY'), headers)
    self.assertIn(('Access-Control-Allow-Methods',
                   'GET, PUT, COPY'), headers)
    self.assertEqual(len(self.authorized), 1)
    self.assertEqual('OPTIONS', self.authorized[0].method)
    self.assertEqual('/v1/a/c/o', self.authorized[0].path)
def _test_COPY_source_headers(self, extra_put_headers):
    # helper method to perform a COPY with some metadata headers that
    # should always be sent to the destination
    # Returns the headers of the resulting backend PUT so callers can make
    # further assertions about which source headers were carried over.
    put_headers = {'Destination': '/c1/o',
                   'X-Object-Meta-Test2': 'added',
                   'X-Object-Sysmeta-Test2': 'added',
                   'X-Object-Transient-Sysmeta-Test2': 'added'}
    put_headers.update(extra_put_headers)
    # headers the fake backend returns on the GET of the copy source
    get_resp_headers = {
        'X-Timestamp': '1234567890.12345',
        'X-Backend-Timestamp': '1234567890.12345',
        'Content-Type': 'text/original',
        'Content-Encoding': 'gzip',
        'Content-Disposition': 'attachment; filename=myfile',
        'X-Object-Meta-Test': 'original',
        'X-Object-Sysmeta-Test': 'original',
        'X-Object-Transient-Sysmeta-Test': 'original',
        'X-Foo': 'Bar'}
    self.app.register(
        'GET', '/v1/a/c/o', swob.HTTPOk, headers=get_resp_headers)
    self.app.register('PUT', '/v1/a/c1/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', method='COPY', headers=put_headers)
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    # COPY is turned into a source GET followed by a destination PUT
    self.assertEqual(2, len(calls))
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    # these headers should always be applied to the destination
    self.assertEqual('added', req_headers.get('X-Object-Meta-Test2'))
    self.assertEqual('added', req_headers.get('X-Object-Sysmeta-Test2'))
    self.assertEqual('added',
                     req_headers.get('X-Object-Transient-Sysmeta-Test2'))
    return req_headers
def test_COPY_source_headers_no_updates(self):
    # copy should preserve existing metadata if not updated
    req_headers = self._test_COPY_source_headers({})
    self.assertEqual('text/original', req_headers.get('Content-Type'))
    self.assertEqual('gzip', req_headers.get('Content-Encoding'))
    self.assertEqual('attachment; filename=myfile',
                     req_headers.get('Content-Disposition'))
    self.assertEqual('original', req_headers.get('X-Object-Meta-Test'))
    self.assertEqual('original', req_headers.get('X-Object-Sysmeta-Test'))
    self.assertEqual('original',
                     req_headers.get('X-Object-Transient-Sysmeta-Test'))
    self.assertEqual('Bar', req_headers.get('X-Foo'))
    # internal timestamp headers must never be copied to the new object
    self.assertNotIn('X-Timestamp', req_headers)
    self.assertNotIn('X-Backend-Timestamp', req_headers)
def test_COPY_source_headers_with_updates(self):
    # copy should apply any updated values to existing metadata
    put_headers = {
        'Content-Type': 'text/not_original',
        'Content-Encoding': 'not_gzip',
        'Content-Disposition': 'attachment; filename=notmyfile',
        'X-Object-Meta-Test': 'not_original',
        'X-Object-Sysmeta-Test': 'not_original',
        'X-Object-Transient-Sysmeta-Test': 'not_original',
        'X-Foo': 'Not Bar'}
    req_headers = self._test_COPY_source_headers(put_headers)
    # every updated value wins over the source object's value
    self.assertEqual('text/not_original', req_headers.get('Content-Type'))
    self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
    self.assertEqual('attachment; filename=notmyfile',
                     req_headers.get('Content-Disposition'))
    self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
    self.assertEqual('not_original',
                     req_headers.get('X-Object-Sysmeta-Test'))
    self.assertEqual('not_original',
                     req_headers.get('X-Object-Transient-Sysmeta-Test'))
    self.assertEqual('Not Bar', req_headers.get('X-Foo'))
    # internal timestamp headers must never be copied
    self.assertNotIn('X-Timestamp', req_headers)
    self.assertNotIn('X-Backend-Timestamp', req_headers)
def test_COPY_x_fresh_metadata_no_updates(self):
    # existing user metadata should not be copied, sysmeta is copied
    put_headers = {
        'X-Fresh-Metadata': 'true',
        'X-Extra': 'Fresh'}
    req_headers = self._test_COPY_source_headers(put_headers)
    # Content-Type and sysmeta survive even with fresh metadata requested
    self.assertEqual('text/original', req_headers.get('Content-Type'))
    self.assertEqual('Fresh', req_headers.get('X-Extra'))
    self.assertEqual('original',
                     req_headers.get('X-Object-Sysmeta-Test'))
    self.assertIn('X-Fresh-Metadata', req_headers)
    # user meta, transient sysmeta and misc headers are dropped
    self.assertNotIn('X-Object-Meta-Test', req_headers)
    self.assertNotIn('X-Object-Transient-Sysmeta-Test', req_headers)
    self.assertNotIn('X-Timestamp', req_headers)
    self.assertNotIn('X-Backend-Timestamp', req_headers)
    self.assertNotIn('Content-Encoding', req_headers)
    self.assertNotIn('Content-Disposition', req_headers)
    self.assertNotIn('X-Foo', req_headers)
def test_COPY_x_fresh_metadata_with_updates(self):
    # existing user metadata should not be copied, new metadata replaces it
    put_headers = {
        'X-Fresh-Metadata': 'true',
        'Content-Type': 'text/not_original',
        'Content-Encoding': 'not_gzip',
        'Content-Disposition': 'attachment; filename=notmyfile',
        'X-Object-Meta-Test': 'not_original',
        'X-Object-Sysmeta-Test': 'not_original',
        'X-Object-Transient-Sysmeta-Test': 'not_original',
        'X-Foo': 'Not Bar',
        'X-Extra': 'Fresh'}
    req_headers = self._test_COPY_source_headers(put_headers)
    self.assertEqual('Fresh', req_headers.get('X-Extra'))
    self.assertEqual('text/not_original', req_headers.get('Content-Type'))
    self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
    self.assertEqual('attachment; filename=notmyfile',
                     req_headers.get('Content-Disposition'))
    self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
    self.assertEqual('not_original',
                     req_headers.get('X-Object-Sysmeta-Test'))
    self.assertEqual('not_original',
                     req_headers.get('X-Object-Transient-Sysmeta-Test'))
    self.assertEqual('Not Bar', req_headers.get('X-Foo'))
def _test_POST_source_headers(self, extra_post_headers):
    # helper method to perform a POST with metadata headers that should
    # always be sent to the destination
    # (post-as-copy mode: the POST is turned into a GET + PUT of the
    # same object). Returns the backend PUT headers.
    post_headers = {'X-Object-Meta-Test2': 'added',
                    'X-Object-Sysmeta-Test2': 'added',
                    'X-Object-Transient-Sysmeta-Test2': 'added'}
    post_headers.update(extra_post_headers)
    # headers the fake backend returns on the GET of the object
    get_resp_headers = {
        'X-Timestamp': '1234567890.12345',
        'X-Backend-Timestamp': '1234567890.12345',
        'Content-Type': 'text/original',
        'Content-Encoding': 'gzip',
        'Content-Disposition': 'attachment; filename=myfile',
        'X-Object-Meta-Test': 'original',
        'X-Object-Sysmeta-Test': 'original',
        'X-Object-Transient-Sysmeta-Test': 'original',
        'X-Foo': 'Bar'}
    self.app.register(
        'GET', '/v1/a/c/o', swob.HTTPOk, headers=get_resp_headers)
    self.app.register('PUT', '/v1/a/c/o', swob.HTTPCreated, {})
    req = Request.blank('/v1/a/c/o', method='POST', headers=post_headers)
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '202 Accepted')
    calls = self.app.calls_with_headers
    self.assertEqual(2, len(calls))
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    # these headers should always be applied to the destination
    self.assertEqual('added', req_headers.get('X-Object-Meta-Test2'))
    self.assertEqual('added',
                     req_headers.get('X-Object-Transient-Sysmeta-Test2'))
    # POSTed sysmeta should never be applied to the destination
    self.assertNotIn('X-Object-Sysmeta-Test2', req_headers)
    # existing sysmeta should always be preserved
    self.assertEqual('original',
                     req_headers.get('X-Object-Sysmeta-Test'))
    return req_headers
def test_POST_no_updates(self):
    # a bare POST replaces user metadata (with nothing) but keeps
    # Content-Type and sysmeta
    post_headers = {}
    req_headers = self._test_POST_source_headers(post_headers)
    self.assertEqual('text/original', req_headers.get('Content-Type'))
    self.assertNotIn('X-Object-Meta-Test', req_headers)
    self.assertNotIn('X-Object-Transient-Sysmeta-Test', req_headers)
    self.assertNotIn('X-Timestamp', req_headers)
    self.assertNotIn('X-Backend-Timestamp', req_headers)
    self.assertNotIn('Content-Encoding', req_headers)
    self.assertNotIn('Content-Disposition', req_headers)
    self.assertNotIn('X-Foo', req_headers)
def test_POST_with_updates(self):
    # POSTed user metadata and content headers replace the originals
    post_headers = {
        'Content-Type': 'text/not_original',
        'Content-Encoding': 'not_gzip',
        'Content-Disposition': 'attachment; filename=notmyfile',
        'X-Object-Meta-Test': 'not_original',
        'X-Object-Sysmeta-Test': 'not_original',
        'X-Object-Transient-Sysmeta-Test': 'not_original',
        'X-Foo': 'Not Bar',
    }
    req_headers = self._test_POST_source_headers(post_headers)
    self.assertEqual('text/not_original', req_headers.get('Content-Type'))
    self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
    self.assertEqual('attachment; filename=notmyfile',
                     req_headers.get('Content-Disposition'))
    self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
    self.assertEqual('not_original',
                     req_headers.get('X-Object-Transient-Sysmeta-Test'))
    self.assertEqual('Not Bar', req_headers.get('X-Foo'))
def test_POST_x_fresh_metadata_with_updates(self):
    # post-as-copy trumps x-fresh-metadata i.e. existing user metadata
    # should not be copied, sysmeta is copied *and not updated with new*
    post_headers = {
        'X-Fresh-Metadata': 'true',
        'Content-Type': 'text/not_original',
        'Content-Encoding': 'not_gzip',
        'Content-Disposition': 'attachment; filename=notmyfile',
        'X-Object-Meta-Test': 'not_original',
        'X-Object-Sysmeta-Test': 'not_original',
        'X-Object-Transient-Sysmeta-Test': 'not_original',
        'X-Foo': 'Not Bar',
    }
    req_headers = self._test_POST_source_headers(post_headers)
    self.assertEqual('text/not_original', req_headers.get('Content-Type'))
    self.assertEqual('not_gzip', req_headers.get('Content-Encoding'))
    self.assertEqual('attachment; filename=notmyfile',
                     req_headers.get('Content-Disposition'))
    self.assertEqual('not_original', req_headers.get('X-Object-Meta-Test'))
    self.assertEqual('not_original',
                     req_headers.get('X-Object-Transient-Sysmeta-Test'))
    self.assertEqual('Not Bar', req_headers.get('X-Foo'))
    # the X-Fresh-Metadata flag itself is passed through
    self.assertIn('X-Fresh-Metadata', req_headers)
def test_COPY_with_single_range(self):
    # verify that source etag is not copied when copying a range
    self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
                      {'etag': 'bogus etag'}, "abcdefghijklmnop")
    self.app.register('PUT', '/v1/a/c1/o', swob.HTTPCreated, {})
    req = swob.Request.blank(
        '/v1/a/c/o', method='COPY',
        headers={'Destination': 'c1/o',
                 'Range': 'bytes=5-10'})
    status, headers, body = self.call_ssc(req)
    self.assertEqual(status, '201 Created')
    calls = self.app.calls_with_headers
    self.assertEqual(2, len(calls))
    method, path, req_headers = calls[1]
    self.assertEqual('PUT', method)
    self.assertEqual('/v1/a/c1/o', path)
    # membership test over lower-cased header names: no etag forwarded
    self.assertNotIn('etag', (h.lower() for h in req_headers))
    # a 6-byte range (5-10 inclusive) was written
    self.assertEqual('6', req_headers['content-length'])
    req = swob.Request.blank('/v1/a/c1/o', method='GET')
    status, headers, body = self.call_ssc(req)
    self.assertEqual('fghijk', body)
class TestServerSideCopyConfiguration(unittest.TestCase):
    """Verify how the copy middleware resolves its object_post_as_copy
    setting from the middleware conf section, the proxy conf, or defaults.
    """

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def test_post_as_copy_defaults_to_false(self):
        # no configuration at all -> post-as-copy is off by default
        ssc = copy.filter_factory({})("no app here")
        self.assertEqual(ssc.object_post_as_copy, False)

    def test_reading_proxy_conf_when_no_middleware_conf_present(self):
        # with no option in [filter:copy], the middleware falls back to the
        # proxy-server app section of the conf file named by __file__
        proxy_conf = dedent("""
        [DEFAULT]
        bind_ip = 10.4.5.6
        [pipeline:main]
        pipeline = catch_errors copy ye-olde-proxy-server
        [filter:copy]
        use = egg:swift#copy
        [app:ye-olde-proxy-server]
        use = egg:swift#proxy
        object_post_as_copy = no
        """)
        conffile = tempfile.NamedTemporaryFile()
        conffile.write(proxy_conf)
        conffile.flush()
        ssc = copy.filter_factory({
            '__file__': conffile.name
        })("no app here")
        self.assertEqual(ssc.object_post_as_copy, False)

    def test_middleware_conf_precedence(self):
        # an explicit option in [filter:copy] wins over the proxy app section
        proxy_conf = dedent("""
        [DEFAULT]
        bind_ip = 10.4.5.6
        [pipeline:main]
        pipeline = catch_errors copy ye-olde-proxy-server
        [filter:copy]
        use = egg:swift#copy
        object_post_as_copy = no
        [app:ye-olde-proxy-server]
        use = egg:swift#proxy
        object_post_as_copy = yes
        """)
        conffile = tempfile.NamedTemporaryFile()
        conffile.write(proxy_conf)
        conffile.flush()
        with mock.patch('swift.common.middleware.copy.get_logger',
                        return_value=debug_logger('copy')):
            ssc = copy.filter_factory({
                'object_post_as_copy': 'no',
                '__file__': conffile.name
            })("no app here")
        self.assertEqual(ssc.object_post_as_copy, False)
        # no deprecation warning when the deprecated mode is not enabled
        self.assertFalse(ssc.logger.get_lines_for_level('warning'))

    def _test_post_as_copy_emits_warning(self, conf):
        # helper: enabling post-as-copy must log one deprecation warning
        with mock.patch('swift.common.middleware.copy.get_logger',
                        return_value=debug_logger('copy')):
            ssc = copy.filter_factory(conf)("no app here")
        self.assertEqual(ssc.object_post_as_copy, True)
        log_lines = ssc.logger.get_lines_for_level('warning')
        self.assertEqual(1, len(log_lines))
        self.assertIn('object_post_as_copy=true is deprecated', log_lines[0])

    def test_post_as_copy_emits_warning(self):
        # enabled directly in the middleware conf...
        self._test_post_as_copy_emits_warning({'object_post_as_copy': 'yes'})
        # ...or inherited from the proxy app section via __file__
        proxy_conf = dedent("""
        [DEFAULT]
        bind_ip = 10.4.5.6
        [pipeline:main]
        pipeline = catch_errors copy ye-olde-proxy-server
        [filter:copy]
        use = egg:swift#copy
        [app:ye-olde-proxy-server]
        use = egg:swift#proxy
        object_post_as_copy = yes
        """)
        conffile = tempfile.NamedTemporaryFile()
        conffile.write(proxy_conf)
        conffile.flush()
        self._test_post_as_copy_emits_warning({'__file__': conffile.name})
@patch_policies(with_ec_default=True)
class TestServerSideCopyMiddlewareWithEC(unittest.TestCase):
    """Copy middleware tests run against an erasure-coded default policy."""

    # canned container info served to the proxy object controller
    container_info = {
        'status': 200,
        'write_acl': None,
        'read_acl': None,
        'storage_policy': None,
        'sync_key': None,
        'versions': None,
    }

    def setUp(self):
        self.logger = debug_logger('proxy-server')
        self.logger.thread_locals = ('txn1', '127.0.0.2')
        self.app = PatchedObjControllerApp(
            None, FakeMemcache(), account_ring=FakeRing(),
            container_ring=FakeRing(), logger=self.logger)
        self.ssc = copy.filter_factory({
            'object_post_as_copy': 'yes',
        })(self.app)
        self.ssc.logger = self.app.logger
        self.policy = POLICIES.default
        self.app.container_info = dict(self.container_info)

    def test_COPY_with_single_range(self):
        # ranged COPY against EC: the source range is reconstructed from
        # fragments and the destination PUTs must not carry the source etag
        req = swob.Request.blank(
            '/v1/a/c/o', method='COPY',
            headers={'Destination': 'c1/o',
                     'Range': 'bytes=5-10'})
        # turn a real body into fragments
        segment_size = self.policy.ec_segment_size
        real_body = ('asdf' * segment_size)[:-10]
        # split it up into chunks
        chunks = [real_body[x:x + segment_size]
                  for x in range(0, len(real_body), segment_size)]
        # we need only first chunk to rebuild 5-10 range
        fragments = self.policy.pyeclib_driver.encode(chunks[0])
        fragment_payloads = []
        fragment_payloads.append(fragments)
        # NOTE: len() below relies on zip() returning a list (Python 2)
        node_fragments = zip(*fragment_payloads)
        self.assertEqual(len(node_fragments),
                         self.policy.object_ring.replicas)  # sanity
        headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
        # enough 200s to reconstruct, then a 201 per destination PUT
        responses = [(200, ''.join(node_fragments[i]), headers)
                     for i in range(POLICIES.default.ec_ndata)]
        responses += [(201, '', {})] * self.policy.object_ring.replicas
        status_codes, body_iter, headers = zip(*responses)
        expect_headers = {
            'X-Obj-Metadata-Footer': 'yes',
            'X-Obj-Multiphase-Commit': 'yes'
        }
        put_hdrs = []

        def capture_conn(host, port, dev, part, method, path, *args, **kwargs):
            # record the headers of every backend PUT for later assertions
            if method == 'PUT':
                put_hdrs.append(args[0])

        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers, expect_headers=expect_headers,
                              give_connect=capture_conn):
            resp = req.get_response(self.ssc)
        self.assertEqual(resp.status_int, 201)
        expected_puts = POLICIES.default.ec_ndata + POLICIES.default.ec_nparity
        self.assertEqual(expected_puts, len(put_hdrs))
        for hdrs in put_hdrs:
            # etag should not be copied from source
            self.assertNotIn('etag', (h.lower() for h in hdrs))

    def test_COPY_with_invalid_ranges(self):
        # real body size is segment_size - 10 (just 1 segment)
        segment_size = self.policy.ec_segment_size
        real_body = ('a' * segment_size)[:-10]
        # range is out of real body but in segment size
        self._test_invalid_ranges('COPY', real_body,
                                  segment_size, '%s-' % (segment_size - 10))
        # range is out of both real body and segment size
        self._test_invalid_ranges('COPY', real_body,
                                  segment_size, '%s-' % (segment_size + 10))

    def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
        # make a request with range starts from more than real size.
        # Expect a 416 Range Not Satisfiable either straight from the
        # backend or synthesized after reconstruction.
        body_etag = md5(real_body).hexdigest()
        req = swob.Request.blank(
            '/v1/a/c/o', method=method,
            headers={'Destination': 'c1/o',
                     'Range': 'bytes=%s' % (req_range)})
        fragments = self.policy.pyeclib_driver.encode(real_body)
        fragment_payloads = [fragments]
        node_fragments = zip(*fragment_payloads)
        self.assertEqual(len(node_fragments),
                         self.policy.object_ring.replicas)  # sanity
        headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
                   'X-Object-Sysmeta-Ec-Etag': body_etag}
        start = int(req_range.split('-')[0])
        self.assertTrue(start >= 0)  # sanity
        title, exp = swob.RESPONSE_REASONS[416]
        range_not_satisfiable_body = \
            '<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
        # backend behavior depends on whether the range exceeds segment size
        if start >= segment_size:
            responses = [(416, range_not_satisfiable_body, headers)
                         for i in range(POLICIES.default.ec_ndata)]
        else:
            responses = [(200, ''.join(node_fragments[i]), headers)
                         for i in range(POLICIES.default.ec_ndata)]
        status_codes, body_iter, headers = zip(*responses)
        expect_headers = {
            'X-Obj-Metadata-Footer': 'yes',
            'X-Obj-Multiphase-Commit': 'yes'
        }
        # TODO possibly use FakeApp here
        with set_http_connect(*status_codes, body_iter=body_iter,
                              headers=headers, expect_headers=expect_headers):
            resp = req.get_response(self.ssc)
        self.assertEqual(resp.status_int, 416)
        self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
        self.assertEqual(resp.body, range_not_satisfiable_body)
        self.assertEqual(resp.etag, body_etag)
        self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
| {
"content_hash": "9669d7d2c6c2e276b32071be4bd05014",
"timestamp": "",
"source": "github",
"line_count": 1680,
"max_line_length": 79,
"avg_line_length": 48.963095238095235,
"alnum_prop": 0.5686863283814341,
"repo_name": "clayg/swift",
"id": "bbf74bbc1bad34125f83f679a6eea890b5f3cfbd",
"size": "82870",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/unit/common/middleware/test_copy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "248"
},
{
"name": "PHP",
"bytes": "377"
},
{
"name": "Python",
"bytes": "8555598"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
import random

import numpy as np
from tensorforce.environments import Environment

# BUG FIX: removed "from random import random" -- that name was immediately
# shadowed by "import random" below, so it had no effect and only invited
# confusion between the random() function and the random module.

# Board geometry for the Plane Strike game.
BOARD_HEIGHT = 6
BOARD_WIDTH = 6
BOARD_SIZE = BOARD_HEIGHT * BOARD_WIDTH
# Cells occupied by the hidden plane: 5-cell cross plus a 3-cell tail.
PLANE_SIZE = 8
def init_board():
    """Build a BOARD_HEIGHT x BOARD_WIDTH hidden board containing one
    randomly placed, randomly oriented plane, and return it flattened to a
    1-D array of length BOARD_SIZE (1.0 = plane cell, 0.0 = empty)."""
    hidden_board = np.zeros((BOARD_HEIGHT, BOARD_WIDTH))
    # Populate the plane's position
    # First figure out the plane's orientation
    # 0: heading right
    # 1: heading up
    # 2: heading left
    # 3: heading down
    # (random.randint bounds are inclusive)
    plane_orientation = random.randint(0, 3)
    # Figure out plane core's position as the '*' below
    #        |         |
    #       -*-        |-*-
    #        |         |
    #       ---
    if plane_orientation == 0:
        plane_core_row = random.randint(1, BOARD_HEIGHT - 2)
        plane_core_column = random.randint(2, BOARD_WIDTH - 2)
        # Populate the tail (two columns left of the core)
        hidden_board[plane_core_row][plane_core_column - 2] = 1
        hidden_board[plane_core_row - 1][plane_core_column - 2] = 1
        hidden_board[plane_core_row + 1][plane_core_column - 2] = 1
    elif plane_orientation == 1:
        plane_core_row = random.randint(1, BOARD_HEIGHT - 3)
        # NOTE(review): upper bound BOARD_WIDTH - 3 looks unnecessarily
        # restrictive -- the cross only needs column + 1 <= BOARD_WIDTH - 1,
        # and the mirrored orientation 3 uses BOARD_WIDTH - 2. Confirm.
        plane_core_column = random.randint(1, BOARD_WIDTH - 3)
        # Populate the tail (two rows below the core)
        hidden_board[plane_core_row + 2][plane_core_column] = 1
        hidden_board[plane_core_row + 2][plane_core_column + 1] = 1
        hidden_board[plane_core_row + 2][plane_core_column - 1] = 1
    elif plane_orientation == 2:
        plane_core_row = random.randint(1, BOARD_HEIGHT - 2)
        plane_core_column = random.randint(1, BOARD_WIDTH - 3)
        # Populate the tail (two columns right of the core)
        hidden_board[plane_core_row][plane_core_column + 2] = 1
        hidden_board[plane_core_row - 1][plane_core_column + 2] = 1
        hidden_board[plane_core_row + 1][plane_core_column + 2] = 1
    elif plane_orientation == 3:
        plane_core_row = random.randint(2, BOARD_HEIGHT - 2)
        plane_core_column = random.randint(1, BOARD_WIDTH - 2)
        # Populate the tail (two rows above the core)
        hidden_board[plane_core_row - 2][plane_core_column] = 1
        hidden_board[plane_core_row - 2][plane_core_column + 1] = 1
        hidden_board[plane_core_row - 2][plane_core_column - 1] = 1
    # Populate the cross (core plus its four neighbours)
    hidden_board[plane_core_row][plane_core_column] = 1
    hidden_board[plane_core_row + 1][plane_core_column] = 1
    hidden_board[plane_core_row - 1][plane_core_column] = 1
    hidden_board[plane_core_row][plane_core_column + 1] = 1
    hidden_board[plane_core_row][plane_core_column - 1] = 1
    return hidden_board.reshape(BOARD_SIZE, )
class PlaneStrike(Environment):
    """Tensorforce environment for the Plane Strike guessing game.

    The agent repeatedly picks a cell (a flat board index); hits are marked
    1 in the visible state, misses -1. The episode ends once all PLANE_SIZE
    plane cells have been hit.
    """

    def __init__(self):
        # The hidden plane layout; the visible state is created in reset().
        self.hidden_state = init_board()

    def __str__(self):
        return 'PlaneStrike'

    def close(self):
        pass

    def reset(self):
        """Clear the visible board and hit counter; return the new state."""
        # BUG FIX: the original used np.zeros(N) where N is undefined
        # (NameError). The visible state has one cell per board position.
        self.state = np.zeros(BOARD_SIZE)
        self.count = 0
        return self.state

    def execute(self, action):
        """Apply one strike at flat index *action*.

        Returns (state, reward, terminal): +1 reward for a fresh hit, -1 for
        a miss or for re-striking an already-guessed cell.
        """
        # BUG FIX: the original condition used `and` (== 1 and == -1),
        # which can never be true; the intent is to penalize striking a
        # cell that was already guessed (a previous hit OR a previous miss).
        if self.state[action] == 1 or self.state[action] == -1:
            reward = -1
        else:
            if self.hidden_state[action] == 1:
                # fresh hit
                self.state[action] = 1
                self.count = self.count + 1
                reward = 1
            else:
                # fresh miss
                self.state[action] = -1
                reward = -1
        # episode is over when every plane cell has been hit
        terminal = (self.count == PLANE_SIZE)
        return self.state, reward, terminal

    @property
    def states(self):
        # state space: one float per board cell
        return dict(shape=(BOARD_SIZE,), type='float')

    @property
    def actions(self):
        # action space: a discrete choice of board cell
        return dict(continuous=False, num_actions=BOARD_SIZE)
| {
"content_hash": "271a2664e384f485294b361e4bbee3f8",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 67,
"avg_line_length": 33.77884615384615,
"alnum_prop": 0.5881013378878451,
"repo_name": "windmaple/planestrike-tensorforce",
"id": "e745588115932902deadf96b05bec6a50e86468a",
"size": "3513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "planestrike_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7194"
}
],
"symlink_target": ""
} |
from dataviva import db
from dataviva.utils.auto_serialize import AutoSerialize
from dataviva.api.attrs.models import Wld, Hs, Bra
from sqlalchemy.ext.declarative import declared_attr
class BaseSecex(db.Model, AutoSerialize):
    """Abstract base model shared by all SECEX (foreign trade) tables."""
    __abstract__ = True

    # time dimension; part of every table's primary key
    year = db.Column(db.Integer(4), primary_key=True)
    month = db.Column(db.Integer(2), primary_key=True)

    # trade measures: monetary values and shipped weights
    import_val = db.Column(db.Numeric(16,2))
    export_val = db.Column(db.Numeric(16,2))
    import_kg = db.Column(db.Integer(11))
    export_kg = db.Column(db.Integer(11))

    # 1-year and 5-year growth rates of the monetary values
    import_val_growth = db.Column(db.Float())
    import_val_growth_5 = db.Column(db.Float())
    export_val_growth = db.Column(db.Float())
    export_val_growth_5 = db.Column(db.Float())
class BraDiversity(object):
    """Mixin adding diversity columns for Brazilian locations (bra)."""
    bra_diversity = db.Column(db.Integer(11))
    bra_diversity_eff = db.Column(db.Float())
class WldDiversity(object):
    """Mixin adding diversity columns for world trade partners (wld)."""
    wld_diversity = db.Column(db.Integer(11))
    wld_diversity_eff = db.Column(db.Float())
class HsDiversity(object):
    """Mixin adding diversity columns for HS product classes (hs)."""
    hs_diversity = db.Column(db.Integer(11))
    hs_diversity_eff = db.Column(db.Float())
class HsId(object):
    """Mixin adding an HS product foreign key as part of the primary key."""
    hs_id_len = db.Column(db.Integer(1))

    @declared_attr
    def hs_id(cls):
        return db.Column(db.String(6), db.ForeignKey(Hs.id), primary_key=True)
class WldId(object):
    """Mixin adding a world-partner foreign key as part of the primary key."""
    wld_id_len = db.Column(db.Integer(1))

    @declared_attr
    def wld_id(cls):
        # consistency fix: formatting matched to the sibling HsId/BraId
        # mixins (space after the comma); no behavioral change
        return db.Column(db.String(5), db.ForeignKey(Wld.id), primary_key=True)
class BraId(object):
    """Mixin adding a Brazilian-location foreign key as part of the primary key."""
    bra_id_len = db.Column(db.Integer(1))

    @declared_attr
    def bra_id(cls):
        return db.Column(db.String(9), db.ForeignKey(Bra.id), primary_key=True)
class Distance(object):
    """Mixin adding product-space distance measures."""
    distance = db.Column(db.Float())
    distance_wld = db.Column(db.Float())
class OppGain(object):
    """Mixin adding opportunity-gain measures."""
    opp_gain = db.Column(db.Float())
    opp_gain_wld = db.Column(db.Float())
class Rca(object):
    """Mixin adding an RCA (revealed comparative advantage) column."""
    rca = db.Column(db.Float())
class Rca_wld(object):
    """Mixin adding a world-relative RCA column."""
    rca_wld = db.Column(db.Float())
class Eci(object):
    """Mixin adding an ECI (economic complexity index) column."""
    eci = db.Column(db.Float())
class Rcd(object):
    """Mixin adding an RCD column."""
    rcd = db.Column(db.Float())
| {
"content_hash": "ad3de5c9fc2e33354682f897131e7b85",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 28.164383561643834,
"alnum_prop": 0.6697470817120622,
"repo_name": "DataViva/dataviva-site",
"id": "1da4ad2e77709ffe3475224a368245c77d10d903",
"size": "2056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataviva/api/secex/abstract_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "278402"
},
{
"name": "HTML",
"bytes": "870921"
},
{
"name": "JavaScript",
"bytes": "604412"
},
{
"name": "Python",
"bytes": "653742"
},
{
"name": "Shell",
"bytes": "879"
},
{
"name": "TSQL",
"bytes": "168883"
},
{
"name": "Vue",
"bytes": "33089"
}
],
"symlink_target": ""
} |
import re
from bs4 import BeautifulSoup
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import json
class BaseURLSpider(CrawlSpider):
    '''
    This class is responsible for crawling globe and mail articles urls.
    Each unique opinion-article URL is appended to the output file.
    '''
    name = 'BaseURL'
    allowed_domains = ["www.theglobeandmail.com"]

    # seed urls
    url_path = "../../Sample_Resources/Online_Resources/sample_seed_urls.txt"
    # BUG FIX: the original called open() without ever closing the handle;
    # a with-block closes the seed file deterministically.
    with open(url_path) as seed_file:
        start_urls = [line.strip() for line in seed_file.readlines()]

    # Rules for including and excluding urls
    rules = (
        Rule(LinkExtractor(allow=r'\/opinion\/.*\/article\d+\/$'), callback="parse_articles"),
    )

    def __init__(self, **kwargs):
        '''
        Read user arguments and initialize variables
        :param kwargs: command line input
        :return: None
        '''
        CrawlSpider.__init__(self)
        self.headers = ({'User-Agent': 'Mozilla/5.0',
                         'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                         'X-Requested-With': 'XMLHttpRequest'})
        self.output_path = "../../Sample_Resources/Online_Resources/sample_base_urls.txt"
        # article ids already written, to avoid duplicate output lines
        self.ids_seen = set()

    def parse_articles(self, response):
        '''
        Crawl more urls and keep original urls that start with
        "http://www.theglobeandmail.com/opinion/"
        :param response: url response
        :return: None. Appends qualifying urls to the output file.
        '''
        # raw string for the regex (idiom fix; the bytes are unchanged)
        article_ptn = r"http://www.theglobeandmail.com/opinion/(.*?)/article(\d+)/"
        resp_url = response.url
        article_m = re.match(article_ptn, resp_url)
        article_id = ''
        # idiom fix: `is not None` instead of `!= None`
        if article_m is not None:
            article_id = article_m.group(2)
        # NOTE(review): when the URL does not match, article_id stays ''
        # and the page is still processed the first time '' is seen --
        # confirm that the Rule regex makes this unreachable.
        if article_id not in self.ids_seen:
            self.ids_seen.add(article_id)
            soup = BeautifulSoup(response.text, 'html.parser')
            content = soup.find('div', {"class":"column-2 gridcol"})
            if content is not None:
                text = content.findAll('p', {"class":''})
                if len(text) > 0:
                    print('*****Article ID*****', article_id)
                    print('***Article URL***', resp_url)
                    with open(self.output_path, 'a') as out:
                        out.write(resp_url + "\n")
"content_hash": "fae184d037f435ab572c9bae339c64aa",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 104,
"avg_line_length": 38.193548387096776,
"alnum_prop": 0.5612331081081081,
"repo_name": "sfu-discourse-lab/SFU_Comment_Extractor",
"id": "5bb539aa85142f478dc6b6b5b37578a7e79f575d",
"size": "2368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source_Code/Online_Source/article_base_url_spider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13777011"
},
{
"name": "Python",
"bytes": "134422"
},
{
"name": "Shell",
"bytes": "326"
}
],
"symlink_target": ""
} |
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Author(s):
George Paulos
RackHD Functional Integration Test (FIT) library
This is the main common function library for RackHD FIT tests.
'''
# Standard imports
import fit_path # NOQA: unused import
import os
import sys
import json
import subprocess
import time
import datetime
import unittest
import signal
import re
import requests
import pexpect
import shutil
import inspect
import nose
import argparse
from mkcfg import mkcfg
# Module-level shortcut globals, populated from the generated configuration
# by add_globals() / update_globals().
VERBOSITY = 1
TEST_PATH = None
CONFIG_PATH = None
# NOTE: these default to the *string* "None", not the None object; the
# code below compares them against "None", so keep them as strings.
API_PORT = "None"
API_PROTOCOL = "None"
AUTH_TOKEN = "None"
REDFISH_TOKEN = "None"
BMC_LIST = []
def fitcfg():
    """Return the full FIT configuration dictionary."""
    config_store = mkcfg()
    return config_store.get()
def fitrackhd():
    """Return the 'rackhd-config' section of the configuration, or None."""
    # dict.get defaults to None when the key is absent
    return fitcfg().get('rackhd-config')
def fitargs():
    """Return the 'cmd-args-list' section (parsed CLI args), or None."""
    return fitcfg().get('cmd-args-list')
def fitcreds():
    """Return the 'credentials' section of the configuration, or None."""
    return fitcfg().get('credentials')
def fitinstall():
    """Return ['install-config']['install'], or None if either is absent."""
    cfg = fitcfg()
    if 'install-config' not in cfg:
        return None
    return cfg['install-config'].get('install')
def fitports():
    """Return ['install-config']['ports'], or None if either is absent."""
    cfg = fitcfg()
    if 'install-config' not in cfg:
        return None
    return cfg['install-config'].get('ports')
def fitcit():
    """Return the 'cit-config' section of the configuration, or None."""
    return fitcfg().get('cit-config')
def fitglobals():
    """
    returns the ['globals'] dictionary (shortcut values generated by
    add_globals(); the original docstring wrongly said
    ['install-config']['global'])
    :return: dictionary or None
    """
    return fitcfg().get('globals', None)
def fitproxy():
    """Return ['install-config']['proxy'], or None if either is absent."""
    cfg = fitcfg()
    if 'install-config' not in cfg:
        return None
    return cfg['install-config'].get('proxy')
def fitskupack():
    """
    returns the ['install-config']['skupack'] dictionary
    :return: dictionary or None
    """
    if 'install-config' not in fitcfg():
        return None
    return fitcfg()['install-config'].get('skupack', None)
def compose_config(use_sysargs=False):
    """
    creates a configuration based on
    :param use_sysargs: set to true if sys.argv is to be processed.
    :return: None
    """
    # create configuration object
    cfg_obj = mkcfg()
    if cfg_obj.config_is_loaded():
        # a previously generated configuration has been loaded
        # restore previously setup globals
        update_globals()
    else:
        # create new configuration
        # * add cmd-args-list section
        # * add the default config json file composition.
        # * add stack overlay
        # * save off environment
        # * generate a few globals
        # * save (generate) the configuration to a file
        args_list = {}
        if use_sysargs:
            # Args from command line, pass -config option to create
            args_list['cmd-args-list'] = mkargs()
            config = args_list['cmd-args-list']['config']
            cfg_obj.create(config)
        else:
            # Args from default set
            no_args = {}
            args_list['cmd-args-list'] = mkargs(no_args)
            cfg_obj.create()
        # add the 'cmd-args-list' section
        cfg_obj.add_from_dict(args_list)
        if fitargs()['config'] != 'config':
            print "*** Using config file path:", fitcfg()['cmd-args-list']['config']
        # no explicit config path supplied -> use the default composition
        if cfg_obj.get_path() is None:
            default_composition = ['rackhd_default.json',
                                   'credentials_default.json',
                                   'install_default.json',
                                   'cit_default.json']
            # config file composition
            cfg_obj.add_from_file_list(default_composition)
        # stack overlay configuration
        apply_stack_config()
        # add significant environment variables
        cfg_obj.add_from_dict({
            'env': {
                'HOME': os.environ['HOME'],
                'PATH': os.environ['PATH']
            }
        })
        add_globals()
        # generate the configuration file
        cfg_obj.generate()
    print "*** Using config file: {0}".format(cfg_obj.get_path())
def apply_stack_config():
    """Overlay the per-stack configuration file onto the command-line args.

    Does nothing when no -stack option was supplied.
    """
    stack_label = fitargs()['stack']
    if stack_label is not None:
        mkcfg().add_from_file('stack_config.json', stack_label)
        # rackhd_host falls back to localhost when the stack omits it
        fitargs()['rackhd_host'] = fitcfg().get('rackhd_host', 'localhost')
        # optional per-stack overrides, copied only when present
        for option in ('bmc', 'hyper', 'ucs_ip', 'ucs_port'):
            if option in fitcfg():
                fitargs()[option] = fitcfg()[option]
def add_globals():
    """
    create a handful of global shortcuts
    :return:
    """
    global TEST_PATH
    global CONFIG_PATH
    global API_PORT
    global API_PROTOCOL
    global VERBOSITY
    # set api port and protocol from command line
    if fitargs()['port'] != "None":
        API_PORT = fitargs()['port']
    if fitargs()['http'] == "True":
        API_PROTOCOL = "http"
        if API_PORT == "None":
            API_PORT = fitports()['http']
    if fitargs()['https'] == "True":
        API_PROTOCOL = "https"
        if API_PORT == "None":
            API_PORT = fitports()['https']
    # local appliance defaults when nothing was specified
    if fitargs()['rackhd_host'] == "localhost":
        if API_PROTOCOL == "None":
            API_PROTOCOL = 'http'
        if API_PORT == "None":
            API_PORT = '8080'
    # add globals section to base configuration
    TEST_PATH = fit_path.fit_path_root + '/'
    CONFIG_PATH = TEST_PATH + fitargs()['config'] + "/"
    mkcfg().add_from_dict({
        'globals': {
            'API_PORT': API_PORT,
            'API_PROTOCOL': API_PROTOCOL,
            'TEST_PATH': TEST_PATH,
            'CONFIG_PATH': CONFIG_PATH,
            'VERBOSITY': fitargs()['v']
        }
    })
    # set OVA template from command line
    if fitargs()["template"] == "None":
        fitargs()["template"] = fitcfg()['install-config']['template']
def update_globals():
    """Refresh the module-level shortcut globals from the ['globals']
    section of a previously generated configuration."""
    global API_PORT, API_PROTOCOL, TEST_PATH, CONFIG_PATH, VERBOSITY
    saved = fitglobals()
    API_PORT = saved['API_PORT']
    API_PROTOCOL = saved['API_PROTOCOL']
    TEST_PATH = saved['TEST_PATH']
    CONFIG_PATH = saved['CONFIG_PATH']
    VERBOSITY = saved['VERBOSITY']
def mkargs(in_args=None):
    """
    processes the command line options as passed in by in_args.
    :param in_args: input arguments (defaults to sys.argv[1:])
    :return: dictionary of processed arguments; unrecognized options are
             preserved under the 'unhandled_arguments' key for nosetests
    """
    if in_args is None:
        in_args = sys.argv[1:]
    # command line argument parser returns cmd_args dict
    # (add_help=False so we can intercept -h/--help ourselves below)
    arg_parser = argparse.ArgumentParser(
        description="Command Help", add_help=False)
    arg_parser.add_argument('-h', '--help', action='store_true', default=False,
                            help='show this help message and exit')
    arg_parser.add_argument("-test", default="tests/",
                            help="test to execute, default: tests/")
    arg_parser.add_argument("-config", default="config",
                            help="config file location, default: config")
    arg_parser.add_argument("-group", default="all",
                            help="test group to execute: 'smoke', 'regression', 'extended', default: 'all'")
    arg_parser.add_argument("-stack", default="vagrant",
                            help="stack label (test bed)")
    arg_parser.add_argument("-rackhd_host", default="localhost",
                            help="RackHD appliance IP address or hostname, default: localhost")
    arg_parser.add_argument("-template", default="None",
                            help="path or URL link to OVA template or RackHD OVA")
    arg_parser.add_argument("-xunit", default="False", action="store_true",
                            help="generates xUnit XML report files")
    arg_parser.add_argument("-numvms", default=1, type=int,
                            help="number of virtual machines for deployment on specified stack")
    arg_parser.add_argument("-list", default="False", action="store_true",
                            help="generates test list only")
    arg_parser.add_argument("-sku", default="all",
                            help="node SKU name, example: Quanta-T41, default=all")
    # -obmmac and -nodeid are alternative node selectors; only one may be given
    group = arg_parser.add_mutually_exclusive_group(required=False)
    group.add_argument("-obmmac", default="all",
                       help="node OBM MAC address, example:00:1e:67:b1:d5:64")
    group.add_argument("-nodeid", default="None",
                       help="node identifier string of a discovered node, example: 56ddcf9a8eff16614e79ec74")
    # -http and -https force one protocol; mutually exclusive
    group2 = arg_parser.add_mutually_exclusive_group(required=False)
    group2.add_argument("-http", default="False", action="store_true",
                        help="forces the tests to utilize the http API protocol")
    group2.add_argument("-https", default="False", action="store_true",
                        help="forces the tests to utilize the https API protocol")
    arg_parser.add_argument("-port", default="None",
                            help="API port number override, default from install_config.json")
    arg_parser.add_argument("-v", default=4, type=int,
                            help="Verbosity level of console and log output (see -nose-help for more options), Built Ins: " +
                                 "0: Minimal logging, " +
                                 "1: Display ERROR and CRITICAL to console and to files, " +
                                 "3: Display INFO to console and to files, " +
                                 "4: (default) Display INFO to console, and DEBUG to files, " +
                                 "5: Display infra.run and test.run DEBUG to both, " +
                                 "6: Add display of test.data (rest calls and status) DEBUG to both, " +
                                 "7: Add display of infra.data (ipmi, ssh) DEBUG to both, " +
                                 "9: Display infra.* and test.* at DEBUG_9 (max output) ")
    arg_parser.add_argument("-nose-help", default=False, action="store_true", dest="nose_help",
                            help="display help from underlying nosetests command, including additional log options")
    # we want to grab the arguments we want, and pass the rest
    # into the nosetest invocation.
    parse_results, other_args = arg_parser.parse_known_args(in_args)
    # if 'help' was set, handle it as best we can. We use argparse to
    # display usage and arguments, and then give nose a shot at printing
    # things out (if they set that option)
    if parse_results.help:
        arg_parser.print_help()
        if parse_results.nose_help:
            print
            print "NOTE: below is the --help output from nosetests."
            print
            rcode = _run_nose_help()
        else:
            rcode = 0
        sys.exit(rcode)
    # And if they only did --nose-help
    if parse_results.nose_help:
        rcode = _run_nose_help()
        sys.exit(rcode)
    # Now handle mapping -v to infra-logging. Check stream-monitor/flogging/README.md
    # for how loggers and handlers fit together.
    if parse_results.v >= 9:
        # Turn them all up to 11.
        vargs = ['--sm-set-combo-level', 'console*', 'DEBUG_9']
    elif parse_results.v >= 7:
        # ends up turning everything up to DEBUG_5 (levels 5 + 6 + infra.data)
        vargs = ['--sm-set-combo-level', 'console*', 'DEBUG_5']
    elif parse_results.v >= 6:
        # infra.run and test.* to DEBUG (level 5 + test.data)
        vargs = ['--sm-set-combo-level', 'console*:(test.data|*.run)', 'DEBUG_5']
    elif parse_results.v >= 5:
        # infra and test.run to DEBUG
        vargs = ['--sm-set-combo-level', 'console*:*.run', 'DEBUG_5']
    elif parse_results.v >= 4:
        # default
        vargs = []
    elif parse_results.v >= 3:
        # dial BACK output to files to INFO_5
        vargs = ['--sm-set-logger-level', '*', 'INFO_5']
    elif parse_results.v >= 1:
        # dial BACK output to everything to just ERROR, CRITICAL to console and logs
        vargs = ['--sm-set-combo-level', '*', 'ERROR_5']
    else:
        # 0 and 1 currently try to squish ALL logging output.
        vargs = ['--sm-set-combo-level', '*', 'CRITICAL_0']
    other_args.extend(vargs)
    # Put all the args we did not use and put them
    # into the parse_results so they can be found
    # by run_nose()
    parse_results.unhandled_arguments = other_args
    # parse arguments to cmd_args dict
    cmd_args = vars(parse_results)
    return cmd_args
def timestamp():
    """Return the current UTC time formatted as 'YYYY-MM-DDTHH:MM:SS'."""
    iso_format = "%Y-%m-%dT%H:%M:%S"
    return time.strftime(iso_format, time.gmtime())
def countdown(sleep_time, sleep_interval=1):
    """
    Sleep for sleep_time * sleep_interval seconds, writing one progress dot
    per interval, then announce wake-up.
    """
    total_seconds = sleep_time * sleep_interval
    sys.stdout.write("Sleeping for " + str(total_seconds) + " seconds.")
    sys.stdout.flush()
    for _ in range(sleep_time):
        time.sleep(sleep_interval)
        sys.stdout.write(".")
        sys.stdout.flush()
    sys.stdout.write("Waking!\n")
def remote_shell(shell_cmd, expect_receive="", expect_send="", timeout=300,
                 address=None, user=None, password=None):
    '''
    Run ssh based shell command on a remote machine at fitargs()['rackhd_host']
    :param shell_cmd: string based command
    :param expect_receive: optional prompt text to wait for during the command
    :param expect_send: text to send when expect_receive matches
    :param timeout: in seconds
    :param address: IP or hostname of remote host (default: fitargs()['rackhd_host'])
    :param user: username of remote host (default: from fitcreds())
    :param password: password of remote host (default: from fitcreds())
    :return: dict = {'stdout': str:ouput, 'exitcode': return code}
    '''
    if not address:
        address = fitargs()['rackhd_host']
    if not user:
        user = fitcreds()['rackhd_host'][0]['username']
    if not password:
        password = fitcreds()['rackhd_host'][0]['password']
    logfile_redirect = None
    if VERBOSITY >= 4:
        print "remote_shell: Host =", address
        print "remote_shell: Command =", shell_cmd
    if VERBOSITY >= 9:
        # at max verbosity, mirror the child process output to our stdout
        print "remote_shell: STDOUT =\n"
        logfile_redirect = sys.stdout
    # if localhost just run the command local (under sudo, answering the
    # password prompt automatically)
    if fitargs()['rackhd_host'] == 'localhost':
        (command_output, exitstatus) = \
            pexpect.run("sudo bash -c \"" + shell_cmd + "\"",
                        withexitstatus=1,
                        events={"assword": password + "\n"},
                        timeout=timeout, logfile=logfile_redirect)
        return {'stdout': command_output, 'exitcode': exitstatus}
    # this clears the ssh key from ~/.ssh/known_hosts
    subprocess.call(["touch ~/.ssh/known_hosts;ssh-keygen -R " +
                     address + " -f ~/.ssh/known_hosts >/dev/null 2>&1"], shell=True)
    # NOTE(review): str.replace returns a new string, and the result is
    # discarded here, so this statement is a no-op -- single quotes reach
    # the remote shell unescaped. Confirm the intended quoting before
    # "fixing" it; callers such as mongo_reset() pass quoted commands that
    # work with the current (unescaped) behavior.
    shell_cmd.replace("'", "\\\'")
    if expect_receive == "" or expect_send == "":
        # simple case: no interactive prompt other than the sudo password
        (command_output, exitstatus) = \
            pexpect.run("ssh -q -o StrictHostKeyChecking=no -t " + user + "@" +
                        address + " sudo bash -c \\\"" + shell_cmd + "\\\"",
                        withexitstatus=1,
                        events={"assword": password + "\n"},
                        timeout=timeout, logfile=logfile_redirect)
    else:
        # interactive case: also answer expect_receive with expect_send
        (command_output, exitstatus) = \
            pexpect.run("ssh -q -o StrictHostKeyChecking=no -t " + user + "@" +
                        address + " sudo bash -c \\\"" + shell_cmd + "\\\"",
                        withexitstatus=1,
                        events={"assword": password + "\n",
                                expect_receive: expect_send + "\n"},
                        timeout=timeout, logfile=logfile_redirect)
    if VERBOSITY >= 4:
        print shell_cmd, "\nremote_shell: Exit Code =", exitstatus
    return {'stdout': command_output, 'exitcode': exitstatus}
def scp_file_to_ora(src_file_name):
    '''
    Legacy alias for scp_file_to_host().
    :param src_file_name: name of file to copy over. May include path
    :return: just name of file on target (no path)
    '''
    # bug fix: the wrapper used to discard the wrapped function's return
    # value; forward it so legacy callers get the target file name too
    return scp_file_to_host(src_file_name)
def scp_file_to_host(src_file_name):
'''
scp the given file over to the RackHD host and place it in the home directory.
:param src_file_name: name of file to copy over. May include path
:type src_file_name: basestring
:return: just name of file on target (no path)
:rtype: basestring
'''
logfile_redirect = file('/dev/null', 'w')
just_fname = os.path.basename(src_file_name)
# if localhost just copy to home dir
if fitargs()['rackhd_host'] == 'localhost':
remote_shell('cp ' + src_file_name + ' ~/' + src_file_name)
return src_file_name
scp_target = fitcreds()['rackhd_host'][0]['username'] + '@{0}:'.format(fitargs()['rackhd_host'])
cmd = 'scp -o StrictHostKeyChecking=no {0} {1}'.format(src_file_name, scp_target)
if VERBOSITY >= 4:
print "scp_file_to_host: '{0}'".format(cmd)
if VERBOSITY >= 9:
logfile_redirect = sys.stdout
(command_output, ecode) = pexpect.run(
cmd, withexitstatus=1,
events={'(?i)assword: ': fitcreds()['rackhd_host'][0]['password'] + '\n'},
logfile=logfile_redirect)
if VERBOSITY >= 4:
print "scp_file_to_host: Exit Code = {0}".format(ecode)
assert ecode == 0, \
'failed "{0}" because {1}. Output={2}'.format(cmd, ecode, command_output)
return just_fname
def get_auth_token():
# This is run once to get an auth token which is set to global AUTH_TOKEN and used for rest of session
global AUTH_TOKEN
global REDFISH_TOKEN
api_login = {"username": fitcreds()["api"][0]["admin_user"], "password": fitcreds()["api"][0]["admin_pass"]}
redfish_login = {"UserName": fitcreds()["api"][0]["admin_user"], "Password": fitcreds()["api"][0]["admin_pass"]}
try:
restful("https://" + fitargs()['rackhd_host'] + ":" + str(API_PORT) +
"/login", rest_action="post", rest_payload=api_login, rest_timeout=2)
except:
AUTH_TOKEN = "Unavailable"
return False
else:
api_data = restful("https://" + fitargs()['rackhd_host'] + ":" + str(API_PORT) +
"/login", rest_action="post", rest_payload=api_login, rest_timeout=2)
if api_data['status'] == 200:
AUTH_TOKEN = str(api_data['json']['token'])
redfish_data = restful("https://" + fitargs()['rackhd_host'] + ":" + str(API_PORT) +
"/redfish/v1/SessionService/Sessions",
rest_action="post", rest_payload=redfish_login, rest_timeout=2)
if 'x-auth-token' in redfish_data['headers']:
REDFISH_TOKEN = redfish_data['headers']['x-auth-token']
return True
else:
print "WARNING: Redfish API token not available."
else:
AUTH_TOKEN = "Unavailable"
return False
def rackhdapi(url_cmd, action='get', payload=None, timeout=None, headers=None):
    '''
    This routine will build URL for RackHD API, enable port, execute, and return data
    Example: rackhdapi('/api/current/nodes') - simple 'get' command
    Example: rackhdapi("/api/current/nodes/ID/dhcp/whitelist", action="post")
    :param url_cmd: url command for monorail api
    :param action: rest action (get/put/post/delete)
    :param payload: rest payload (defaults to an empty list, as before)
    :param timeout: rest timeout
    :param headers: rest_headers (defaults to an empty dict, as before)
    :return: {'json':result_data.json(), 'text':result_data.text,
        'status':result_data.status_code,
        'headers':result_data.headers.get('content-type'),
        'timeout':False}
    '''
    # bug fix: 'payload=[]' and 'headers={}' were shared mutable defaults;
    # restful() mutates the headers dict (content type, auth tokens), so
    # state leaked between unrelated calls. Use None sentinels instead.
    if payload is None:
        payload = []
    if headers is None:
        headers = {}
    # Automatic protocol selection: unless protocol is specified, test protocols, save settings globally
    global API_PROTOCOL
    global API_PORT
    if API_PROTOCOL == "None":
        if API_PORT == "None":
            API_PORT = str(fitports()['http'])
        # probe http; on failure (status 0 == timeout) fall back to https
        if restful("http://" + fitargs()['rackhd_host'] + ":" + str(API_PORT) + "/", rest_timeout=2)['status'] == 0:
            API_PROTOCOL = 'https'
            API_PORT = str(fitports()['https'])
        else:
            API_PROTOCOL = 'http'
            API_PORT = str(fitports()['http'])
    # Retrieve authentication token for the session
    if AUTH_TOKEN == "None":
        get_auth_token()
    return restful(API_PROTOCOL + "://" + fitargs()['rackhd_host'] + ":" + str(API_PORT) + url_cmd,
                   rest_action=action, rest_payload=payload, rest_timeout=timeout, rest_headers=headers)
def restful(url_command, rest_action='get', rest_payload=[], rest_timeout=None, sslverify=False, rest_headers={}):
'''
This routine executes a rest API call to the host.
:param url_command: the full URL for the command
:param rest_action: what the restful do (get/post/put/delete)
:param rest_payload: payload for rest request
:param rest_headers: headers (JSON dict)
:param rest_timeout: timeout for rest request
:param sslverify: ssl Verify (True/False)
:return: {'json':result_data.json(), 'text':result_data.text,
'status':result_data.status_code,
'headers':result_data.headers,
'timeout':False}
'''
result_data = None
# print URL and action
if VERBOSITY >= 4:
print "restful: Action = ", rest_action, ", URL = ", url_command
# prepare payload for XML output
payload_print = []
try:
json.dumps(rest_payload)
except:
payload_print = []
else:
payload_print = json.dumps(rest_payload, sort_keys=True, indent=4,)
if len(payload_print) > 4096:
payload_print = payload_print[0:4096] + '\n...truncated...\n'
if VERBOSITY >= 7 and rest_payload != []:
print "restful: Payload =\n", payload_print
rest_headers.update({"Content-Type": "application/json"})
if VERBOSITY >= 5:
print "restful: Request Headers =", rest_headers, "\n"
# If AUTH_TOKEN is set, add to header
if AUTH_TOKEN != "None" and AUTH_TOKEN != "Unavailable" and "authorization" not in rest_headers:
rest_headers.update({"authorization": "JWT " + AUTH_TOKEN, "X-Auth-Token": REDFISH_TOKEN})
# Perform rest request
try:
if rest_action == "get":
result_data = requests.get(url_command,
timeout=rest_timeout,
verify=sslverify,
headers=rest_headers)
if rest_action == "delete":
result_data = requests.delete(url_command,
data=json.dumps(rest_payload),
timeout=rest_timeout,
verify=sslverify,
headers=rest_headers)
if rest_action == "put":
result_data = requests.put(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "binary-put":
rest_headers.update({"Content-Type": "application/x-www-form-urlencoded"})
result_data = requests.put(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "text-put":
rest_headers.update({"Content-Type": "text/plain"})
result_data = requests.put(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "post":
result_data = requests.post(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "binary-post":
rest_headers.update({"Content-Type": "application/x-www-form-urlencoded"})
result_data = requests.post(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "text-post":
rest_headers.update({"Content-Type": "text/plain"})
result_data = requests.post(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "patch":
result_data = requests.patch(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
except requests.exceptions.Timeout:
return {'json': {}, 'text': '',
'status': 0,
'headers': '',
'timeout': True}
try:
result_data.json()
except ValueError:
if VERBOSITY >= 9:
print "restful: TEXT =\n"
print result_data.text
if VERBOSITY >= 6:
print "restful: Response Headers =", result_data.headers, "\n"
if VERBOSITY >= 4:
print "restful: Status code =", result_data.status_code, "\n"
return {'json': {}, 'text': result_data.text, 'status': result_data.status_code,
'headers': result_data.headers,
'timeout': False}
else:
if VERBOSITY >= 9:
print "restful: JSON = \n"
print json.dumps(result_data.json(), sort_keys=True, indent=4)
if VERBOSITY >= 6:
print "restful: Response Headers =", result_data.headers, "\n"
if VERBOSITY >= 4:
print "restful: Status code =", result_data.status_code, "\n"
return {'json': result_data.json(), 'text': result_data.text,
'status': result_data.status_code,
'headers': result_data.headers,
'timeout': False}
# Get the list of BMC IP addresses that we can find
def get_bmc_ips():
    """
    Populate the global BMC_LIST with reachable BMC IPs and the credentials
    that worked for each; scan only happens once per session.
    :return: number of entries in BMC_LIST
    """
    idlist = []  # list of unique dcmi node IDs
    # If we have already done this, use that list
    if len(BMC_LIST) == 0:
        # harvest candidate IPs from the local arp table
        ipscan = remote_shell('arp')['stdout'].split()
        for ipaddr in ipscan:
            # assumes BMCs sit on a 172.x.x.x network -- TODO confirm per stack
            if ipaddr[0:3] == "172" and remote_shell('ping -c 1 -w 5 ' + ipaddr)['exitcode'] == 0:
                # iterate through all known IPMI users
                for item in fitcreds()['bmc']:
                    # check BMC credentials
                    ipmicheck = remote_shell('ipmitool -I lanplus -H ' + ipaddr + ' -U ' + item['username'] +
                                             ' -P ' + item['password'] + ' -R 1 -N 3 chassis power status')
                    if ipmicheck['exitcode'] == 0:
                        # retrieve the ID string to dedupe multi-homed BMCs
                        return_code = remote_shell('ipmitool -I lanplus -H ' + ipaddr + ' -U ' + item['username'] +
                                                   ' -P ' + item['password'] + ' -R 1 -N 3 dcmi get_mc_id_string')
                        bmc_info = {"ip": ipaddr, "user": item['username'], "pw": item['password']}
                        if return_code['exitcode'] == 0 and return_code['stdout'] not in idlist:
                            # add to list if unique
                            idlist.append(return_code['stdout'])
                            BMC_LIST.append(bmc_info)
                            break
                        else:
                            # simulated nodes don't yet support dcmi, remove this else branch when supported
                            BMC_LIST.append(bmc_info)
                            break
    if VERBOSITY >= 6:
        print "get_bmc_ips: "
        print "**** BMC IP node count =", len(BMC_LIST), "****"
    return len(BMC_LIST)
# power on/off all compute nodes in the stack via the BMC
def power_control_all_nodes(state):
if state != "on" and state != "off":
print "power_control_all_nodes: invalid state " + state
return
# Get the list of BMCs that we know about
node_count = get_bmc_ips()
# Send power on/off to all of them
for bmc in BMC_LIST:
return_code = remote_shell('ipmitool -I lanplus -H ' + bmc['ip'] +
' -U ' + bmc['user'] + ' -P ' +
bmc['pw'] + ' -R 4 -N 3 chassis power ' + state)
if return_code['exitcode'] != 0:
print "Error powering " + state + " node: " + bmc['ip']
return node_count
def mongo_reset():
    """
    Reset the Mongo database on the host to defaults and restart the stack.
    :return: 0 if the conductor restarted cleanly, 1 otherwise
    """
    # stop services, wipe the database and runtime state, then restart
    for step in ('service onrack-conductor stop',
                 '/opt/onrack/bin/monorail stop',
                 "mongo pxe --eval 'db.dropDatabase\\\(\\\)'",
                 'rm -f /var/lib/dhcp/dhcpd.leases',
                 'rm -f /var/log/onrack-conductor-event.log',
                 '/opt/onrack/bin/monorail start'):
        remote_shell(step)
    if remote_shell('service onrack-conductor start')['exitcode'] > 0:
        return 1
    return 0
def appliance_reset():
    """Power-cycle the appliance chassis via ipmitool; returns its exit code."""
    reset_cmd = ("ipmitool -I lanplus -H " + fitargs()["bmc"] +
                 " -U root -P 1234567 chassis power reset")
    return subprocess.call(reset_cmd, shell=True)
def node_select():
# returns a list with valid compute node IDs that match fitargs()["sku"] in 'Name' or 'Model' field
# and matches node BMC MAC address in fitargs()["obmmac"] if specified
# Otherwise returns list of all IDs that are not 'Unknown' or 'Unmanaged'
nodelist = []
skuid = "None"
# check if user specified a single nodeid to run against
# user must know the nodeid and any check for a valid nodeid is skipped
if fitargs()["nodeid"] != 'None':
nodelist.append(fitargs()["nodeid"])
return nodelist
else:
# Find SKU ID
skumap = rackhdapi('/api/2.0/skus')
if skumap['status'] != 200:
print '**** Unable to retrieve SKU list via API.\n'
sys.exit(255)
for skuentry in skumap['json']:
if str(fitargs()['sku']) in json.dumps(skuentry):
skuid = skuentry['id']
# Collect node IDs
catalog = rackhdapi('/api/2.0/nodes')
if skumap['status'] != 200:
print '**** Unable to retrieve node list via API.\n'
sys.exit(255)
# Select node by SKU
for nodeentry in catalog['json']:
if fitargs()["sku"] == 'all':
# Select only managed compute nodes
if nodeentry['type'] == 'compute':
nodelist.append(nodeentry['id'])
else:
if 'sku' in nodeentry and skuid in json.dumps(nodeentry['sku']):
nodelist.append(nodeentry['id'])
# Select by node BMC MAC addr
if fitargs()["obmmac"] != 'all':
idlist = nodelist
nodelist = []
for member in idlist:
nodeentry = rackhdapi('/api/2.0/nodes/' + member)
if fitargs()["obmmac"] in json.dumps(nodeentry['json']):
nodelist = [member]
break
if VERBOSITY >= 6:
print "Node List:"
print nodelist, '\n'
if len(nodelist) == 0:
print '**** Empty node list.\n'
return nodelist
def list_skus():
    """Return the list of installed SKU names."""
    api_data = rackhdapi('/api/2.0/skus')['json']
    return [entry['name'] for entry in api_data]
def get_node_sku(nodeid):
    """
    Return the 'name' field of the node's SKU if available.
    Returns "unknown" when the node has no SKU or the SKU lookup fails;
    returns "" (the nodetype initializer) when the node query itself fails.
    :param nodeid: node identifier string
    """
    nodetype = ""
    sku = ""
    # get node info
    mondata = rackhdapi("/api/2.0/nodes/" + nodeid)
    if mondata['status'] == 200:
        # get the sku id contained in the node
        sku = mondata['json'].get("sku")
        if sku:
            skudata = rackhdapi(sku)
            if skudata['status'] == 200:
                nodetype = skudata['json'].get("name")
            else:
                if VERBOSITY >= 2:
                    errmsg = "Error: SKU API failed {}, return code {} ".format(sku, skudata['status'])
                    print errmsg
                return "unknown"
        else:
            # node record carries no SKU reference
            return "unknown"
    return nodetype
def check_active_workflows(nodeid):
    """
    Return True if any running or pending workflow is found on the node.
    :param nodeid: node identifier string
    :return: bool
    """
    workflows = rackhdapi('/api/2.0/nodes/' + nodeid + '/workflows')['json']
    for item in workflows:
        # older APIs report '_status', newer ones 'status'; accept either.
        # bug fix: the old code returned False as soon as one entry lacked
        # a 'status' key, silently skipping the remaining workflow entries
        if item.get('_status') in ['running', 'pending']:
            return True
        if item.get('status') in ['running', 'pending']:
            return True
    return False
def cancel_active_workflows(nodeid):
    """
    Cancel all active workflows on the node.
    :return: True if the API accepted the cancel request (HTTP 202)
    """
    apistatus = rackhdapi('/api/2.0/nodes/' + nodeid + '/workflows/action',
                          action='put', payload={"command": "cancel"})['status']
    return apistatus == 202
def apply_obm_settings(retry=30):
    """
    Install OBM (IPMI) credentials on all selected nodes by loading one
    credential-setting workflow graph per configured BMC credential set
    (plus RMM-port variants) and running them against the nodes in parallel.
    :param retry: number of 30-second polling rounds before giving up
    :return: True when every node ends with working OBM settings, else False
    """
    # New routine to install OBM credentials via workflows in parallel
    count = 0
    for creds in fitcreds()['bmc']:
        # create graph for setting OBM credentials
        payload = {
            "friendlyName": "IPMI" + str(count),
            "injectableName": 'Graph.Obm.Ipmi.CreateSettings' + str(count),
            "options": {
                "obm-ipmi-task": {
                    "user": creds["username"],
                    "password": creds["password"]
                }
            },
            "tasks": [
                {
                    "label": "obm-ipmi-task",
                    "taskName": "Task.Obm.Ipmi.CreateSettings"
                }
            ]
        }
        api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
        if api_data['status'] != 201:
            print "**** OBM workflow failed to load!"
            return False
        count += 1
    # Setup additional OBM settings for nodes that currently use RMM port (still same bmc username/password used)
    count = 0
    for creds in fitcreds()['bmc']:
        # create graph for setting OBM credentials for RMM
        payload = {
            "friendlyName": "RMM.IPMI" + str(count),
            "injectableName": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(count),
            "options": {
                "obm-ipmi-task": {
                    "ipmichannel": "3",
                    "user": creds["username"],
                    "password": creds["password"]
                }
            },
            "tasks": [
                {
                    "label": "obm-ipmi-task",
                    "taskName": "Task.Obm.Ipmi.CreateSettings"
                }
            ]
        }
        api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
        if api_data['status'] != 201:
            print "**** OBM workflow failed to load!"
            return False
        count += 1
    # run each OBM credential workflow on each node in parallel until success
    nodestatus = {}  # dictionary with node IDs and status of each node
    for dummy in range(0, retry):
        nodelist = node_select()
        # register any nodes not seen in a previous round
        for node in nodelist:
            if node not in nodestatus:
                nodestatus[node] = {"status": "pending", "instanceId": "", "sku": get_node_sku(node), "retry": 0}
        for num in range(0, count):
            for node in nodelist:
                # try workflow on every node still pending
                if nodestatus[node]['status'] == "pending":
                    skuid = rackhdapi('/api/2.0/nodes/' + node)['json'].get("sku")
                    if skuid:
                        if nodestatus[node]['sku'] == "unknown":
                            nodestatus[node].update({"sku": get_node_sku(node)})
                        # RMM-wired nodes get the RMM variant of the graph
                        skudata = rackhdapi(skuid)['text']
                        if "rmm.data.MAC" in skudata:
                            workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(num)}
                        else:
                            workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings' + str(num)}
                        result = rackhdapi("/api/2.0/nodes/" + node + "/workflows", action="post", payload=workflow)
                        if result['status'] == 201:
                            nodestatus[node].update({"status": "running", "instanceId": result['json']["instanceId"]})
        for node in nodelist:
            # check OBM workflow status
            if nodestatus[node]['status'] == "running":
                nodestatus[node]['retry'] += 1
                state_data = rackhdapi("/api/2.0/workflows/" + nodestatus[node]['instanceId'])
                if state_data['status'] == 200:
                    # older APIs report '_status', newer ones 'status'
                    if "_status" in state_data['json']:
                        state = state_data['json']['_status']
                    else:
                        state = state_data['json']['status']
                    if state == "succeeded":
                        nodestatus[node]['status'] = "succeeded"
                    if state in ["failed", "cancelled", "timeout"]:
                        # mark pending so the next credential set is tried
                        nodestatus[node]['status'] = "pending"
                        # if the workflow left an invalid OBM, delete it
                        result = rackhdapi("/api/2.0/nodes/" + node)
                        if result['status'] == 200:
                            if result['json']['obms']:
                                for ref in result['json']['obms']:
                                    obmref = ref.get('ref')
                                    if obmref:
                                        rackhdapi(obmref, action="delete")
        if VERBOSITY >= 4:
            print "**** Node(s) OBM status:\n", json.dumps(nodestatus, sort_keys=True, indent=4,)
        if "pending" not in str(nodestatus) and "running" not in str(nodestatus):
            # All OBM settings successful
            return True
        time.sleep(30)
    # Failures occurred
    print "**** Node(s) OBM settings failed."
    return False
def apply_obm_settings_seq():
# legacy routine to install OBM credentials via workflows sequentially one-at-a-time
count = 0
for creds in fitcreds()['bmc']:
# greate graph for setting OBM credentials
payload = {
"friendlyName": "IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings' + str(count),
"options": {
"obm-ipmi-task": {
"user": creds["username"],
"password": creds["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
# Setup additional OBM settings for nodes that currently use RMM port (still same bmc username/password used)
count = 0
for creds in fitcreds()['bmc']:
# greate graph for setting OBM credentials for RMM
payload = {
"friendlyName": "RMM.IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(count),
"options": {
"obm-ipmi-task": {
"ipmichannel": "3",
"user": creds["username"],
"password": creds["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
# run each OBM workflow against each node until success
nodelist = node_select()
failedlist = []
for node in nodelist:
for num in range(0, count):
nodestatus = ""
wfstatus = ""
skuid = rackhdapi('/api/2.0/nodes/' + node)['json'].get("sku")
# Check is sku is empty
sku = skuid.rstrip("/api/2.0/skus/")
if sku:
skudata = rackhdapi(skuid)['text']
if "rmm.data.MAC" in skudata:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(num)}
else:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings' + str(num)}
else:
print "*** SKU not set for node ", node
nodestatus = "failed"
break
# wait for existing workflow to complete
for dummy in range(0, 60):
print "*** Using workflow: ", workflow
result = rackhdapi("/api/2.0/nodes/" + node + "/workflows", action="post", payload=workflow)
if result['status'] != 201:
time.sleep(5)
elif dummy == 60:
print "*** Workflow failed to start"
wfstatus = "failed"
else:
break
if wfstatus != "failed":
# wait for OBM workflow to complete
counter = 0
for counter in range(0, 60):
time.sleep(10)
state_data = rackhdapi("/api/2.0/workflows/" + result['json']["instanceId"])
if state_data['status'] == 200:
if "_status" in state_data['json']:
nodestatus = state_data['json']['_status']
else:
nodestatus = state_data['json']['status']
if nodestatus != "running" and nodestatus != "pending":
break
if nodestatus == "succeeded":
print "*** Succeeded on workflow ", workflow
break
if counter == 60:
# print "Timed out status", nodestatus
nodestatus = "failed"
print "*** Node failed OBM settings - timeout:", node
print "*** Failed on workflow ", workflow
# check final loop status for node workflow
if wfstatus == "failed" or nodestatus == "failed":
failedlist.append(node)
# cleanup failed nodes OBM settings on nodes, need to remove failed settings
for node in failedlist:
result = rackhdapi("/api/2.0/nodes/" + node)
if result['status'] == 200:
if result['json']['obms']:
obms = result['json']['obms'][0]
obmref = obms.get('ref')
if obmref:
result = rackhdapi(obmref, action="delete")
if result['status'] != 204:
print "*** Warning: failed to delete invalid OBM setting ", obmref
if len(failedlist) > 0:
print "**** Nodes failed OBM settings:", failedlist
return False
return True
def run_nose(nosepath=None):
    """
    Run nosetests from the wrapper using path spec 'nosepath' (defaults to
    the 'test' entry of the parsed command-line config).
    :return: accumulated nosetests exit code (0 on success)
    """
    if not nosepath:
        nosepath = fitcfg()['cmd-args-list']['test']
    # this routine runs nosetests from wrapper using path spec 'nosepath'
    def _noserunner(pathspecs, noseopts):
        # invoke nosetests once over the given path specs with a minimal,
        # explicit environment (config path forwarded via FIT_CONFIG)
        xmlfile = str(time.time()) + ".xml"  # XML report file name
        env = {
            'FIT_CONFIG': mkcfg().get_path(),
            'HOME': os.environ['HOME'],
            'PATH': os.environ['PATH'],
            'PYTHONPATH': ':'.join(sys.path)
        }
        argv = ['nosetests']
        argv.extend(noseopts)
        argv.append('--xunit-file')
        argv.append(xmlfile)
        argv.extend(pathspecs)
        # forward every command-line option we did not consume ourselves
        argv.extend(fitcfg()['cmd-args-list']['unhandled_arguments'])
        return subprocess.call(argv, env=env)
    exitcode = 0
    # set nose options
    noseopts = ['--exe', '--with-nosedep', '--with-stream-monitor']
    if fitargs()['group'] != 'all' and fitargs()['group'] != '':
        # restrict to tests tagged with the requested attribute group
        noseopts.append('-a')
        noseopts.append(str(fitargs()['group']))
    if fitargs()['list'] is True or fitargs()['list'] == "True":
        # listing mode: collect test names only, silence normal output
        noseopts.append('--collect-only')
        fitargs()['v'] = 0
        print "\nTest Listing for:", fitargs()['test']
        print "----------------------------------------------------------------------"
    if fitargs()['xunit'] is True or fitargs()['xunit'] == "True":
        noseopts.append('--with-xunit')
    else:
        noseopts.append('-s')
        noseopts.append('-v')
    # if nosepath is a directory, recurse through subdirs else run single test file
    if os.path.isdir(nosepath):
        # Skip the CIT test directories that match these expressions
        regex = '(tests/*$)|(tests/api-cit/*)|(tests/api$)|(tests/api/.*)'
        pathspecs = []
        for root, _, _ in os.walk(nosepath):
            if not re.search(regex, root):
                pathspecs.append(root)
        exitcode += _noserunner(pathspecs, noseopts)
    else:
        exitcode += _noserunner([nosepath], noseopts)
    return exitcode
def _run_nose_help():
    """
    Run 'nosetests --help' and return its exit code. Used ONLY by mkargs()
    when it is handling --help itself.
    """
    return subprocess.call(['nosetests', '--help'])
def run_from_module(file_name):
    """
    Entry point for 'if __name__ == "__main__"' style invocations within
    individual test files: run nose against the given module file.
    """
    run_nose(file_name)
# determine who imported us.
# Inspect the caller's stack frame to learn which file performed the import;
# this decides how the command line should be interpreted below.
importer = inspect.getframeinfo(inspect.getouterframes(inspect.currentframe())[1][0])[0]
if 'run_tests.py' in importer:
    # we are being imported through run_tests.py (the fit wrapper)
    # process sys.args as received by run_tests.py
    compose_config(True)
else:
    # we are being imported directly through a unittest module
    # args will be nose-base args
    compose_config(False)
| {
"content_hash": "af1549838e6a8976cde36212f779aa85",
"timestamp": "",
"source": "github",
"line_count": 1228,
"max_line_length": 125,
"avg_line_length": 39.11807817589577,
"alnum_prop": 0.5290713408414347,
"repo_name": "tannoa2/RackHD",
"id": "9c6e2bfe31f991ce189fb53fb9c9edd499b4a6ec",
"size": "48037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/common/fit_common.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "908121"
},
{
"name": "Ruby",
"bytes": "10704"
},
{
"name": "Shell",
"bytes": "65199"
}
],
"symlink_target": ""
} |
import collections
import contextlib
import itertools
from django.utils.translation import ugettext_lazy as _, pgettext
from configuration.configuration import QuestionnaireQuestion, \
QuestionnaireQuestiongroup
from summary.parsers.questionnaire import QuestionnaireParser
class Technology2015Parser(QuestionnaireParser):
    """
    Specific methods for technologies 2015.
    Each ``get_*`` method is a generator that turns one questionnaire
    section into rows for the summary template, delegating formatting to
    helpers inherited from ``QuestionnaireParser``
    (``_get_qg_selected_value``, ``_qg_scale_format``).
    """
    def get_human_env_access(self, child: QuestionnaireQuestion):
        """Yield scale-formatted rows for the 'access' section (e.g. 5.9)."""
        # e.g. 5.9 - get all info from parent questiongroup
        # combine children from questiongroup with 'other'
        children = itertools.chain(
            *[qg.children for qg in child.questiongroup.parent_object.questiongroups]
        )
        for child in children:
            # Skip children without an integer answer.
            try:
                value = int(self._get_qg_selected_value(child))
            except (ValueError, TypeError):
                continue
            if not child.keyword.startswith('tech_access_other_'):
                # defined fields
                yield from self._qg_scale_format(
                    child=child,
                    value=value,
                    label_left=child.choices[0][1],
                    label_right=child.choices[-1][1]
                )
            elif child.keyword == 'tech_access_other_measure':
                # 'other': the label is the free-text 'specify' answer.
                yield from self._qg_scale_format(
                    child=child,
                    label=self._get_qg_selected_value(
                        child, all_values=True
                    ).get('tech_access_other_specify'),
                    value=value,
                    label_left=child.choices[0][1],
                    label_right=child.choices[-1][1]
                )
    def get_tech_costbenefit(self, child: QuestionnaireQuestion):
        """Yield scale rows for the cost/benefit radio questions (e.g. 6.4)."""
        # e.g. 6.4. - all info from radio buttons
        values = self._get_qg_selected_value(child, all_values=True)
        for child in child.questiongroup.children:
            str_value = values.get(child.keyword, '')
            # in the template, the numeric position of the value in the
            # 'range' is required.
            try:
                choice_keys = dict(child.choices).keys()
                # 1-based position of the selected choice.
                value = list(choice_keys).index(str_value) + 1
            except ValueError:
                continue
            yield from self._qg_scale_format(
                child=child,
                value=value,
                label_left=child.choices[0][1],
                label_right=child.choices[-1][1]
            )
    def get_impact(self, child: QuestionnaireQuestion, has_siblings: bool):
        """
        The last block (off-site impacts) has no siblings, all other blocks
        have nested questiongroups.
        Also, as 'other' may be repeating, 'other_specify' is in a loop as well.
        Sorry!
        """
        if has_siblings:
            categories = child.questiongroup.parent_object.parent_object.subcategories
            questiongroups = itertools.chain(
                *[category.questiongroups for category in categories]
            )
        else:
            questiongroups = child.questiongroup.parent_object.questiongroups
        for group in questiongroups:
            if len(group.children) < 3:
                # omit 'tech_specify'
                continue
            if group.children[0].keyword == 'tech_impacts_other_specify':
                # Repeating 'other' group: one row per stored answer set.
                # Children are accessed positionally: 0=specify, 1=left label,
                # 2=value, 3=right label, 4=before, 5=after, 6=comment.
                for items in self.values.get(group.keyword, []):
                    value_child = 2
                    label = items.get(group.children[0].keyword)
                    value = items.get(group.children[value_child].keyword)
                    label_left = items.get(group.children[1].keyword)
                    label_right = items.get(group.children[3].keyword)
                    before_label = _(group.children[4].label)
                    before_value = items.get(group.children[4].keyword)
                    after_label = _(group.children[5].label)
                    after_value = items.get(group.children[5].keyword)
                    comment_value = items.get(group.children[6].keyword)
                    yield from self._get_impact_row(
                        child=group.children[value_child], label=label,
                        value=value, label_left=label_left,
                        label_right=label_right, before_value=before_value,
                        before_label=before_label, after_value=after_value,
                        after_label=after_label, comment_value=comment_value
                    )
            else:
                # Defined impact group: value is the first child, followed
                # positionally by before (1), after (2) and comment (3).
                value_child = 0
                label = group.children[value_child].label
                value = self._get_qg_selected_value(group.children[value_child])
                label_left = group.children[0].additional_translations.get('label_left')
                label_right = group.children[0].additional_translations.get('label_right')
                before_label = _(group.children[1].label)
                before_value = self._get_qg_selected_value(group.children[1])
                after_label = _(group.children[2].label)
                after_value = self._get_qg_selected_value(group.children[2])
                comment_value = self._get_qg_selected_value(group.children[3])
                yield from self._get_impact_row(
                    child=group.children[value_child], label=label,
                    value=value, label_left=label_left, label_right=label_right,
                    before_value=before_value, before_label=before_label,
                    after_value=after_value, after_label=after_label,
                    comment_value=comment_value,
                )
    def _get_impact_row(self, child: QuestionnaireQuestion, label: str,
                        value: int, label_left: str, label_right: str,
                        before_value: str, before_label: str, after_value: str,
                        after_label: str, comment_value: str):
        """Yield one formatted impact row; skips rows without an int value."""
        if value and isinstance(value, int):
            comment = ''
            # Fold the before/after quantities into the comment text.
            if before_value or after_value:
                comment = f'{before_label}: {before_value}\n{after_label}: {after_value}'
            # if a comment is set, add it
            if comment_value:
                comment += '\n' + (comment_value or '')
            yield from self._qg_scale_format(
                child=child,
                value=value,
                label=label,
                label_left=label_left,
                label_right=label_right,
                comment=comment
            )
    def get_climate_change(self, child: QuestionnaireQuestion):
        """Return a list of {'title', 'items'} groups for climate exposure."""
        # based on this first question, get all questiongroups with at least
        # one filled in question.
        climate_change_categories = child.questiongroup.parent_object. \
            parent_object.parent_object
        groups = []
        for main_category in filter(self._subcategory_has_value,
                                    climate_change_categories.subcategories):
            # A store for all 'lines' for the main groups
            items = []
            questiongroups = [subcategory.questiongroups for subcategory in
                              main_category.subcategories]
            for group in itertools.chain(*questiongroups):
                values = self.values.get(group.keyword, [])
                # if more than one element is available, a set of 'sibling'
                # questions was filled in. Duplicate this question, resulting
                # in one line per value/answer.
                for value in values:
                    items.append(self._prepare_climate_change_row(group, **value))
            groups.append({
                'title': main_category.label,
                'items': items
            })
        return groups
    def _prepare_climate_change_row(self, group: QuestionnaireQuestiongroup, **values):
        """
        Create elements for a single line. The structure of questions varies
        between all elements, regarding number of questions and content/scale of
        questions.
        """
        label = group.label
        comment = ''
        # One set of questions equals one line in the summary. The field names
        # are stable/repeating so string comparison is nasty but semi-ok.
        for question in group.questions:
            if question.keyword == 'tech_exposure_incrdecr':
                # Indicator for direction of development (increased/decreased)
                question_label = values.get(question.keyword)
                if question_label:
                    # context is important here; a label may be named differently according to
                    # configuration,
                    translated = pgettext(f'{self.config_object.keyword} label', question_label)
                    label = f'{label} {translated}'
            elif question.keyword == 'tech_exposure_sensitivity':
                # The actual value for our range-field.
                # The first and the last choice are irrelevant to this
                # mode of layout. If the selected value is empty or unknown,
                # this is added as comment.
                choice_keys = list(
                    collections.OrderedDict(question.choices).keys()
                )
                # suppress() only shields the first remove(); if '' is absent
                # the 'cope_unknown' removal is skipped as well.
                with contextlib.suppress(ValueError):
                    choice_keys.remove('')
                    choice_keys.remove('cope_unknown')
                value = values.get(question.keyword)
                if value not in choice_keys:
                    string_value = dict(question.choices).get(value)
                    if string_value:
                        comment += _(' Answer: {}').format(string_value)
                else:
                    value = choice_keys.index(value)
            elif 'other' in question.keyword:
                label = values.get(question.keyword, '')
            else:
                # All other fields, such as 'season' go into the comments.
                comment_key = values.get(question.keyword)
                if not group.label.startswith('other'):
                    comment_key = dict(question.choices).get(comment_key)
                if comment_key:
                    comment += '{label}: {value}'.format(
                        label=question.label,
                        value=comment_key
                    )
        # NOTE(review): 'choice_keys', 'value' and 'question' are only bound if
        # the group contains a 'tech_exposure_sensitivity' question (resp. any
        # question at all); otherwise this raises NameError — confirm that the
        # configuration guarantees this question is always present.
        return {
            'label': label,
            'range': range(0, len(choice_keys)),
            'min': question.choices[1][1],
            'max': question.choices[-2][1],
            'selected': value,
            'comment': comment
        }
    def _subcategory_has_value(self, subcategory):
        """
        Filter only questiongroups with at least one filled in question.
        """
        questiongroups = itertools.chain(
            *[subcategory.questiongroups for subcategory in subcategory.subcategories]
        )
        qg_keywords = [qg.keyword for qg in questiongroups]
        # True when at least one of the group keywords appears in the answers.
        return not set(qg_keywords).isdisjoint(set(self.values.keys()))
| {
"content_hash": "290e2cf2a49af5de7590a1b279beff7c",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 96,
"avg_line_length": 43.888888888888886,
"alnum_prop": 0.5487341772151899,
"repo_name": "CDE-UNIBE/qcat",
"id": "7f5388ee47f8a136a08184b800dbd7b4be79ca98",
"size": "11060",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/summary/parsers/technologies_2015.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1098"
},
{
"name": "HTML",
"bytes": "823938"
},
{
"name": "Handlebars",
"bytes": "224139"
},
{
"name": "JavaScript",
"bytes": "153067"
},
{
"name": "Python",
"bytes": "3515948"
},
{
"name": "SCSS",
"bytes": "165400"
},
{
"name": "Shell",
"bytes": "1943"
}
],
"symlink_target": ""
} |
import importlib
import warnings
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from docutils import nodes
from sphinx.util.nodes import nested_parse_with_titles
def type_description(schema):
    """Return a short, human-readable type label for a JSON-schema fragment."""
    if schema == {}:
        return 'any'
    if "$ref" in schema:
        # Cross-reference another definition by its terminal name.
        ref_name = schema['$ref'].split('/')[-1]
        return ":class:`{0}`".format(ref_name)
    if 'enum' in schema:
        options = ', '.join(repr(option) for option in schema['enum'])
        return "[{0}]".format(options)
    if 'type' in schema:
        schema_type = schema['type']
        if isinstance(schema_type, list):
            return '[{0}]'.format(', '.join(schema_type))
        if schema_type == 'array':
            item_desc = type_description(schema.get('items', {}))
            return 'array({0})'.format(item_desc)
        if schema_type == 'object':
            return 'dict'
        return schema_type
    if 'anyOf' in schema:
        sub_descriptions = (type_description(sub) for sub in schema['anyOf'])
        return "anyOf({0})".format(', '.join(sub_descriptions))
    warnings.warn('cannot infer type for schema with keys {0}'
                  ''.format(schema.keys()))
    return '--'
def iter_properties(cls):
    """Iterate over (property, type, description)"""
    import m2r  # convert markdown to rst
    resolved = cls.resolve_references(cls._schema)
    for name, subschema in resolved.get('properties', {}).items():
        description = m2r.convert(subschema.get('description', ' '))
        yield (name, type_description(subschema), description)
def build_rst_table(rows, titles):
    """Build an rst table from a table of entries (i.e. list of lists)"""
    ncols = len(titles)
    assert all(len(row) == ncols for row in rows)
    # Column widths are driven by the body entries (not the titles).
    widths = [max(map(len, column)) for column in zip(*rows)]

    def render(entries, fill=' '):
        padded = (text.ljust(width, fill)
                  for width, text in zip(widths, entries))
        return ' '.join(padded)

    rule = render(ncols * [''], '=')
    lines = [rule, render(titles), rule]
    lines.extend(render(row) for row in rows)
    lines.append(rule)
    return lines
def construct_schema_table(cls):
    """Construct an RST table describing the properties within a schema."""
    names, types, descriptions = zip(*iter_properties(cls))
    # Flatten multi-line descriptions so each row stays on a single line.
    flattened = [text.replace('\n', ' ') for text in descriptions]
    rows = list(zip(names, types, flattened))
    return build_rst_table(rows, ["Property", "Type", "Description"])
class AltairObjectTableDirective(Directive):
    """
    Directive for building a table of attribute descriptions.

    Usage:

    .. altair-object-table:: altair.MarkConfig

    """
    has_content = False
    required_arguments = 1

    def run(self):
        # Resolve the dotted path argument to the schema class.
        full_name = self.arguments[0]
        module_name, class_name = full_name.rsplit('.', 1)
        target_class = getattr(importlib.import_module(module_name), class_name)

        # Build the RST table and feed it line-by-line to the parser.
        view = ViewList()
        for table_line in construct_schema_table(target_class):
            view.append(table_line, "<altair-class>")

        container = nodes.paragraph()
        container.document = self.state.document
        nested_parse_with_titles(self.state, view, container)
        return container.children
def setup(app):
    # Sphinx extension entry point: register the directive with the app.
    app.add_directive('altair-object-table', AltairObjectTableDirective)
| {
"content_hash": "9a6d7e7b4dd618bc3f8e9ad1593d6939",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 81,
"avg_line_length": 32.33644859813084,
"alnum_prop": 0.6014450867052024,
"repo_name": "ellisonbg/altair",
"id": "958a0fec4a0a29e25caf108fe421b986e9ef8156",
"size": "3460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altair/sphinxext/schematable.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136763"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "1150719"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import glob
import os
import shutil
import subprocess
import sys
from lib.util import get_electron_branding, rm_rf, scoped_cwd
PROJECT_NAME = get_electron_branding()['project_name']
PRODUCT_NAME = get_electron_branding()['product_name']
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
SNAPSHOT_SOURCE = os.path.join(SOURCE_ROOT, 'spec', 'fixtures', 'testsnap.js')
def main():
  """Create custom V8 snapshots with mksnapshot and verify Electron loads them.

  Returns the exit code: 0 on success, otherwise the failing subprocess's
  return code.
  """
  args = parse_args()
  source_root = os.path.abspath(args.source_root)
  initial_app_path = os.path.join(source_root, args.build_dir)
  # Work on a copy so snapshot files never pollute the real build dir.
  app_path = create_app_copy(initial_app_path)
  returncode = 0
  try:
    with scoped_cwd(app_path):
      if args.snapshot_files_dir is None:
        # Generate the startup blob from the test fixture script.
        mkargs = [ get_binary_path('mksnapshot', app_path), \
                   SNAPSHOT_SOURCE, '--startup_blob', 'snapshot_blob.bin', \
                   '--turbo_instruction_scheduling',
                   '--no-native-code-counters' ]
        subprocess.check_call(mkargs)
        print('ok mksnapshot successfully created snapshot_blob.bin.')
        # Generate the V8 context snapshot from the startup blob.
        context_snapshot = 'v8_context_snapshot.bin'
        context_snapshot_path = os.path.join(app_path, context_snapshot)
        gen_binary = get_binary_path('v8_context_snapshot_generator', \
                                     app_path)
        genargs = [ gen_binary, \
                    '--output_file={0}'.format(context_snapshot_path) ]
        subprocess.check_call(genargs)
        print('ok v8_context_snapshot_generator successfully created ' \
              + context_snapshot)
        if args.create_snapshot_only:
          return 0
      else:
        # Reuse pre-generated snapshot files instead of creating new ones.
        gen_bin_path = os.path.join(args.snapshot_files_dir, '*.bin')
        generated_bin_files = glob.glob(gen_bin_path)
        for bin_file in generated_bin_files:
          shutil.copy2(bin_file, app_path)
      test_path = os.path.join(SOURCE_ROOT, 'spec', 'fixtures', \
                               'snapshot-items-available')
      if sys.platform == 'darwin':
        # On macOS the snapshots must live inside the framework bundle.
        bin_files = glob.glob(os.path.join(app_path, '*.bin'))
        app_dir = os.path.join(app_path, '{0}.app'.format(PRODUCT_NAME))
        electron = os.path.join(app_dir, 'Contents', 'MacOS', PRODUCT_NAME)
        bin_out_path = os.path.join(app_dir, 'Contents', 'Frameworks',
                                    '{0} Framework.framework'.format(PROJECT_NAME),
                                    'Resources')
        for bin_file in bin_files:
          shutil.copy2(bin_file, bin_out_path)
      elif sys.platform == 'win32':
        electron = os.path.join(app_path, '{0}.exe'.format(PROJECT_NAME))
      else:
        electron = os.path.join(app_path, PROJECT_NAME)
      # Launch Electron against the fixture that exercises the snapshot.
      subprocess.check_call([electron, test_path])
      print('ok successfully used custom snapshot.')
  except subprocess.CalledProcessError as e:
    print('not ok an error was encountered while testing mksnapshot.')
    print(e)
    returncode = e.returncode
  except KeyboardInterrupt:
    # NOTE(review): an interrupt deliberately reports success (returncode 0)
    # despite printing 'Other error' — confirm this is intended.
    print('Other error')
    returncode = 0
  print('Returning with error code: {0}'.format(returncode))
  return returncode
# Create copy of app to install custom snapshot
def create_app_copy(initial_app_path):
  """Clone the build folder into a sibling '<name>-mksnapshot-test' folder."""
  print('Creating copy of app for testing')
  parent_dir = os.path.dirname(initial_app_path)
  copy_name = os.path.basename(initial_app_path) + '-mksnapshot-test'
  app_path = os.path.join(parent_dir, copy_name)

  # Start from a clean slate, keeping symlinks intact in the copy.
  rm_rf(app_path)
  shutil.copytree(initial_app_path, app_path, symlinks=True)
  return app_path
def get_binary_path(binary_name, root_path):
  """Return the path to a helper binary, appending '.exe' on Windows."""
  suffix = '.exe' if sys.platform == 'win32' else ''
  return os.path.join(root_path, binary_name + suffix)
def parse_args():
  """Define and parse the command-line arguments for this script."""
  parser = argparse.ArgumentParser(description='Test mksnapshot')
  # Only --build-dir is mandatory; everything else has a sensible default.
  parser.add_argument('-b', '--build-dir',
                      help='Path to an Electron build folder. \
                      Relative to the --source-root.',
                      default=None,
                      required=True)
  parser.add_argument('--create-snapshot-only',
                      help='Just create snapshot files, but do not run test',
                      action='store_true')
  parser.add_argument('--snapshot-files-dir',
                      help='Directory containing snapshot files to use \
                      for testing',
                      default=None,
                      required=False)
  parser.add_argument('--source-root',
                      default=SOURCE_ROOT,
                      required=False)
  return parser.parse_args()
# Script entry point: propagate main()'s result as the process exit code.
if __name__ == '__main__':
  sys.exit(main())
| {
"content_hash": "13334e7f3eff3614980c27b9c07979c9",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 39.016806722689076,
"alnum_prop": 0.6073659272022399,
"repo_name": "the-ress/electron",
"id": "6530265bde8982d9859b78a0ef2f7a33938ba3d2",
"size": "4665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/verify-mksnapshot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1255"
},
{
"name": "C++",
"bytes": "2635834"
},
{
"name": "CSS",
"bytes": "2379"
},
{
"name": "Dockerfile",
"bytes": "1395"
},
{
"name": "HCL",
"bytes": "244"
},
{
"name": "HTML",
"bytes": "14882"
},
{
"name": "JavaScript",
"bytes": "946728"
},
{
"name": "Objective-C",
"bytes": "48789"
},
{
"name": "Objective-C++",
"bytes": "331379"
},
{
"name": "Python",
"bytes": "97441"
},
{
"name": "Shell",
"bytes": "23038"
},
{
"name": "TypeScript",
"bytes": "381037"
}
],
"symlink_target": ""
} |
import theano
import lasagne
import numpy as np
import theano.tensor as T
class ModRelu(lasagne.layers.Layer):
    """modReLU activation layer for complex-valued activations.

    The last axis of the input packs ``n_hidden`` real parts followed by
    ``n_hidden`` imaginary parts. The activation rectifies the modulus of
    each complex unit with a learnable per-unit bias ``hb`` while
    preserving the phase:

        out = z * max(|z| + b, 0) / |z|

    Fix: removed leftover debug ``print`` statements from ``__init__`` and
    ``get_output_for``; the epsilon stabiliser is now used consistently via
    the named ``eps`` constant (same value as before, 1e-5).
    """

    def __init__(self, incoming, b=lasagne.init.Uniform(range=0.01), **kwargs):
        super(ModRelu, self).__init__(incoming, **kwargs)
        # The last axis packs (real, imag) halves, so complex width is half.
        self.n_hidden = self.input_shape[-1] // 2
        # Learnable bias added to the modulus before rectification.
        self.hb = self.add_param(b, (self.n_hidden,), name='hb', regularizable=False, trainable=True)

    def get_output_for(self, input, **kwargs):
        eps = 1e-5
        input_flattened = input.reshape((-1, self.n_hidden*2))
        # Index permutation that swaps the real and imaginary halves, so
        # x[:, i]**2 + x[:, swap[i]]**2 is re^2 + im^2 for every unit.
        swap_re_im = np.concatenate((np.arange(self.n_hidden, 2*self.n_hidden), np.arange(self.n_hidden)))
        # |z| with eps inside the sqrt to keep the gradient finite at 0.
        modulus = T.sqrt(input_flattened**2 + input_flattened[:, swap_re_im]**2 + eps)
        # Tile hb over both halves; eps guards the division as well.
        rescale = T.maximum(modulus + T.tile(self.hb, [2]).dimshuffle('x', 0), 0.) / (modulus + eps)
        out = (input_flattened * rescale).reshape(input.shape)
        return out
"content_hash": "356cc824b286aa24ca432eb41147438b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 106,
"avg_line_length": 40.75,
"alnum_prop": 0.6288343558282209,
"repo_name": "Nehoroshiy/urnn",
"id": "212b00ad51a59bf37b5fbd14a807ad45436df330",
"size": "978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "layers/modrelu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "340613"
},
{
"name": "Python",
"bytes": "109409"
}
],
"symlink_target": ""
} |
'''OpenGL extension VERSION.GL_2_1
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_VERSION_GL_2_1'
_DEPRECATED = False
# Pixel buffer object (PBO) targets and their binding queries.
GL_PIXEL_PACK_BUFFER = constant.Constant( 'GL_PIXEL_PACK_BUFFER', 0x88EB )
GL_PIXEL_UNPACK_BUFFER = constant.Constant( 'GL_PIXEL_UNPACK_BUFFER', 0x88EC )
GL_PIXEL_PACK_BUFFER_BINDING = constant.Constant( 'GL_PIXEL_PACK_BUFFER_BINDING', 0x88ED )
GL_PIXEL_UNPACK_BUFFER_BINDING = constant.Constant( 'GL_PIXEL_UNPACK_BUFFER_BINDING', 0x88EF )
# Non-square float matrix uniform types.
GL_FLOAT_MAT2x3 = constant.Constant( 'GL_FLOAT_MAT2x3', 0x8B65 )
GL_FLOAT_MAT2x4 = constant.Constant( 'GL_FLOAT_MAT2x4', 0x8B66 )
GL_FLOAT_MAT3x2 = constant.Constant( 'GL_FLOAT_MAT3x2', 0x8B67 )
GL_FLOAT_MAT3x4 = constant.Constant( 'GL_FLOAT_MAT3x4', 0x8B68 )
GL_FLOAT_MAT4x2 = constant.Constant( 'GL_FLOAT_MAT4x2', 0x8B69 )
GL_FLOAT_MAT4x3 = constant.Constant( 'GL_FLOAT_MAT4x3', 0x8B6A )
# sRGB texture format enumerants.
GL_SRGB = constant.Constant( 'GL_SRGB', 0x8C40 )
GL_SRGB8 = constant.Constant( 'GL_SRGB8', 0x8C41 )
GL_SRGB_ALPHA = constant.Constant( 'GL_SRGB_ALPHA', 0x8C42 )
GL_SRGB8_ALPHA8 = constant.Constant( 'GL_SRGB8_ALPHA8', 0x8C43 )
GL_COMPRESSED_SRGB = constant.Constant( 'GL_COMPRESSED_SRGB', 0x8C48 )
GL_COMPRESSED_SRGB_ALPHA = constant.Constant( 'GL_COMPRESSED_SRGB_ALPHA', 0x8C49 )
# Entry points for loading non-square matrix uniforms (glUniformMatrix{n}x{m}fv).
glUniformMatrix2x3fv = platform.createExtensionFunction(
'glUniformMatrix2x3fv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLsizei,constants.GLboolean,arrays.GLfloatArray,),
doc='glUniformMatrix2x3fv(GLint(location), GLsizei(count), GLboolean(transpose), GLfloatArray(value)) -> None',
argNames=('location','count','transpose','value',),
deprecated=_DEPRECATED,
)
glUniformMatrix3x2fv = platform.createExtensionFunction(
'glUniformMatrix3x2fv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLsizei,constants.GLboolean,arrays.GLfloatArray,),
doc='glUniformMatrix3x2fv(GLint(location), GLsizei(count), GLboolean(transpose), GLfloatArray(value)) -> None',
argNames=('location','count','transpose','value',),
deprecated=_DEPRECATED,
)
glUniformMatrix2x4fv = platform.createExtensionFunction(
'glUniformMatrix2x4fv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLsizei,constants.GLboolean,arrays.GLfloatArray,),
doc='glUniformMatrix2x4fv(GLint(location), GLsizei(count), GLboolean(transpose), GLfloatArray(value)) -> None',
argNames=('location','count','transpose','value',),
deprecated=_DEPRECATED,
)
glUniformMatrix4x2fv = platform.createExtensionFunction(
'glUniformMatrix4x2fv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLsizei,constants.GLboolean,arrays.GLfloatArray,),
doc='glUniformMatrix4x2fv(GLint(location), GLsizei(count), GLboolean(transpose), GLfloatArray(value)) -> None',
argNames=('location','count','transpose','value',),
deprecated=_DEPRECATED,
)
glUniformMatrix3x4fv = platform.createExtensionFunction(
'glUniformMatrix3x4fv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLsizei,constants.GLboolean,arrays.GLfloatArray,),
doc='glUniformMatrix3x4fv(GLint(location), GLsizei(count), GLboolean(transpose), GLfloatArray(value)) -> None',
argNames=('location','count','transpose','value',),
deprecated=_DEPRECATED,
)
glUniformMatrix4x3fv = platform.createExtensionFunction(
'glUniformMatrix4x3fv',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLint,constants.GLsizei,constants.GLboolean,arrays.GLfloatArray,),
doc='glUniformMatrix4x3fv(GLint(location), GLsizei(count), GLboolean(transpose), GLfloatArray(value)) -> None',
argNames=('location','count','transpose','value',),
deprecated=_DEPRECATED,
)
# import legacy entry points to allow checking for bool(entryPoint)
from OpenGL.raw.GL.VERSION.GL_2_1_DEPRECATED import *
| {
"content_hash": "157d15d786e8538e99efa972340973bc",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 111,
"avg_line_length": 46.51724137931034,
"alnum_prop": 0.784037558685446,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "e07f468ce81693023ab8190351221d29216ed5cc",
"size": "4047",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/VERSION/GL_2_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import pdb
import numpy as np
import ConfigParser
import os
import logging
import pprint
import astropy.cosmology as ac
from astropy.cosmology import Planck15 as cosmo
from astropy.cosmology import Planck15, z_at_value
import astropy.units as u
#import NDpredict
from utils import string_is_true
#from astropy.cosmology import Planck15 as cosmo
def get_params(param_file_path):
    """
    Get parameter values and return them in the form of a dictionary.

    Reads every expected section of the config file into a "raw" dict,
    then converts each section with its dedicated ``get_*_parameters``
    helper into the final ``params`` dictionary used by the stacker.

    Parameters
    ----------
    param_file_path : str
        path to parameter file

    Returns
    -------
    params : dict
    """
    config = ConfigParser.SafeConfigParser()
    config.read(param_file_path)
    # Get "raw" dictionaries from `config` object
    #pdb.set_trace()
    raw_params = dict(config.items('general'))
    raw_cosmo_params = dict(config.items('cosmology'))
    raw_pop_params = dict(config.items('populations'))
    #try:
    #    raw_pop_params = dict(config.items('populations'))
    #except:
    #    pass
    # The [cuts] section is optional; on failure raw_cut_params stays unset
    # and the conversion below silently skips it.
    try:
        raw_cut_params = dict(config.items('cuts'))
    except:
        pass
    raw_io_params = dict(config.items('io'))
    raw_binning_params = dict(config.items('binning'))
    raw_maps_to_stack_params = dict(config.items('maps_to_stack'))
    raw_map_path_params = dict(config.items('map_path'))
    raw_map_file_params = dict(config.items('map_file'))
    # Noise maps deliberately reuse the [map_path] section for their paths.
    raw_noise_path_params = dict(config.items('map_path'))
    raw_noise_file_params = dict(config.items('noise_file'))
    raw_beams_params = dict(config.items('beams'))
    raw_color_correction_params = dict(config.items('color_correction'))
    raw_catalogs_params = dict(config.items('catalogs'))
    raw_io_params['param_file_path'] = os.path.abspath(param_file_path) # Store parameter file path
    # Convert "raw" config dictionary to "organized" dictionary `params`
    params = get_general_params(raw_params)
    params['io'] = get_io_parameters(raw_io_params)
    params['cosmo'] = get_cosmology_parameters(raw_cosmo_params)
    params['populations'] = get_population_parameters(raw_pop_params,params)
    #try:
    #    params['populations'] = get_population_parameters(raw_pop_params,params)
    #except:
    #    pass
    try:
        params['cuts'] = get_cut_parameters(raw_cut_params)
    except:
        pass
    params['map_files'] = get_maps_parameters(raw_maps_to_stack_params,raw_map_path_params,raw_map_file_params)
    params['noise_files'] = get_maps_parameters(raw_maps_to_stack_params,raw_noise_path_params,raw_noise_file_params)
    params['wavelength'] = get_wavelength_parameters(raw_maps_to_stack_params)
    params['psfs'] = get_beams_parameters(raw_maps_to_stack_params,raw_beams_params)
    params['color_correction'] = get_color_correction_parameters(raw_maps_to_stack_params,raw_color_correction_params)
    params['catalogs'] = get_catalogs_parameters(raw_catalogs_params)
    params['bins'] = get_binning_parameters(raw_binning_params)
    params['library_keys'] = params['map_files'].keys()
    # Log the fully assembled parameter set for reproducibility.
    logging.info("---------- PARAMETER VALUES ----------")
    logging.info("======================================")
    logging.info("\n" + pprint.pformat(params, indent=4) + "\n")
    #pdb.set_trace()
    return params
def get_general_params(raw_params):
    """Parse the [general] config section, filling in defaults for
    missing keys (catalog column names, splitting scheme, bootstrap)."""
    params = {} # Initialize parameter dictionary
    # Catalog specific names for keys
    try:
        params['zkey'] = raw_params['zkey']
        params['mkey'] = raw_params['mkey']
    except:
        params['zkey'] = 'z_peak'
        params['mkey'] = 'lmass'
    try:
        params['ra_key'] = raw_params['ra_key']
        params['dec_key'] = raw_params['dec_key']
    except:
        params['ra_key'] = 'ra'
        params['dec_key'] = 'dec'
    try:
        params['uv_key'] = raw_params['uv_key']
        params['vj_key'] = raw_params['vj_key']
    except:
        params['uv_key'] = 'rf_U_V'
        params['vj_key'] = 'rf_V_J'
    # Have a floating background level instead of removing mean
    # NOTE(review): when present, float_background is kept as the raw
    # config string rather than parsed to a boolean — confirm intended.
    try:
        params['float_background'] = raw_params['float_background']
    except:
        params['float_background'] = False
    #pdb.set_trace()
    # Type of galaxy split. Default is UVJ star-forming / quiescent
    try:
        params['galaxy_splitting_scheme'] = raw_params['classification_scheme']
    except KeyError:
        params['galaxy_splitting_scheme'] = 'sf-qt'
    try:
        params['save_bin_ids'] = string_is_true(raw_params['save_bin_ids'])
    except:
        params['save_bin_ids'] = True
    # If running bootstrap
    # Entry format: '<flag> <boot0> <number_of_boots> [perturb_z] [index_boots]'
    if string_is_true(raw_params['bootstrap'].split()[0]) == True:
        params['bootstrap'] = True
        params['boot0'] = float(raw_params['bootstrap'].split()[1])
        params['number_of_boots'] = float(raw_params['bootstrap'].split()[2])
        try:
            params['perturb_z'] = string_is_true(raw_params['bootstrap'].split()[3])
        except:
            params['perturb_z'] = False
        try:
            params['index_boots'] = string_is_true(raw_params['bootstrap'].split()[4])
        except:
            params['index_boots'] = False
        #pdb.set_trace()
    else:
        params['bootstrap'] = False
        params['boot0'] = 0
        params['number_of_boots'] = 1
        params['perturb_z'] = False
    return params
def get_wavelength_parameters(raw_maps_to_stack_params):
    """Collect the wavelength (float) of every map flagged for stacking."""
    wavelengths = {}
    # Each entry reads '<wavelength> <use-flag>'; keep only enabled maps.
    for map_name, entry in raw_maps_to_stack_params.items():
        tokens = entry.split()
        if string_is_true(tokens[1]) == True:
            wavelengths[map_name] = float(tokens[0])
    return wavelengths
def get_binning_parameters(raw_params):
    """Parse the [binning] config section into a dictionary of options.

    Fix: removed a leftover ``pdb.set_trace()`` that halted execution in the
    number-density branch, and deleted dead commented-out code.

    Parameters
    ----------
    raw_params : dict
        Key/value pairs from the parameter file's [binning] section.

    Returns
    -------
    binning : dict
        Binning flags, plus explicit z/mass nodes when optimal binning is off.
    """
    binning = {}
    # Style of binning, optimal or evenly, and the number of bins (optional).
    # If number_of_bins not provided, will be decided by the binning code.
    try:
        binning['optimal_binning'] = is_true(raw_params, 'optimal_binning')
    except KeyError:
        binning['optimal_binning'] = False
    # Optional number of bins: second token of the 'optimal_binning' entry.
    if len(raw_params['optimal_binning'].split()) > 1:
        try:
            binning['number_of_bins'] = raw_params['optimal_binning'].split()[1]
        except KeyError:
            pass
    # If binning masses by Number Densities
    try:
        binning['bin_in_number_density'] = is_true(raw_params, 'bin_in_number_density')
    except KeyError:
        binning['bin_in_number_density'] = False
    # If binning redshifts in lookback time.
    try:
        binning['bin_in_lookback_time'] = is_true(raw_params, 'bin_in_lookback_time')
    except KeyError:
        binning['bin_in_lookback_time'] = False
    # If stacking entire catalog at once, rather that in redshift slices.
    # Still unclear if this is advantageous or not.
    try:
        binning['stack_all_z_at_once'] = is_true(raw_params, 'all_z_at_once')
    except KeyError:
        binning['stack_all_z_at_once'] = False
    if is_true(raw_params, 'optimal_binning') == False:
        # Explicit node lists: bin edges in redshift and log-mass.
        z_nodes = []
        m_nodes = []
        for i in raw_params['redshift_nodes'].split():
            z_nodes.append(float(i))
        for j in raw_params['mass_nodes'].split():
            m_nodes.append(float(j))
        if binning['bin_in_number_density'] == True:
            # Number-density nodes; fall back to the mass nodes if absent.
            nd_nodes = []
            try:
                for j in raw_params['number_density_nodes'].split():
                    nd_nodes.append(float(j))
            except:
                for j in raw_params['mass_nodes'].split():
                    nd_nodes.append(float(j))
        binning['t_nodes'] = z_nodes
        binning['z_nodes'] = z_nodes
        binning['m_nodes'] = m_nodes
        if binning['bin_in_number_density'] == True:
            # NOTE(review): this branch depends on NDpredict (its import is
            # commented out at the top of the file) and on an undefined
            # z_mid variable, so it raises NameError if exercised.
            # TODO: repair or remove number-density binning.
            binning['m_nodes'] = np.array([NDpredict.getmass_illustris(nd_nodes, z_mid[i]) for i in m_nodes])
        if binning['bin_in_lookback_time'] == True:
            # Nodes were given as lookback times (Gyr); convert to redshifts.
            binning['t_nodes'] = z_nodes
            binning['z_nodes'] = np.array([z_at_value(Planck15.age, (cosmo.age(0).value - i) * u.Gyr) for i in z_nodes])
    return binning
def get_io_parameters(raw_params):
    """Assemble input/output path settings from the [io] section.

    The output folder is configured as '<ENV_VAR> <relative_path>' and is
    resolved against the named environment variable.
    """
    io = {'param_file_path': raw_params['param_file_path']}
    try:
        io['shortname'] = raw_params['shortname']
    except KeyError:
        io['shortname'] = ''
    out_tokens = raw_params['output_folder'].split()
    io['output_folder'] = os.environ[out_tokens[0]] + out_tokens[1]
    io['flux_densities_filename'] = raw_params['flux_densities_filename']
    return io
def get_cosmology_parameters(raw_params):
    '''
    Build an astropy LambdaCDM cosmology from the [cosmology] section.

    Returns
    -------
    cosmo : astropy.cosmology object
        object containing cosmological parameters
    '''
    matter_density = float(raw_params['omega_m'])       # Present-day matter density
    dark_energy_density = float(raw_params['omega_l'])  # Present-day dark energy density
    curvature_density = float(raw_params['omega_k'])    # Read for validation; not passed below
    reduced_h = float(raw_params['h'])                  # h0 = H0 / (100 km/s/Mpc)
    hubble_constant = 100. * reduced_h
    return ac.LambdaCDM(H0=hubble_constant, Om0=matter_density,
                        Ode0=dark_energy_density)
def get_cut_parameters(raw_cut_params):
    """Convert every [cuts] entry from its raw string to a float."""
    # Special case for 5pop and 4pop
    return {name: float(value) for name, value in raw_cut_params.items()}
def get_population_parameters(raw_pop_params, params):
    """Parse the [populations] section according to the splitting scheme.

    Schemes:
      * 'general' — each entry is '<digit><tokens...>'; the first character
        is an int code, remaining tokens are coerced to bool/float when
        possible.
      * 'sf-qt' / '4pops' / '5pops' — every entry is a plain float cut.
      * 'uvj' — explicit color nodes and population names.

    Fixes: the 'general' branch checked ``float(tst[1][1])`` instead of the
    current token ``tst[1][k]`` (wrong index; crashed on one-token lists and
    coerced inconsistently); ``print pop`` updated to the form valid in both
    Python 2 and 3.
    """
    cuts_dict = {}
    scheme = params['galaxy_splitting_scheme']
    if scheme == 'general':
        for pop in raw_pop_params:
            print(pop)
            entry = raw_pop_params[pop]
            # NOTE(review): only the FIRST character is parsed as the int
            # code — assumes single-digit codes; confirm against configs.
            tst = [int(entry[0])]
            if len(entry.split()) > 1:
                tst.append([k for k in entry[1:].split()])
                for k in range(len(tst[1])):
                    try:
                        # string_is_true raises NameError on non-boolean text.
                        tst[1][k] = string_is_true(tst[1][k])
                    except NameError:
                        try:
                            # BUG FIX: original tested tst[1][1] here.
                            tst[1][k] = float(tst[1][k])
                        except ValueError:
                            pass  # leave non-numeric tokens as strings
            else:
                tst.append([])
            cuts_dict[pop] = tst
    elif scheme in ('sf-qt', '4pops', '5pops'):
        for pop in raw_pop_params:
            cuts_dict[pop] = float(raw_pop_params[pop])
    elif scheme == 'uvj':
        cuts_dict['c_nodes'] = [float(n) for n in raw_pop_params['uvj_nodes'].split()]
        cuts_dict['pop_names'] = [n for n in raw_pop_params['pop_names'].split()]
    return cuts_dict
def get_maps_parameters(raw_maps_to_stack_params, raw_map_path_params, raw_map_file_params):
    """Build the full file path of every map flagged for stacking.

    Paths are configured as '<ENV_VAR> <relative_dir>' and resolved against
    the named environment variable.
    """
    maps = {}
    for map_name in raw_maps_to_stack_params:
        use_flag = raw_maps_to_stack_params[map_name].split()[1]
        if string_is_true(use_flag) == True:
            path_tokens = raw_map_path_params[map_name].split()
            maps[map_name] = (os.environ[path_tokens[0]] + path_tokens[1]
                              + raw_map_file_params[map_name])
    return maps
def get_beams_parameters(raw_maps_to_stack_params, raw_beams_params):
    """Collect beam information per map flagged for stacking.

    raw_beams_params[imap] is '<fwhm_or_file> <beam_area>': a numeric first
    token is stored under '<map>_fwhm', otherwise it is taken to be a
    beam-file path stored under '<map>_beam_file'.  The beam area is always
    stored under '<map>_beam_area'.
    """
    psfs = {}
    for imap in raw_maps_to_stack_params:
        stack_flag = raw_maps_to_stack_params[imap].split()[1]
        if not string_is_true(stack_flag):
            continue
        beam_spec = raw_beams_params[imap].split()
        psfs[imap + '_beam_area'] = float(beam_spec[1])
        if is_float(beam_spec[0]):
            psfs[imap + '_fwhm'] = float(beam_spec[0])
        else:
            psfs[imap + '_beam_file'] = beam_spec[0]
    return psfs
def get_color_correction_parameters(raw_maps_to_stack_params, raw_color_correction_params):
    """Return {map_name: color_correction_factor} for maps flagged for stacking."""
    color_correction = {}
    for imap in raw_maps_to_stack_params:
        stack_flag = raw_maps_to_stack_params[imap].split()[1]
        if string_is_true(stack_flag):
            # Key is the plain map name (the original appended an empty string).
            color_correction[imap] = float(raw_color_correction_params[imap])
    return color_correction
def get_catalogs_parameters(raw_catalog_params):
    """Assemble catalog path/file settings from the raw config section.

    'catalog_path' may be given as '<ENV_VAR> <relative_path>', in which case
    the environment variable's value is prepended; otherwise (no second token,
    or the environment variable is unset) the raw value is used verbatim.
    An optional 'features_file' entry is passed through when present.
    """
    catalog = {}
    raw_path = raw_catalog_params['catalog_path']
    try:
        # Bug fix: was a bare `except:` that swallowed every exception; only
        # a missing env var (KeyError) or a single-token path (IndexError)
        # legitimately trigger the verbatim fallback.
        tokens = raw_path.split()
        catalog['catalog_path'] = os.environ[tokens[0]] + tokens[1]
    except (KeyError, IndexError):
        catalog['catalog_path'] = raw_path
    catalog['catalog_file'] = raw_catalog_params['catalog_file']
    if 'features_file' in raw_catalog_params:
        catalog['features_file'] = raw_catalog_params['features_file']
    return catalog
def is_float(s):
    """Return True if *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def string_is_true(sraw):
    """Convert a yes/no-style string to a boolean (case-insensitive).

    Accepts 'true'/'t'/'yes'/'y'/'1' and 'false'/'f'/'no'/'n'/'0'.

    :param sraw: raw string value to interpret
    :returns: True or False
    :raises NameError: if the string is not a recognized boolean spelling.
        NameError (rather than ValueError) is kept deliberately: the old error
        path referenced an undefined variable and so raised NameError as a
        side effect, and get_population_parameters catches NameError to detect
        non-boolean tokens.
    """
    s = sraw.lower()  # Make case-insensitive
    # Lists of acceptable 'True' and 'False' strings
    true_strings = ['true', 't', 'yes', 'y', '1']
    false_strings = ['false', 'f', 'no', 'n', '0']
    if s in true_strings:
        return True
    elif s in false_strings:
        return False
    # Bug fix: the old code logged with an undefined name `key` (accidental
    # NameError) and then used a bare `raise` with no active exception.
    logging.warning("Input not recognized as a boolean: %s" % (sraw))
    raise NameError("Input not recognized as a boolean: %s" % (sraw))
def is_true(raw_params, key):
    """Interpret raw_params[key] as a boolean (case-insensitive).

    :param raw_params: dict-like of raw string parameter values
    :param key: parameter name to look up
    :returns: True or False
    :raises ValueError: if the value is not a recognized boolean spelling.
    """
    sraw = raw_params[key]
    s = sraw.lower()  # Make case-insensitive
    # Lists of acceptable 'True' and 'False' strings
    true_strings = ['true', 't', 'yes', 'y', '1']
    false_strings = ['false', 'f', 'no', 'n', '0']
    if s in true_strings:
        return True
    elif s in false_strings:
        return False
    logging.warning("Input not recognized for parameter: %s" % (key))
    logging.warning("You provided: %s" % (sraw))
    # Bug fix: a bare `raise` with no active exception is itself an error
    # (TypeError on Python 2, RuntimeError on Python 3); raise ValueError.
    raise ValueError("Unrecognized boolean value %r for parameter %r" % (sraw, key))
### FOR TESTING ###
if __name__ == '__main__':
    # Ad-hoc check: parse the parameter file given on the command line and
    # pretty-print the resulting params dict.
    import os
    import sys
    import pprint

    param_fp = sys.argv[1]
    print("")
    print("Testing %s on %s..." % (os.path.basename(__file__), param_fp))
    print("")
    pprint.pprint(get_params(param_fp))
| {
"content_hash": "c8dc76c74c608c3bbc3462de84e516f8",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 150,
"avg_line_length": 35.74129353233831,
"alnum_prop": 0.593402004454343,
"repo_name": "marcoviero/simstack",
"id": "e135ec290bc0d99ccadb47221be5e8cca7a4bc4c",
"size": "14368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "45805"
},
{
"name": "Python",
"bytes": "127087"
}
],
"symlink_target": ""
} |
"""Shapley value analysis"""
__author__ = 'thor'
from numpy import unique, concatenate, sort
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict, defaultdict
import string
import random as rnd
import itertools
from scipy.misc import factorial
from ut.util.uiter import all_subsets_of, powerset
from ut.stats.bin_est.set_est import Shapley as Shapley_1
# from ut.daf.manip import rollin_col
def _coalition_of(iter_of_items):
return tuple(unique(iter_of_items))
def compute_shapley_values_from_coalition_values(coalition, normalize=False, verbose=False):
    """Compute Shapley values for a game given as {coalition: value}.

    Thin front-end that delegates to the closed-form implementation.
    """
    return compute_shapley_values_from_coalition_values_using_formula(
        coalition, normalize=normalize, verbose=verbose)
def compute_shapley_values_from_coalition_values_using_formula(
    coalition_values, normalize=False, verbose=False
):
    """
    Computes the Shapley values of a game specified by coalition values.
    See https://en.wikipedia.org/wiki/Shapley_value.
    :param coalition_values: The definition of the game (a dict of values of coalitions of players)
    :param normalize: True or [False]: Whether to normalize the Shapley values so they sum to 1
    :param verbose: True or [False]: Whether to print info while computing the Shapley values
    :return: Shapley values of the game specified by coalition_values
    """
    # All players appearing in any coalition key of the game.
    players = _universe_set_of_keys_of_dict(coalition_values)
    n = len(players)
    factorial_n = float(factorial(n))
    if verbose:
        print(('Normalizing factor: {}'.format(factorial_n)))

    def _shapley_unnormalized_weight(s):
        # |S|! * (n-|S|-1)!: number of player orderings in which the |S|
        # coalition members come before a given player and the rest after.
        return factorial(s) * factorial(
            n - s - 1
        )  # all possible permutations of players before and after

    # defaultdict(float): coalitions absent from the game contribute 0.0.
    coalition_values = defaultdict(float, coalition_values)
    # print coalition_values
    shapley_values = dict()
    for player in players:
        if verbose:
            print(('\n-------------------- {} ----------------------'.format(player)))
        shapley_values[player] = 0.0
        # Sum the weighted marginal contribution of `player` over every
        # coalition S not containing the player (powerset of the others).
        for s in map(_coalition_of, powerset(players - {player})):
            shapley_values[player] += _shapley_unnormalized_weight(len(s)) * (
                coalition_values[_coalition_of(list(set(s).union({player})))]
                - coalition_values[s]
            )
            if verbose:
                weight = _shapley_unnormalized_weight(len(s))
                s_with_player = coalition_values[
                    _coalition_of(list(set(s).union({player})))
                ]
                s_alone = coalition_values[s]
                print(
                    (
                        '... contributed {} * ({} - {}) = {} \tto {} \t(running sum is {})'.format(
                            weight,
                            s_with_player,
                            s_alone,
                            weight * (s_with_player - s_alone),
                            _coalition_of(list(set(s).union({player}))),
                            shapley_values[player],
                        )
                    )
                )
        shapley_values[
            player
        ] /= factorial_n  # normalize according to all possible permutations
    if normalize:
        # Rescale so the Shapley values sum to 1.
        return _normalize_dict_values(shapley_values)
    else:
        return shapley_values
def _shapley_weight(s, n):
return (factorial(s) * factorial(n - s - 1)) / float(factorial(n))
def compute_shapley_values_from_coalition_values_01(
    coalition_values, normalize=False, verbose=False
):
    """Alternative Shapley computation routed through ut.stats Shapley estimator.

    Mutates *coalition_values* in place (missing coalitions are filled with
    0.0), builds a single-column DataFrame from it and delegates to the
    project's Shapley_1 estimator.

    NOTE: the *verbose* parameter is accepted but not used here.
    """
    _complete_missing_coalitions_with_zero_valued_coalitions_in_place(coalition_values)
    # One row per coalition; the coalition tuple is the index, its value the
    # 'value' column expected by Shapley_1(success='value').
    coalition_values = pd.DataFrame(
        index=list(coalition_values.keys()),
        data=list(coalition_values.values()),
        columns=['value'],
    )
    se = Shapley_1(coalition_values, success='value')
    # presumably coerces the index entries back to tuples -- behavior defined
    # by the ut.stats.bin_est.set_est.Shapley class; verify there.
    se.change_type_of_d_index(tuple)
    shapley_values = se.compute_shapley_values()
    if normalize:
        return _normalize_dict_values(shapley_values)
    else:
        return shapley_values
def compute_shapley_values_from_unit_valued_sequences(sequences, normalize=False):
    """Count coalitions observed in *sequences* (one unit each) and return their Shapley values."""
    model = ShapleyDataModel()
    model.absorb_coalition_obs(sequences)
    return compute_shapley_values_from_coalition_values(
        model.coalition_values(), normalize=normalize
    )
# def compute_shapley_values_from_valued_sequences(sequence_and_value_dict, normalize=False):
# dm = ShapleyDataModel()
# dm.absorb_sequence_into_coalition_obs(sequence_and_value_dict)
# coalition_values = dm.coalition_values()
#
# return compute_shapley_values_from_coalition_values(coalition_values, normalize=normalize)
def _normalize_dict_values(d):
value_sum = float(np.sum(list(d.values())))
return {k: v / value_sum for k, v in list(d.items())}
def all_proper_subsets_iterator(superset):
    """Iterate over every non-empty proper subset of *superset*, as tuples."""
    subset_sizes = range(1, len(superset))
    per_size = (itertools.combinations(superset, k) for k in subset_sizes)
    return itertools.chain.from_iterable(per_size)
def all_subsets_or_eq_iterator(superset):
    """Iterate over every non-empty subset of *superset* (including itself), as tuples."""
    subset_sizes = range(1, len(superset) + 1)
    per_size = (itertools.combinations(superset, k) for k in subset_sizes)
    return itertools.chain.from_iterable(per_size)
def all_superset_iterator(subset, universe_set):
    """Iterate over all strict supersets of *subset* within *universe_set*.

    Each yielded tuple is *subset* plus one or more of the remaining elements
    of *universe_set* (the subset itself is not yielded).
    """
    base = set(subset)
    extras = set(universe_set) - base
    candidates = itertools.chain.from_iterable(
        itertools.combinations(extras, k) for k in range(1, len(extras) + 1)
    )
    return (tuple(base.union(combo)) for combo in candidates)
def _universe_set_of_keys_of_dict(d):
return set(itertools.chain(*list(d.keys())))
def _complete_missing_coalitions_with_zero_valued_coalitions_in_place(
    coalition_values, universe_set=None
):
    """Fill *coalition_values* in place so every non-empty coalition has an entry.

    Coalitions absent from the dict are assigned 0.0; existing entries are
    left untouched.  The universe of players defaults to the union of all
    existing coalition keys.
    """
    if universe_set is None:
        universe_set = set(itertools.chain.from_iterable(coalition_values.keys()))
    grand_coalition = ShapleyDataModel.coalition_of(list(universe_set))
    coalition_values.setdefault(grand_coalition, 0.0)
    for subset in all_proper_subsets_iterator(universe_set):
        coalition_values.setdefault(_coalition_of(subset), 0.0)
class ShapleyDataModel(object):
    """Accumulates coalition observations and derives coalition values.

    Coalitions are canonicalized to sorted tuples (see coalition_of) and
    counted/summed in the self.coalition_obs Counter.
    """

    def __init__(self, data=None, data_type=None):
        """
        Inputs:
        * data: data used to make the coalition values.
        * data_type: type of data, either:
            - 'coalition_obs': data is the counter (a dict) of coalition_obs directly
            - 'coalition_obs_collection': a coalition_obs dict to be added to the existing
            - 'item_collections': an iterator of sequences to absorbe to make the coalition_obs
        """
        self.coalition_obs = Counter()
        self.item_list = []
        self._coalition_size_map = None  # lazy cache for coalition_size_map()
        if data is not None:
            # if data_type not given, determine
            if data_type is None:
                if isinstance(data, Counter):
                    data_type = 'coalition_obs'
                else:
                    data_type = 'item_collections'
            # according to type, process and set data
            if data_type == 'coalition_obs':
                self.coalition_obs = data
            elif data_type == 'coalition_obs_collection':
                self.absorb_coalition_obs(data)
            elif data_type == 'item_collections':
                for d in data:
                    self.absorb_sequence_into_coalition_obs(d)

    @staticmethod
    def coalition_of(iter_of_items):
        """Canonical coalition key: sorted tuple of the distinct items."""
        return tuple(unique(iter_of_items))

    def absorb_sequence_into_coalition_obs(self, seq):
        """
        Updates the self.coalition_obs with the input coalition (a list of items)
        """
        # Each sequence counts as one observation of its coalition.
        self.coalition_obs.update([self.coalition_of(seq)])
        return self

    def absorb_coalition(self, collection_of_items_of_single_coalition):
        """
        Updates the self.coalition_obs with the input coalition (a list of items)
        """
        # Deprecated entry point; raising makes the statements below
        # unreachable, so calling this always fails.
        raise DeprecationWarning(
            'absorb_coalition is being deprecated. Use absorb_sequence_into_coalition_obs() instead'
        )
        self.coalition_obs.update(
            [self.coalition_of(collection_of_items_of_single_coalition)]
        )
        return self

    def absorb_coalition_obs(self, coalition_obs_data):
        """
        Updates the self.coalition_obs with the input dict of {coalition: obs_value}
        """
        # EAFP dispatch on input shape: mapping {coalition: value}, then a
        # plain iterable of sequences, then an iterable of
        # {'coalition': ..., 'value': ...} records.
        try:
            for coalition, value in coalition_obs_data.items():
                self.absorb_coalition_and_value(coalition, value)
        except AttributeError:
            try:
                for seq in coalition_obs_data:
                    self.absorb_sequence_into_coalition_obs(seq)
            except TypeError:
                for seq in coalition_obs_data:
                    self.absorb_coalition_and_value(seq['coalition'], seq['value'])
        return self

    # coalition_obs_dict = \
    #     {self.coalition_of(coalition): value for coalition, value in coalition_obs_dict.iteritems()}
    # self.coalition_obs.update(coalition_obs_dict)
    # self.absorb_coalition_and_value(coalition_obs_dict.keys()[0], coalition_obs_dict.values()[0])

    def absorb_coalition_and_value(self, coalition, value):
        """
        Updates the self.coalition_obs with the input dict of coalition: obs_value
        """
        # Counter.update adds `value` to any existing count for the coalition.
        self.coalition_obs.update({self.coalition_of(coalition): value})
        return self

    def coalition_values(self, coalition_obs=None, verbose=False):
        """
        Computes the coalition_values from coalition_obs (counts or other values).
        To do this, we accumulate the counts of all subsets of each unique coalition.
        """
        if coalition_obs is None:
            coalition_obs = self.coalition_obs
        coalition_contributions = Counter(coalition_obs)
        if verbose:
            print(coalition_contributions)
        universe_set = set(self.mk_item_list(coalition_obs=coalition_obs))
        for coalition, count in coalition_obs.items():  # for every coalition
            # ... get all non-empty strict subsets of this coalition, and assign the mother coalition count
            # NOTE(review): despite the comment above, all_superset_iterator
            # yields SUPERsets -- each observed coalition's count is added to
            # every superset within the universe. Confirm intended direction.
            superset_counts = {
                self.coalition_of(sub_coalition): count
                for sub_coalition in all_superset_iterator(coalition, universe_set)
            }
            # ... update the coalition_values counter with these counts
            coalition_contributions.update(superset_counts)
            if verbose:
                print(
                    (
                        ' after {} contributions:\n {}'.format(
                            coalition, coalition_contributions
                        )
                    )
                )
        # # complete coalition_contributions with missing combinations (assigning 0.0 to them)
        # _complete_missing_coalitions_with_zero_valued_coalitions_in_place(coalition_contributions)
        return coalition_contributions

    def coalition_size_map(self):
        """Return {coalition_size: {coalition: count}}, ordered by size (cached)."""
        if not self._coalition_size_map:
            self._coalition_size_map = defaultdict(dict)
            for coalition, count in self.coalition_obs.items():
                self._coalition_size_map[len(coalition)].update({coalition: count})
            self._coalition_size_map = OrderedDict(
                sorted(list(self._coalition_size_map.items()), key=lambda t: t[0])
            )
        return self._coalition_size_map

    def mk_poset(self):
        # Incomplete: intended to build a partially-ordered set of coalitions.
        d = defaultdict(list)
        _coalition_size_map = self.coalition_size_map()
        coalition_sizes = sorted(_coalition_size_map.keys())
        # TODO: Finish, if necessary

    def mk_item_list(self, coalition_obs=None):
        """Return (and store on self.item_list) all distinct items observed."""
        if coalition_obs is None:
            coalition_obs = self.coalition_obs
        item_list = unique(concatenate(list(coalition_obs.keys())))
        self.item_list = item_list
        return item_list
def _test_shapley_data_model():
    """Smoke-test ShapleyDataModel's coalition counting."""
    observed_sequences = [
        ['A', 'B', 'C'],
        ['A', 'C', 'B'],
        ['B', 'A', 'C'],
        ['A', 'A', 'B', 'C'],
        ['C', 'A'],
        ['B', 'C'],
        ['C', 'B'],
        ['C', 'B'],
        ['A'],
    ]
    dm = ShapleyDataModel()  # initialize the data model
    for sequence in observed_sequences:  # count the coalitions
        dm.absorb_sequence_into_coalition_obs(sequence)
    expected = Counter(
        {('A', 'B', 'C'): 4, ('B', 'C'): 3, ('A',): 1, ('A', 'C'): 1}
    )
    assert dm.coalition_obs == expected, 'Unexpected result for dm.coalition_obs'
    print('All good in _test_shapley_data_model')
def rand_shapley_values(items=3):
    """Build a {player: 2**index} Shapley-value dict.

    *items* may be an int (first N uppercase letters become the players) or a
    list of player names; a dict is returned unchanged.
    """
    if isinstance(items, int):
        letters = string.ascii_uppercase[:items]
        items = ','.join(letters).split(',')
    if isinstance(items, list):
        items = {name: 2 ** position for position, name in enumerate(items)}
    return items
class LinearValuedCoalitionGenerator(object):
    """Generates coalitions whose value is the sum of their members' Shapley values.

    Because the game is linear (additive), every player's exact Shapley value
    is known by construction, which makes this class handy for testing
    Shapley-estimation code.
    """

    def __init__(self, shapley_values=3, normalize=False):
        """
        :param shapley_values: a {player: value} dict, or an int/list handed to
            rand_shapley_values to build one (falsy values default to 3 players)
        :param normalize: if True, rescale values so they sum to 1
        """
        shapley_values = shapley_values or 3
        if not isinstance(shapley_values, dict):
            shapley_values = rand_shapley_values(items=shapley_values)
        self.shapley_values = shapley_values
        if normalize:
            self.shapley_values = _normalize_dict_values(self.shapley_values)

    @staticmethod
    def coalition_of(coalition):
        """Canonical coalition key: sorted tuple of the members."""
        return tuple(sort(coalition))

    def coalition_value(self, coalition):
        """Value of a coalition: sum of its members' Shapley values (linear game)."""
        return sum([self.shapley_values[item] for item in coalition])

    def rand_coalition(self):
        """Draw a random non-empty coalition of players."""
        return self.coalition_of(
            rnd.sample(
                list(self.shapley_values.keys()),
                rnd.randint(1, len(self.shapley_values)),
            )
        )

    def rand_coalition_obs(self):
        """Draw a random coalition and return it as a {coalition: value} dict."""
        coalition = self.rand_coalition()
        return {coalition: self.coalition_value(coalition)}

    def rand_coalition_obs_cum(self, n_draws=None):
        """Accumulate *n_draws* random coalition observations into a Counter.

        Values of repeatedly drawn coalitions are summed by Counter.update.
        """
        # Bug fix: use floor division -- `len(...) / 2` yields a float under
        # Python 3 and itertools.repeat requires an integer count.
        n_draws = n_draws or len(self.shapley_values) // 2
        coalition_obs = Counter()
        for obs in itertools.starmap(
            self.rand_coalition_obs, itertools.repeat([], n_draws)
        ):
            coalition_obs.update(obs)
        return coalition_obs

    def coalition_values(self):
        """Exhaustive {coalition: value} over every non-empty subset of players."""
        return {
            self.coalition_of(coalition): self.coalition_value(coalition)
            for coalition in all_subsets_of(
                list(self.shapley_values.keys()), include_empty_set=False
            )
        }
# class ShapleyDataModel_old(object):
# def __init__(self, item_seperator=','):
# """
# Inputs:
# * item_seperator will be used to construct string hashes from lists.
# You should choose a character that never shows up in the items, or you'll get problems.
# Other attributes:
# * coalition_obs is a Counter of coalitions
# * coalition_values is also a Counter of coalitions, but it counts not only
# the coalition_obs, but all non-empty subsets of the latter.
# """
# self.coalition_obs = Counter()
# self.coalition_values = None
# self.item_seperator = item_seperator
# self.contribution_df = None
# self.item_list = []
#
# def absorb_coalition(self, coalition):
# """
# Updates the self.coalition_obs with the input coalition (a list of items)
# """
# self.coalition_obs.update([self._list_to_key(coalition)])
#
# def mk_coalition_size_map(self):
#
# d = defaultdict(list)
# for coalition, count in self.coalition_obs.iteritems():
# d[len(self._key_to_list(coalition))].append({coalition: count})
# return d
#
# def mk_coalition_contributions(self, verbose=False):
# """
# Computes the self.coalition_values attribute.
# To do this, we accumulate the counts of all subsets of each unique coalition.
# """
# # init with coalition_obs
# self.coalition_values = Counter(self.coalition_obs)
# if verbose:
# print(self.coalition_values)
# for coalition, count in self.coalition_obs.iteritems(): # for every coalition
# # get list corresponding to the key
# coalition = self._key_to_list(coalition)
# # get all non-empty strict subsets of this list,
# # and assign the mother coalition count
# subset_counts = \
# {self._list_to_key(sub_coalition): count
# for sub_coalition in all_proper_subsets_iterator(coalition)}
# # update the coalition_values counter with these counts
# self.coalition_values.update(subset_counts)
# if verbose:
# print(" after {} contributions:\n {}" \
# .format(coalition, self.coalition_values))
#
# def mk_item_list(self):
# self.item_list = list(unique(self.item_seperator.join(dm.coalition_obs.keys()) \
# .split(self.item_seperator)))
#
# # def all_supersets_iterator(self, subset):
#
# # subset = dm
#
# def mk_contribution_df(self):
# self._fill_counters()
# self.contribution_df = \
# pd.DataFrame(index=self.coalition_values.keys(), columns=dm.item_list)
# for coalition in self.contribution_df.index.values:
# print self._remove_and_remain_dicts(coalition)
# for rr in self._remove_and_remain_dicts(coalition):
# # the contribution of each item is the total contribution
# # minus what the contribution would be without this item
# contribution = \
# self.coalition_values[coalition] \
# - self.coalition_values[rr['remaining']]
# # enter this in the contribution_df
# self.contribution_df.loc[coalition, rr['removed']] = contribution
#
# def _fill_counters(self):
# """
# adds missing item combinations to counters, giving them 0 count
# """
# self.mk_item_list()
# zero_counts = {k: 0 for k in itertools.imap(self._list_to_key,
# all_proper_subsets_iterator(self.item_list))
# }
# self.coalition_obs.update(zero_counts)
# self.coalition_values.update(zero_counts)
#
# def _list_to_key(self, coalition):
# """
# Transforms a list of strings to a comma (or item_seperator) separated string
# of unique items of the input list.
# """
# return self.item_seperator.join(unique(coalition))
#
# def _key_to_list(self, coalition_key):
# """
# Inverse of _list_to_key:
# Returns a list from a character (item_seperator) seperated string of items.
# """
# return coalition_key.split(self.item_seperator)
#
# def _remove_and_remain_dicts(self, superset):
# """
# Returns a list of {removed, remaining} dicts listing all (keys of) superset - item
# sets for every item in superset.
# Returns an empty list if the input superset has only one element.
# Example:
# self._remove_and_remain_dicts('A,B,C')
# returns
# [{'remaining': 'B,C', 'removed': 'A'},
# {'remaining': 'A,B', 'removed': 'C'},
# {'remaining': 'A,C', 'removed': 'B'}]
# """
# superset = set(self._key_to_list(superset))
# if len(superset) > 1:
# return [{'removed': x,
# 'remaining': self._list_to_key(
# list(superset.difference(x)))}
# for x in superset]
# else:
# return list() # return empty list if superset has only one element
#
#
# def _test_shapley_data_model():
# list_of_coalitions = [['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'], ['A', 'A', 'B', 'C'],
# ['C', 'A'], ['B', 'C'], ['C', 'B'], ['C', 'B'], ['A']]
# dm = ShapleyDataModel_old() # initialize the data model
#
# for coalition in list_of_coalitions: # count the coalitions
# dm.absorb_coalition(coalition)
# assert dm.coalition_obs == Counter({'A,B,C': 4, 'B,C': 3, 'A': 1, 'A,C': 1}), \
# "Unexpected result for dm.coalition_obs"
#
# dm.mk_coalition_contributions()
# assert dm.coalition_values \
# == Counter({'C': 8, 'B': 7, 'B,C': 7, 'A': 6, 'A,C': 5, 'A,B,C': 4, 'A,B': 4}), \
# "Unexpected result for dm.coalition_values"
#
# print("All good in _test_shapley_data_model")
| {
"content_hash": "100014e3583cd2a368633889b98a7cec",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 107,
"avg_line_length": 38.108856088560884,
"alnum_prop": 0.5873638344226579,
"repo_name": "thorwhalen/ut",
"id": "26f7059e46a7ec7766a07933864665e87298aa13",
"size": "20655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ut/stats/bin_est/shapley.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1174"
},
{
"name": "Python",
"bytes": "2258941"
}
],
"symlink_target": ""
} |
import atexit
import fcntl
import grp
import logging as std_logging
from logging import handlers
import os
import pwd
import signal
import sys
from oslo_log import log as logging
from neutron.common import exceptions
from neutron.i18n import _LE, _LI
LOG = logging.getLogger(__name__)
DEVNULL = object()
def setuid(user_id_or_name):
    """Switch the process to the given uid (numeric id or user name).

    A target uid of 0 (root) is left untouched.  On failure the error is
    logged and FailToDropPrivilegesExit is raised.
    """
    try:
        target_uid = int(user_id_or_name)
    except (TypeError, ValueError):
        # Not numeric: resolve the user name through the password database.
        target_uid = pwd.getpwnam(user_id_or_name).pw_uid
    if target_uid == 0:
        return
    try:
        os.setuid(target_uid)
    except OSError:
        msg = _('Failed to set uid %s') % target_uid
        LOG.critical(msg)
        raise exceptions.FailToDropPrivilegesExit(msg)
def setgid(group_id_or_name):
    """Switch the process to the given gid (numeric id or group name).

    A target gid of 0 (root group) is left untouched.  On failure the error
    is logged and FailToDropPrivilegesExit is raised.
    """
    try:
        target_gid = int(group_id_or_name)
    except (TypeError, ValueError):
        # Not numeric: resolve the group name through the group database.
        target_gid = grp.getgrnam(group_id_or_name).gr_gid
    if target_gid == 0:
        return
    try:
        os.setgid(target_gid)
    except OSError:
        msg = _('Failed to set gid %s') % target_gid
        LOG.critical(msg)
        raise exceptions.FailToDropPrivilegesExit(msg)
def unwatch_log():
    """Replace WatchedFileHandler handlers by FileHandler ones.

    Neutron logging uses WatchedFileHandler handlers but they do not support
    privileges drop; this method replaces them by FileHandler handlers that
    do support privileges drop.
    """
    root_logger = logging.getLogger(None).logger
    watched = [h for h in root_logger.handlers
               if isinstance(h, handlers.WatchedFileHandler)]
    for old_handler in watched:
        replacement = std_logging.FileHandler(old_handler.baseFilename,
                                              mode=old_handler.mode,
                                              encoding=old_handler.encoding,
                                              delay=old_handler.delay)
        root_logger.removeHandler(old_handler)
        root_logger.addHandler(replacement)
def drop_privileges(user=None, group=None):
    """Drop root privileges down to the given user and/or group.

    No-op when neither is given; the process must be running as root
    otherwise.
    """
    if user is None and group is None:
        # Nothing requested: keep current privileges.
        return

    def _fail(message):
        # Privilege-drop failures must never be ignored: log and abort.
        LOG.critical(message)
        raise exceptions.FailToDropPrivilegesExit(message)

    if os.geteuid() != 0:
        _fail(_('Root permissions are required to drop privileges.'))
    if group is not None:
        try:
            # Clear supplemental groups before switching the primary gid.
            os.setgroups([])
        except OSError:
            _fail(_('Failed to remove supplemental groups'))
        setgid(group)
    if user is not None:
        setuid(user)
    LOG.info(_LI("Process runs with uid/gid: %(uid)s/%(gid)s"),
             {'uid': os.getuid(), 'gid': os.getgid()})
class Pidfile(object):
    """Exclusive pidfile guard.

    Opens (creating if needed) *pidfile* and takes a non-blocking exclusive
    flock on it; the lock is held for the life of the process so a second
    daemon instance fails fast in __init__.
    """

    def __init__(self, pidfile, procname, uuid=None):
        self.pidfile = pidfile
        self.procname = procname
        self.uuid = uuid
        try:
            self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
            # Non-blocking exclusive lock: raises if another holder exists.
            fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            LOG.exception(_LE("Error while handling pidfile: %s"), pidfile)
            sys.exit(1)

    def __str__(self):
        # String form is the pidfile path (used by Daemon.delete_pid).
        return self.pidfile

    def unlock(self):
        # NOTE(review): fcntl.flock returns None on success and raises on
        # failure, so `not not flock(...)` is always False and the IOError
        # below is effectively unreachable -- kept as-is to preserve behavior.
        if not not fcntl.flock(self.fd, fcntl.LOCK_UN):
            raise IOError(_('Unable to unlock pid file'))

    def write(self, pid):
        """Truncate the file, write *pid*, and force it to disk."""
        os.ftruncate(self.fd, 0)
        # NOTE(review): writes a str; on Python 3 os.write requires bytes.
        # This code predates py3 support -- confirm target interpreter.
        os.write(self.fd, "%d" % pid)
        os.fsync(self.fd)

    def read(self):
        """Return the pid stored in the file, or None if it is unparsable."""
        try:
            pid = int(os.read(self.fd, 128))
            # Rewind so subsequent reads start at the beginning again.
            os.lseek(self.fd, 0, os.SEEK_SET)
            return pid
        except ValueError:
            return

    def is_running(self):
        """Return True if the stored pid looks like a live instance of our daemon.

        Matches procname (and uuid, when given) against /proc/<pid>/cmdline.
        """
        pid = self.read()
        if not pid:
            return False

        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                exec_out = f.readline()
            return self.procname in exec_out and (not self.uuid or
                                                  self.uuid in exec_out)
        except IOError:
            # No /proc entry: the process is gone.
            return False
class Daemon(object):
    """A generic daemon class.

    Usage: subclass the Daemon class and override the run() method
    """

    def __init__(self, pidfile, stdin=DEVNULL, stdout=DEVNULL,
                 stderr=DEVNULL, procname='python', uuid=None,
                 user=None, group=None, watch_log=True):
        """Note: pidfile may be None."""
        # The DEVNULL sentinel means: redirect that stream to /dev/null.
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.procname = procname
        self.pidfile = (Pidfile(pidfile, procname, uuid)
                        if pidfile is not None else None)
        # user/group to drop privileges to once running (None = keep).
        self.user = user
        self.group = group
        self.watch_log = watch_log

    def _fork(self):
        # Parent exits immediately; os._exit avoids running atexit hooks in
        # the parent. The child continues.
        try:
            pid = os.fork()
            if pid > 0:
                os._exit(0)
        except OSError:
            LOG.exception(_LE('Fork failed'))
            sys.exit(1)

    def daemonize(self):
        """Daemonize process by doing Stevens double fork."""
        # fork first time
        self._fork()

        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)

        # fork second time
        self._fork()

        # redirect standard file descriptors
        sys.stdout.flush()
        sys.stderr.flush()
        devnull = open(os.devnull, 'w+')
        stdin = devnull if self.stdin is DEVNULL else self.stdin
        stdout = devnull if self.stdout is DEVNULL else self.stdout
        stderr = devnull if self.stderr is DEVNULL else self.stderr
        os.dup2(stdin.fileno(), sys.stdin.fileno())
        os.dup2(stdout.fileno(), sys.stdout.fileno())
        os.dup2(stderr.fileno(), sys.stderr.fileno())

        if self.pidfile is not None:
            # write pidfile
            atexit.register(self.delete_pid)
            signal.signal(signal.SIGTERM, self.handle_sigterm)
            self.pidfile.write(os.getpid())

    def delete_pid(self):
        # atexit hook: remove the pidfile on clean shutdown.
        if self.pidfile is not None:
            os.remove(str(self.pidfile))

    def handle_sigterm(self, signum, frame):
        # SIGTERM -> normal exit, so atexit hooks (pidfile cleanup) still run.
        sys.exit(0)

    def start(self):
        """Start the daemon."""
        if self.pidfile is not None and self.pidfile.is_running():
            self.pidfile.unlock()
            LOG.error(_LE('Pidfile %s already exist. Daemon already '
                          'running?'), self.pidfile)
            sys.exit(1)

        # Start the daemon
        self.daemonize()
        self.run()

    def run(self):
        """Override this method and call super().run when subclassing Daemon.

        start() will call this method after the process has daemonized.
        """
        if not self.watch_log:
            unwatch_log()
        drop_privileges(self.user, self.group)
| {
"content_hash": "5e2c19a9fdcf33de6a191a6562cef4a5",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 77,
"avg_line_length": 30.01769911504425,
"alnum_prop": 0.5673643867924528,
"repo_name": "wenhuizhang/neutron",
"id": "670a239898b3a533d5649c4e29d0e191f7eee0ea",
"size": "7411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/agent/linux/daemon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7385970"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
} |
import re
import requests
import json
import os
import vim
class GitRepo:
    """Reads the GitHub remote from a git config file and fetches/caches its issues.

    Python 2 code (print statements); issues are cached on disk as JSON under
    a 'cache' directory next to this module.
    """

    # Class-level defaults; shadowed by instance attributes once the methods
    # below assign them.
    config = ""
    user_name = ""
    repo_name = ""
    simplified_issues = []

    def __init__(self, config_file):
        # Parse user/repo straight from the git config (e.g. '.git/config').
        f = open(config_file, 'r')
        content = f.read()
        self.parse_config(content)
        f.close()

    def parse_config(self, content):
        """Extract GitHub user and repo names from git config text."""
        # Read the config and parse
        # TODO: parse and get the url
        lines = content.split('\n')
        remote_url = ""
        # loop to find the line with remote origin
        for i in range(len(lines)):
            if lines[i] == '[remote "origin"]':
                # URL is expected on the line right after the section header.
                remote_url = lines[i + 1]
                break
        # print "remote url is " + remote_url
        # TODO: cache and show api process
        # Matches both SSH (git@github.com:user/repo.git) and HTTPS forms.
        pattern = re.compile(r'.*github.com[:/](.*)/(.*).git')
        match = pattern.match(remote_url)
        user_name = match.group(1)
        repo_name = match.group(2)
        self.user_name = user_name
        self.repo_name = repo_name

    def issue_url(self):
        """GitHub REST API issues endpoint for this repository."""
        # TODO: error handling
        return "https://api.github.com/repos/" + self.user_name + "/" + self.repo_name + "/issues"

    def get_issues(self):
        """Return issues, preferring the disk cache over the network."""
        if not self.get_cached_issues():
            self.get_api_issues()
        return self.simplified_issues

    def get_cached_issues(self):
        # Loads the cache (if any) and returns the resulting issue list;
        # an empty/missing cache is falsy, triggering an API fetch upstream.
        self.read_cache()
        return self.simplified_issues

    # TODO: add timeout to avoid network problems
    def get_api_issues(self):
        """Fetch issues from GitHub, keep number/title, print and cache them."""
        r = requests.get(self.issue_url())
        issues = r.json()
        simplified_issues = []
        for issue in issues:
            simplified_issues.append({
                'title': issue['title'],
                'number': str(issue['number'])
            })
        # print the issue names
        for s in simplified_issues:
            # TODO: use sprintf
            print '#' + s['number'] + ' ' + s['title']
        self.simplified_issues = simplified_issues
        self.save_cache()

    def read_cache(self):
        """Load cached issues from disk; a missing cache file is not an error."""
        try:
            f = open(self.cache_name(), 'r')
            data = f.read()
            self.simplified_issues = json.loads(data)
            return True
        except IOError:
            print 'no cache'
            return False

    def save_cache(self):
        """Persist the simplified issue list as JSON under the cache folder."""
        if not os.path.exists(self.cache_folder()):
            os.makedirs(self.cache_folder())
        data = json.dumps(self.simplified_issues)
        f = open(self.cache_name(), 'w')
        f.write(data)
        f.close()

    def cache_folder(self):
        # Cache lives next to this module in a 'cache' subdirectory.
        return os.path.dirname(os.path.realpath(__file__)) + '/cache'

    def cache_name(self):
        # One cache file per user/repo pair.
        return self.cache_folder() + '/' + self.user_name + '_s_' + self.repo_name + '.cache'
def add_to_vim_list(list_name, issues):
    """Append '#<number> <title>' entries to the named Vim list variable."""
    for issue in issues:
        entry = '#' + issue['number'] + ' ' + issue['title']
        vim.eval('add(' + list_name + ', "' + entry + '")')
def list_open_issues(config='.git/config'):
    """Return cached (or freshly fetched) issues for the repo in *config*."""
    return GitRepo(config).get_issues()
def update_issues(config='.git/config'):
    # Force a refresh of the issue cache from the GitHub API (ignores cache).
    repo = GitRepo(config)
    repo.get_api_issues() | {
"content_hash": "9f3d0fb746bba644f6306e7a14180a17",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 98,
"avg_line_length": 28.981308411214954,
"alnum_prop": 0.5446630119316349,
"repo_name": "at15/issue-complete",
"id": "c72ab5516ede037c6759efe6737271eca1423615",
"size": "3101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py2/ftplugin/COMMIT_EDITMSG/issue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10395"
},
{
"name": "Vim script",
"bytes": "1182"
}
],
"symlink_target": ""
} |
class Rec1:
    """Implementation of the application data type Rec1.
    Generated by: EASy-Producer."""
    # Generated record fields; semantics are defined by the EASy-Producer
    # model this class was generated from -- consult the model, not this code.
    intField: int
    stringField: str
| {
"content_hash": "141e1771c1d5c66e340651b6641278ec",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 57,
"avg_line_length": 25,
"alnum_prop": 0.66,
"repo_name": "SSEHUB/EASyProducer",
"id": "6ec2a97fbeb2fae99518aa825f59dc6d4d086623",
"size": "150",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Plugins/EASy-Producer/ScenariosTest/testdata/real/IIP-Ecosphere/nov21/expected/SimpleMesh/SimpleMeshTestingApp/src/main/python/Rec1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AspectJ",
"bytes": "1184"
},
{
"name": "Batchfile",
"bytes": "6836"
},
{
"name": "GAP",
"bytes": "2073949"
},
{
"name": "HTML",
"bytes": "112226"
},
{
"name": "Java",
"bytes": "30149700"
},
{
"name": "Shell",
"bytes": "2416"
},
{
"name": "Velocity Template Language",
"bytes": "231811"
},
{
"name": "Xtend",
"bytes": "2141"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the questionnaire app tables (South schema migration).

        Creation order respects foreign-key dependencies:
        Questionnaire -> UserQuestionnaire -> QuestionContainer -> Question
        -> Answer.
        """
        # Adding model 'Questionnaire'
        db.create_table('questionnaire_questionnaire', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=60)),
        ))
        db.send_create_signal('questionnaire', ['Questionnaire'])

        # Adding model 'UserQuestionnaire'
        db.create_table('questionnaire_userquestionnaire', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='questionnaires', null=True, to=orm['auth.User'])),
            ('questionnaire', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='user_questionnaires', null=True, to=orm['questionnaire.Questionnaire'])),
        ))
        db.send_create_signal('questionnaire', ['UserQuestionnaire'])

        # Adding model 'QuestionContainer'
        db.create_table('questionnaire_questioncontainer', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=60, null=True, blank=True)),
            ('questionnaire', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='containers', null=True, to=orm['questionnaire.Questionnaire'])),
            ('order', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('questionnaire', ['QuestionContainer'])

        # Adding model 'Question'
        db.create_table('questionnaire_question', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('question', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('choices', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)),
            ('answer_type', self.gf('django.db.models.fields.IntegerField')()),
            ('question_container', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='questions', null=True, to=orm['questionnaire.QuestionContainer'])),
        ))
        db.send_create_signal('questionnaire', ['Question'])

        # Adding model 'Answer'
        db.create_table('questionnaire_answer', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('answer', self.gf('django.db.models.fields.CharField')(max_length=200)),
            ('question', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='answers', null=True, to=orm['questionnaire.Question'])),
            ('user_questionnaire', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='answers', null=True, to=orm['questionnaire.UserQuestionnaire'])),
        ))
        db.send_create_signal('questionnaire', ['Answer'])
def backwards(self, orm):
    """Reverse this migration: drop every table created in forwards()."""
    # Deleting model 'Questionnaire'
    db.delete_table('questionnaire_questionnaire')

    # Deleting model 'UserQuestionnaire'
    db.delete_table('questionnaire_userquestionnaire')

    # Deleting model 'QuestionContainer'
    db.delete_table('questionnaire_questioncontainer')

    # Deleting model 'Question'
    db.delete_table('questionnaire_question')

    # Deleting model 'Answer'
    db.delete_table('questionnaire_answer')
# Frozen ORM snapshot used by this migration (South-style auto-generated
# data — regenerate via the freezer rather than editing by hand).
models = {
    'auth.group': {
        'Meta': {'object_name': 'Group'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
        'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
    },
    'auth.permission': {
        'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
        'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
    },
    'auth.user': {
        'Meta': {'object_name': 'User'},
        'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
        'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
        'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
        'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
        'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
    },
    'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'questionnaire.answer': {
        'Meta': {'object_name': 'Answer'},
        'answer': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}),
        'user_questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.UserQuestionnaire']"})
    },
    'questionnaire.question': {
        'Meta': {'object_name': 'Question'},
        'answer_type': ('django.db.models.fields.IntegerField', [], {}),
        'choices': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'question': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
        'question_container': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'questions'", 'null': 'True', 'to': "orm['questionnaire.QuestionContainer']"})
    },
    'questionnaire.questioncontainer': {
        'Meta': {'object_name': 'QuestionContainer'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
        'order': ('django.db.models.fields.IntegerField', [], {}),
        'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'containers'", 'null': 'True', 'to': "orm['questionnaire.Questionnaire']"})
    },
    'questionnaire.questionnaire': {
        'Meta': {'object_name': 'Questionnaire'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
    },
    'questionnaire.userquestionnaire': {
        'Meta': {'object_name': 'UserQuestionnaire'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_questionnaires'", 'null': 'True', 'to': "orm['questionnaire.Questionnaire']"}),
        'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'questionnaires'", 'null': 'True', 'to': "orm['auth.User']"})
    }
}

complete_apps = ['questionnaire']
| {
"content_hash": "6a803674979162d83b131943fa8d1719",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 200,
"avg_line_length": 66.15277777777777,
"alnum_prop": 0.5892294772202393,
"repo_name": "pythonbyexample/PBE",
"id": "a9670698d172809b3e06f96eb9439a83ac973304",
"size": "9544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbe/questionnaire/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "251897"
},
{
"name": "CSS",
"bytes": "90956"
},
{
"name": "JavaScript",
"bytes": "85995"
},
{
"name": "Python",
"bytes": "1255167"
},
{
"name": "Shell",
"bytes": "82"
},
{
"name": "VimL",
"bytes": "46347"
}
],
"symlink_target": ""
} |
from nova.compute import manager as compute_manager
from nova.tests.functional.test_servers import ProviderUsageBaseTestCase
class TestServerResizeReschedule(ProviderUsageBaseTestCase):
    """Regression test for bug #1741125

    During testing in the alternate host series, it was found that retries
    when resizing an instance would always fail. This turned out to be true
    even before alternate hosts for resize was introduced. Further
    investigation showed that there was a race in call to retry the resize
    and the revert of the original attempt.

    This adds a functional regression test to show the failure. A follow up
    patch with the fix will modify the test to show it passing again.
    """

    compute_driver = 'fake.SmallFakeDriver'

    def setUp(self):
        super(TestServerResizeReschedule, self).setUp()
        # several computes so a failed resize has alternate hosts to retry on
        self.compute1 = self._start_compute(host='host1')
        self.compute2 = self._start_compute(host='host2')
        self.compute3 = self._start_compute(host='host3')
        self.compute4 = self._start_compute(host='host4')

        flavors = self.api.get_flavors()
        self.flavor1 = flavors[0]
        self.flavor2 = flavors[1]
        if self.flavor1["disk"] > self.flavor2["disk"]:
            # Make sure that flavor1 is smaller
            self.flavor1, self.flavor2 = self.flavor2, self.flavor1

    def test_resize_reschedule_uses_host_lists(self):
        """Test that when a resize attempt fails, the retry comes from the
        supplied host_list, and does not call the scheduler.
        """
        server_req = self._build_minimal_create_server_request(
            self.api, 'some-server', flavor_id=self.flavor1['id'],
            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
            networks=[])
        self.first_attempt = True
        created_server = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(self.api, created_server,
                                             'ACTIVE')

        actual_prep_resize = compute_manager.ComputeManager._prep_resize

        def fake_prep_resize(*args, **kwargs):
            # fail exactly once so the resize is rescheduled to an alternate
            if self.first_attempt:
                # Only fail the first time
                self.first_attempt = False
                raise Exception('fake_prep_resize')
            actual_prep_resize(*args, **kwargs)

        # Yes this isn't great in a functional test, but it's simple.
        self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
                      fake_prep_resize)

        server_uuid = server["id"]
        data = {"resize": {"flavorRef": self.flavor2['id']}}
        self.api.post_server_action(server_uuid, data)

        server = self._wait_for_state_change(self.api, created_server,
                                             'VERIFY_RESIZE')
        self.assertEqual(self.flavor2['name'],
                         server['flavor']['original_name'])
| {
"content_hash": "00fb1caffb11ed22a018d244dd90652b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 75,
"avg_line_length": 42.44117647058823,
"alnum_prop": 0.6406791406791407,
"repo_name": "phenoxim/nova",
"id": "2d361e20bd21756f5c36e4854a30308a776b2a0f",
"size": "3430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/functional/regressions/test_bug_1741125.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16289098"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "282020"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class AnglesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `anglesrc` property of `scattersmith.marker`."""

    def __init__(
        self, plotly_name="anglesrc", parent_name="scattersmith.marker", **kwargs
    ):
        # anglesrc edits never trigger a re-render by default
        edit_type = kwargs.pop("edit_type", "none")
        super(AnglesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "61fdcf1317c905306f3362dd8063a875",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 32.53846153846154,
"alnum_prop": 0.6099290780141844,
"repo_name": "plotly/plotly.py",
"id": "da1b87b65d2f7f445c4cad28cce700b853905467",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattersmith/marker/_anglesrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from sdc import hstr_ext
import llvmlite.binding as ll
from llvmlite import ir as lir
from collections import namedtuple
import operator
import numba
from numba import types
from numba.core import cgutils
from numba.core import ir_utils, ir
from numba.core.ir_utils import (guard, get_definition, find_callname, require,
add_offset_to_labels, find_topo_order, find_const)
from numba.core.typing import signature
from numba.core.typing.templates import infer_global, AbstractTemplate
from numba.core.imputils import lower_builtin
from numba.extending import overload, intrinsic, lower_cast
from numba.np import numpy_support
import numpy as np
import sdc
from sdc.str_ext import string_type, list_string_array_type
from sdc.str_arr_ext import string_array_type, num_total_chars, pre_alloc_string_array
from sdc.config import (config_use_parallel_overloads, config_inline_overloads)
from enum import Enum
import types as pytypes
from numba.extending import overload, overload_method, overload_attribute
from numba.extending import register_jitable, register_model
from numba.core.datamodel.registry import register_default
from functools import wraps
from itertools import filterfalse, chain
# int values for types to pass to C code
# XXX: these codes must stay in sync with _hpat_common.h
class CTypeEnum(Enum):
    """Numeric dtype codes shared with the native (C) side."""
    Int8 = 0
    UInt8 = 1
    Int32 = 2
    UInt32 = 3
    Int64 = 4
    UInt64 = 7  # out of sequence: 5/6 were already taken by the float codes
    Float32 = 5
    Float64 = 6
    Int16 = 8
    UInt16 = 9
# Maps Numba scalar types to the CTypeEnum codes understood by the C code.
_numba_to_c_type_map = {
    types.int8: CTypeEnum.Int8.value,
    types.uint8: CTypeEnum.UInt8.value,
    types.int32: CTypeEnum.Int32.value,
    types.uint32: CTypeEnum.UInt32.value,
    types.int64: CTypeEnum.Int64.value,
    types.uint64: CTypeEnum.UInt64.value,
    types.float32: CTypeEnum.Float32.value,
    types.float64: CTypeEnum.Float64.value,
    # datetimes are passed across the boundary as raw 64-bit values
    types.NPDatetime('ns'): CTypeEnum.UInt64.value,
    # XXX: Numpy's bool array uses a byte for each value but regular booleans
    # are not bytes
    # TODO: handle boolean scalars properly
    types.bool_: CTypeEnum.UInt8.value,
    types.int16: CTypeEnum.Int16.value,
    types.uint16: CTypeEnum.UInt16.value,
}
def min_dtype_int_val(dtype):
    """Smallest value representable by the integer Numba type *dtype*."""
    return np.iinfo(numpy_support.as_dtype(dtype)).min
def max_dtype_int_val(dtype):
    """Largest value representable by the integer Numba type *dtype*."""
    return np.iinfo(numpy_support.as_dtype(dtype)).max
def min_dtype_float_val(dtype):
    """Most negative finite value of the floating-point Numba type *dtype*."""
    return np.finfo(numpy_support.as_dtype(dtype)).min
def max_dtype_float_val(dtype):
    """Largest finite value of the floating-point Numba type *dtype*."""
    return np.finfo(numpy_support.as_dtype(dtype)).max
# silence Numba error messages for now
# TODO: customize through @sdc.jit
numba.core.errors.error_extras = {
    'unsupported_error': '',
    'typing': '',
    'reportable': '',
    'interpreter': '',
    'constant_inference': ''}


# sentinel value representing non-constant values
class NotConstant:
    # marker class: only the identity of the NOT_CONSTANT singleton matters
    pass


NOT_CONSTANT = NotConstant()

# bundle describing how to replace a call with a generated function
# (pre_nodes presumably holds IR statements to insert first — see call sites)
ReplaceFunc = namedtuple("ReplaceFunc",
                         ["func", "arg_types", "args", "glbls", "pre_nodes"])

# numpy entry points treated as array-allocating calls
np_alloc_callnames = ('empty', 'zeros', 'ones', 'full')
def unliteral_all(args):
    """Strip literal wrappers from every type in *args*, returning a tuple."""
    return tuple(map(types.unliteral, args))
def get_constant(func_ir, var, default=NOT_CONSTANT):
    """Resolve *var* to a Python constant via its defining IR node.

    Follows chains of plain variable copies; returns *default* when no
    definition is found or the definition is not a constant.
    """
    defn = guard(get_definition, func_ir, var)
    if isinstance(defn, ir.Const):
        return defn.value
    if isinstance(defn, ir.Var):
        # simple alias: chase the copied variable recursively
        return get_constant(func_ir, defn, default)
    return default
def inline_new_blocks(func_ir, block, i, callee_blocks, work_list=None):
    """Splice *callee_blocks* into *func_ir* in place of statement *i* of
    *block* (an assignment whose target receives the callee's return value).

    Mutates func_ir.blocks and *block* in place. Returns the relabeled
    callee blocks; (label, block) pairs are appended to *work_list* if given.
    """
    # adopted from inline_closure_call
    scope = block.scope
    instr = block.body[i]

    # 1. relabel callee_ir by adding an offset
    callee_blocks = add_offset_to_labels(callee_blocks, ir_utils._max_label + 1)
    callee_blocks = ir_utils.simplify_CFG(callee_blocks)
    max_label = max(callee_blocks.keys())
    # reset globals in ir_utils before we use it
    ir_utils._max_label = max_label

    topo_order = find_topo_order(callee_blocks)

    # 5. split caller blocks into two
    new_blocks = []
    new_block = ir.Block(scope, block.loc)
    # everything after statement i moves into the continuation block
    new_block.body = block.body[i + 1:]
    new_label = ir_utils.next_label()
    func_ir.blocks[new_label] = new_block
    new_blocks.append((new_label, new_block))
    block.body = block.body[:i]
    min_label = topo_order[0]
    block.body.append(ir.Jump(min_label, instr.loc))

    # 6. replace Return with assignment to LHS
    numba.core.inline_closurecall._replace_returns(callee_blocks, instr.target, new_label)

    # remove the old definition of instr.target too
    if (instr.target.name in func_ir._definitions):
        func_ir._definitions[instr.target.name] = []

    # 7. insert all new blocks, and add back definitions
    for label in topo_order:
        # block scope must point to parent's
        block = callee_blocks[label]
        block.scope = scope
        numba.core.inline_closurecall._add_definitions(func_ir, block)
        func_ir.blocks[label] = block
        new_blocks.append((label, block))
    if work_list is not None:
        for block in new_blocks:
            work_list.append(block)
    return callee_blocks
def is_alloc_call(func_var, call_table):
    """
    return true if func_var represents an array creation call
    """
    assert func_var in call_table
    call_list = call_table[func_var]
    # numpy allocation: [name, numpy-module] pair
    if len(call_list) == 2 and call_list[1] == np:
        if call_list[0] in ('empty', 'zeros', 'ones', 'full'):
            return True
    # numba's inferred-dtype allocation helper
    return call_list == [numba.unsafe.ndarray.empty_inferred]
def is_alloc_callname(func_name, mod_name):
    """Return True when (func_name, mod_name) names an array-allocating call."""
    if not isinstance(mod_name, str):
        return False
    if mod_name == 'numpy':
        return func_name in ('empty', 'zeros', 'ones', 'full')
    if func_name == 'empty_inferred':
        return mod_name in ('numba.extending', 'numba.unsafe.ndarray')
    if func_name == 'pre_alloc_string_array':
        return mod_name == 'sdc.str_arr_ext'
    return (mod_name == 'sdc.str_ext'
            and func_name in ('alloc_str_list', 'alloc_list_list_str'))
def find_build_tuple(func_ir, var):
    """Check if a variable is constructed via build_tuple
    and return the sequence or raise GuardException otherwise.
    """
    # accept either a variable or its name
    require(isinstance(var, (ir.Var, str)))
    defn = get_definition(func_ir, var)
    require(isinstance(defn, ir.Expr) and defn.op == 'build_tuple')
    return defn.items
def cprint(*s):
    """Debug print usable inside jitted code (see typing/lowering below)."""
    print(*s)
@infer_global(cprint)
class CprintInfer(AbstractTemplate):
    """Typing for cprint: any positional arguments, returns None."""

    def generic(self, args, kws):
        assert not kws  # keyword arguments are not supported
        return signature(types.none, *unliteral_all(args))
# printf format specifiers used by cprint_lower for numeric scalars
typ_to_format = {
    types.int32: 'd',
    types.uint32: 'u',
    types.int64: 'lld',
    types.uint64: 'llu',
    types.float32: 'f',
    types.float64: 'lf',
}

# native print helpers implemented in the hstr_ext C extension
ll.add_symbol('print_str', hstr_ext.print_str)
ll.add_symbol('print_char', hstr_ext.print_char)
@lower_builtin(cprint, types.VarArg(types.Any))
def cprint_lower(context, builder, sig, args):
    """Lowering for cprint: emit a native print call per argument."""
    from sdc.str_ext import string_type, char_type
    for i, val in enumerate(args):
        typ = sig.args[i]
        if typ == string_type:
            # delegate to the print_str C helper registered above
            fnty = lir.FunctionType(
                lir.VoidType(), [lir.IntType(8).as_pointer()])
            fn = cgutils.get_or_insert_function(builder.module, fnty, name="print_str")
            builder.call(fn, [val])
            cgutils.printf(builder, " ")
            continue
        if typ == char_type:
            # single byte goes through the print_char C helper
            fnty = lir.FunctionType(
                lir.VoidType(), [lir.IntType(8)])
            fn = cgutils.get_or_insert_function(builder.module, fnty, name="print_char")
            builder.call(fn, [val])
            cgutils.printf(builder, " ")
            continue
        if isinstance(typ, types.ArrayCTypes):
            # print the raw data pointer for array ctypes values
            cgutils.printf(builder, "%p ", val)
            continue
        # numeric scalar: printf with the format from typ_to_format
        format_str = typ_to_format[typ]
        cgutils.printf(builder, "%{} ".format(format_str), val)
    cgutils.printf(builder, "\n")
    return context.get_dummy_value()
def print_dist(d):
    """Human-readable name for a Distribution value (None when unknown)."""
    from sdc.distributed_analysis import Distribution
    names = {
        Distribution.REP: "REP",
        Distribution.OneD: "1D_Block",
        Distribution.OneD_Var: "1D_Block_Var",
        Distribution.Thread: "Multi-thread",
        Distribution.TwoD: "2D_Block",
    }
    return names.get(d)
def distribution_report():
    """Print the distribution chosen for each array and parfor by the last
    sdc.distributed analysis; no-op when no analysis has run yet."""
    import sdc.distributed
    if sdc.distributed.dist_analysis is None:
        return
    print("Array distributions:")
    for arr, dist in sdc.distributed.dist_analysis.array_dists.items():
        print("    {0:20} {1}".format(arr, print_dist(dist)))
    print("\nParfor distributions:")
    for p, dist in sdc.distributed.dist_analysis.parfor_dists.items():
        print("    {0:<20} {1}".format(p, print_dist(dist)))
def is_whole_slice(typemap, func_ir, var, accept_stride=False):
    """ return True if var can be determined to be a whole slice """
    sl_typ = typemap[var.name]
    require(sl_typ == types.slice2_type
            or (accept_stride and sl_typ == types.slice3_type))
    call_expr = get_definition(func_ir, var)
    require(isinstance(call_expr, ir.Expr) and call_expr.op == 'call')
    nargs = len(call_expr.args)
    assert nargs == 2 or (accept_stride and nargs == 3)
    assert find_callname(func_ir, call_expr) == ('slice', 'builtins')
    # a "whole" slice is slice(None, None[, step]): both bounds const None
    start_def = get_definition(func_ir, call_expr.args[0])
    stop_def = get_definition(func_ir, call_expr.args[1])
    require(isinstance(start_def, ir.Const) and start_def.value is None)
    require(isinstance(stop_def, ir.Const) and stop_def.value is None)
    return True
def is_const_slice(typemap, func_ir, var, accept_stride=False):
    """ return True if var can be determined to be a constant size slice """
    sl_typ = typemap[var.name]
    require(sl_typ == types.slice2_type
            or (accept_stride and sl_typ == types.slice3_type))
    call_expr = get_definition(func_ir, var)
    require(isinstance(call_expr, ir.Expr) and call_expr.op == 'call')
    nargs = len(call_expr.args)
    assert nargs == 2 or (accept_stride and nargs == 3)
    assert find_callname(func_ir, call_expr) == ('slice', 'builtins')
    # start must be None and stop a compile-time integer constant
    start_def = get_definition(func_ir, call_expr.args[0])
    require(isinstance(start_def, ir.Const) and start_def.value is None)
    size_const = find_const(func_ir, call_expr.args[1])
    require(isinstance(size_const, int))
    return True
def get_slice_step(typemap, func_ir, var):
    """Return the IR variable holding the step of a 3-argument slice() call."""
    require(typemap[var.name] == types.slice3_type)
    call_expr = get_definition(func_ir, var)
    require(isinstance(call_expr, ir.Expr) and call_expr.op == 'call')
    assert len(call_expr.args) == 3
    _, _, step_var = call_expr.args
    return step_var
def is_array(typemap, varname):
    """True if *varname* is typed as any array-like SDC understands."""
    if varname not in typemap:
        return False
    typ = typemap[varname]
    if is_np_array(typemap, varname):
        return True
    if typ in (string_array_type, list_string_array_type,
               sdc.hiframes.split_impl.string_array_split_view_type):
        return True
    return isinstance(typ, sdc.hiframes.pd_series_ext.SeriesType)
def is_np_array(typemap, varname):
    """True if *varname* is typed as a plain NumPy array."""
    if varname not in typemap:
        return False
    return isinstance(typemap[varname], types.Array)
def is_array_container(typemap, varname):
    """True if *varname* is a list/set whose elements are arrays or Series."""
    if varname not in typemap:
        return False
    typ = typemap[varname]
    if not isinstance(typ, (types.List, types.Set)):
        return False
    elem = typ.dtype
    return (isinstance(elem, types.Array)
            or elem == string_array_type
            or isinstance(elem, sdc.hiframes.pd_series_ext.SeriesType))
# converts an iterable to array, similar to np.array, but can support
# other things like StringArray
# TODO: other types like datetime?
def to_array(A):
    """Convert iterable *A* to an array (pure-Python path; see the overload)."""
    return np.array(A)
@overload(to_array)
def to_array_overload(A):
    """Typing for to_array: use np.array when Numba can type it for *A*."""
    # try regular np.array and return it if it works
    def to_array_impl(A):
        return np.array(A)

    try:
        # probe whether np.array can be typed/compiled for this argument type
        numba.njit(to_array_impl).get_call_template((A,), {})
        return to_array_impl
    except BaseException:
        # deliberately broad: any failure means "np.array unsupported here"
        pass  # should be handled elsewhere (e.g. Set)
def empty_like_type(n, arr):
    """Allocate an uninitialized length-*n* container matching *arr*'s dtype
    (pure-Python fallback; the overload below supports more types)."""
    return np.empty(n, arr.dtype)
@overload(empty_like_type)
def empty_like_type_overload(n, arr):
    """Typing-time dispatch of empty_like_type over supported array types."""
    if isinstance(arr, sdc.hiframes.pd_categorical_ext.CategoricalArray):
        from sdc.hiframes.pd_categorical_ext import fix_cat_array_type
        return lambda n, arr: fix_cat_array_type(np.empty(n, arr.dtype))

    if isinstance(arr, types.Array):
        return lambda n, arr: np.empty(n, arr.dtype)

    if isinstance(arr, types.List) and arr.dtype == string_type:
        def empty_like_type_str_list(n, arr):
            return [''] * n
        return empty_like_type_str_list

    # string array buffer for join
    assert arr == string_array_type

    def empty_like_type_str_arr(n, arr):
        # average character heuristic
        avg_chars = 20  # heuristic
        if len(arr) != 0:
            avg_chars = num_total_chars(arr) // len(arr)
        return pre_alloc_string_array(n, n * avg_chars)
    return empty_like_type_str_arr
def alloc_arr_tup(n, arr_tup, init_vals=()):
    """Allocate a tuple of uninitialized length-*n* arrays, one per array in
    *arr_tup*, preserving each dtype. *init_vals* is only honored by the
    jit overload below."""
    return tuple(np.empty(n, model.dtype) for model in arr_tup)
@overload(alloc_arr_tup)
def alloc_arr_tup_overload(n, data, init_vals=()):
    """Typing-time implementation of alloc_arr_tup: generate a function that
    returns a tuple of length-*n* arrays, one per member of the *data* tuple
    type, optionally filled from *init_vals*."""
    count = data.count

    # BUG FIX: the original used `init_vals is not ()`, an identity test
    # against a literal. It only worked because CPython interns the empty
    # tuple and it raises SyntaxWarning on modern interpreters; equality
    # gives the same outcome for both the default `()` and type objects.
    if init_vals != ():
        # TODO check for numeric value
        allocs = ','.join(["np.full(n, init_vals[{}], data[{}].dtype)".format(i, i)
                           for i in range(count)])
    else:
        allocs = ','.join(["empty_like_type(n, data[{}])".format(i)
                           for i in range(count)])

    func_text = "def f(n, data, init_vals=()):\n"
    func_text += "  return ({}{})\n".format(allocs,
                                            "," if count == 1 else "")  # single value needs comma to become tuple

    loc_vars = {}
    exec(func_text, {'empty_like_type': empty_like_type, 'np': np}, loc_vars)
    alloc_impl = loc_vars['f']
    return alloc_impl
@intrinsic
def get_ctypes_ptr(typingctx, ctypes_typ=None):
    """Intrinsic returning the raw data pointer of an ArrayCTypes value."""
    assert isinstance(ctypes_typ, types.ArrayCTypes)

    def codegen(context, builder, sig, args):
        in_carr, = args
        # unpack the ArrayCTypes struct and hand back its data field
        ctinfo = context.make_helper(builder, sig.args[0], in_carr)
        return ctinfo.data
    return types.voidptr(ctypes_typ), codegen
def remove_return_from_block(last_block):
    """Strip the trailing return sequence (optional const-None assign, cast,
    return) from *last_block* in place so its body can be spliced into
    another function."""
    # remove const none, cast, return nodes
    assert isinstance(last_block.body[-1], ir.Return)
    last_block.body.pop()
    assert (isinstance(last_block.body[-1], ir.Assign)
            and isinstance(last_block.body[-1].value, ir.Expr)
            and last_block.body[-1].value.op == 'cast')
    last_block.body.pop()
    # the const-None assignment exists only for a bare `return` statement
    if (isinstance(last_block.body[-1], ir.Assign)
            and isinstance(last_block.body[-1].value, ir.Const)
            and last_block.body[-1].value.value is None):
        last_block.body.pop()
def include_new_blocks(blocks, new_blocks, label, new_body, remove_non_return=True, work_list=None, func_ir=None):
    """Splice the separate block map *new_blocks* into *blocks* after *label*.

    *new_body* becomes the body of blocks[label], terminated by a jump into
    the spliced blocks. Returns the label of a fresh empty continuation
    block (jumped to from the spliced blocks when remove_non_return).
    """
    inner_blocks = add_offset_to_labels(new_blocks, ir_utils._max_label + 1)
    blocks.update(inner_blocks)
    ir_utils._max_label = max(blocks.keys())
    scope = blocks[label].scope
    loc = blocks[label].loc
    inner_topo_order = find_topo_order(inner_blocks)
    inner_first_label = inner_topo_order[0]
    inner_last_label = inner_topo_order[-1]
    if remove_non_return:
        # drop the return sequence so control can fall through
        remove_return_from_block(inner_blocks[inner_last_label])
    new_body.append(ir.Jump(inner_first_label, loc))
    blocks[label].body = new_body
    label = ir_utils.next_label()
    blocks[label] = ir.Block(scope, loc)
    if remove_non_return:
        inner_blocks[inner_last_label].body.append(ir.Jump(label, loc))
    # new_body.clear()
    if work_list is not None:
        topo_order = find_topo_order(inner_blocks)
        for _label in topo_order:
            block = inner_blocks[_label]
            block.scope = scope
            numba.core.inline_closurecall._add_definitions(func_ir, block)
            work_list.append((_label, block))
    return label
def find_str_const(func_ir, var):
    """Check if a variable can be inferred as a string constant, and return
    the constant value, or raise GuardException otherwise.
    """
    require(isinstance(var, ir.Var))
    defn = get_definition(func_ir, var)
    if isinstance(defn, ir.Const):
        value = defn.value
        require(isinstance(value, str))
        return value
    # only add supported (s1+s2), TODO: extend to other expressions
    require(isinstance(defn, ir.Expr) and defn.op == 'binop'
            and defn.fn == operator.add)
    left = find_str_const(func_ir, defn.lhs)
    right = find_str_const(func_ir, defn.rhs)
    return left + right
def gen_getitem(out_var, in_var, ind, calltypes, nodes):
    """Append `out_var = in_var[ind]` (a static_getitem assign) to *nodes*."""
    loc = out_var.loc
    getitem = ir.Expr.static_getitem(in_var, ind, None, loc)
    # static_getitem has no runtime call signature to record
    calltypes[getitem] = None
    nodes.append(ir.Assign(getitem, out_var, loc))
def sanitize_varname(varname):
    """Return *varname* with '$' and '.' mapped to '_' (identifier-safe)."""
    return varname.translate(str.maketrans('$.', '__'))
def is_call_assign(stmt):
    """True if *stmt* assigns the result of a call expression."""
    if not isinstance(stmt, ir.Assign):
        return False
    value = stmt.value
    return isinstance(value, ir.Expr) and value.op == 'call'
def is_call(expr):
    """True if *expr* is an IR call expression."""
    return isinstance(expr, ir.Expr) and expr.op == 'call'
def is_var_assign(inst):
    """True if *inst* is a plain variable-to-variable copy."""
    if not isinstance(inst, ir.Assign):
        return False
    return isinstance(inst.value, ir.Var)
def is_assign(inst):
    """True if *inst* is an IR assignment node."""
    return isinstance(inst, ir.Assign)
def dump_node_list(node_list):
    """Debug helper: print each IR node in *node_list*, one per line."""
    for n in node_list:
        print("    ", n)
def debug_prints():
    """True when Numba's array-optimization debug flag is enabled."""
    return numba.config.DEBUG_ARRAY_OPT == 1
def update_globals(func, glbls):
    """Merge *glbls* into func.__globals__ for plain Python functions;
    silently ignore any other callable kind."""
    if not isinstance(func, pytypes.FunctionType):
        return
    func.__globals__.update(glbls)
def update_jit_options(jit_options, parallel, config_flag):
    """Return a copy of *jit_options* with a resolved 'parallel' entry.

    Precedence: an explicit 'parallel' key in *jit_options*, then the
    *parallel* argument, then the *config_flag* default.

    Raises ValueError when both jit_options['parallel'] and *parallel*
    are supplied. Never mutates the caller's dict.
    """
    jit_options = jit_options.copy()
    if parallel is not None:
        if 'parallel' in jit_options:
            raise ValueError('Either jit_options "parallel" or parallel parameter could be specified at the same time')
        jit_options['parallel'] = parallel
    if 'parallel' not in jit_options:
        # BUG FIX: the original ignored the config_flag parameter and always
        # read the module-level config_use_parallel_overloads; every visible
        # caller passes that same flag, so honoring the parameter is
        # behavior-preserving. (A redundant second .copy() is also removed.)
        jit_options['parallel'] = config_flag
    return jit_options
def sdc_overload(func, jit_options={}, parallel=None, strict=True, inline=None, prefer_literal=True):
    """@overload wrapper that injects SDC's parallel/inline config defaults.

    (The mutable {} default is safe: update_jit_options copies it.)
    """
    resolved_options = update_jit_options(jit_options, parallel, config_use_parallel_overloads)
    inline_mode = inline if inline is not None else (
        'always' if config_inline_overloads else 'never')
    return overload(func, jit_options=resolved_options, strict=strict,
                    inline=inline_mode, prefer_literal=prefer_literal)
def patched_register_jitable(*args, **kwargs):
    """
    register_jitable patched according to this:
    https://github.com/numba/numba/issues/5142#issuecomment-579704346
    """
    def wrap(fn):
        # It is just a wrapper for @overload
        inline = kwargs.pop('inline', 'never')

        @overload(fn, jit_options=kwargs, inline=inline, strict=False)
        def ov_wrap(*args, **kwargs):
            # the overload implementation is the original function itself
            return fn
        return fn

    if kwargs:
        # used as @patched_register_jitable(**options): return the decorator
        return wrap
    else:
        # used as bare @patched_register_jitable: fn arrives positionally
        return wrap(*args)
def sdc_register_jitable(*args, **kwargs):
    """register_jitable variant seeding SDC's parallel/inline defaults."""
    merged = dict(kwargs)
    merged.setdefault('parallel', config_use_parallel_overloads)
    merged.setdefault('inline', 'always' if config_inline_overloads else 'never')

    def wrap(fn):
        return patched_register_jitable(**merged)(fn)

    # bare @sdc_register_jitable use passes the function positionally
    return wrap if kwargs else wrap(*args)
def sdc_overload_method(typ, name, jit_options={}, parallel=None, strict=True, inline=None, prefer_literal=True):
    """@overload_method wrapper that injects SDC's parallel/inline defaults.

    (The mutable {} default is safe: update_jit_options copies it.)
    """
    resolved_options = update_jit_options(jit_options, parallel, config_use_parallel_overloads)
    inline_mode = inline if inline is not None else (
        'always' if config_inline_overloads else 'never')
    return overload_method(typ, name, jit_options=resolved_options, strict=strict,
                           inline=inline_mode, prefer_literal=prefer_literal)
def sdc_overload_attribute(typ, name, jit_options={}, parallel=None, strict=True, inline=None, prefer_literal=True):
    """@overload_attribute wrapper that injects SDC's parallel/inline defaults.

    (The mutable {} default is safe: update_jit_options copies it.)
    """
    resolved_options = update_jit_options(jit_options, parallel, config_use_parallel_overloads)
    inline_mode = inline if inline is not None else (
        'always' if config_inline_overloads else 'never')
    return overload_attribute(typ, name, jit_options=resolved_options, strict=strict,
                              inline=inline_mode, prefer_literal=prefer_literal)
def print_compile_times(disp, level, func_names=None):
    """Print per-pipeline compile timings for dispatcher *disp* and for any
    cached overload implementations found in its typing context.

    level: truthy enables per-pass detail and argument printing.
    func_names: optional list of substrings used to filter overload templates.
    """
    def print_times(cres, args):
        print(f'Function: {cres.fndesc.unique_name}')
        pad = ' ' * 2
        if level:
            print(f'{pad * 1}Args: {args}')
        times = cres.metadata['pipeline_times']
        for pipeline, pass_times in times.items():
            print(f'{pad * 1}Pipeline: {pipeline}')
            if level:
                for name, timings in pass_times.items():
                    print(f'{pad * 2}{name:50s}{timings.run:.13f}')
            pipeline_total = sum(t.init + t.run + t.finalize for t in pass_times.values())
            print(f'{pad * 1}Time: {pipeline_total}\n')

    # print times for compiled function indicated by disp
    for args, cres in disp.overloads.items():
        print_times(cres, args)

    def has_no_cache(ovld):
        # templates without a populated _impl_cache have nothing to report
        return not (getattr(ovld, '_impl_cache', False) and ovld._impl_cache)

    known_funcs = disp.typingctx._functions
    all_templs = chain.from_iterable(known_funcs.values())
    compiled_templs = filterfalse(has_no_cache, all_templs)

    # filter only function names that are in the func_names list
    if func_names:
        compiled_templs = filterfalse(
            lambda x: not any(f in str(x) for f in func_names),
            compiled_templs
        )

    dispatchers_list = []
    for template in compiled_templs:
        tmpl_cached_impls = template._impl_cache.values()
        dispatchers_list.extend(tmpl_cached_impls)

    for impl_cache in set(dispatchers_list):
        # impl_cache is usually a tuple of format (dispatcher, args)
        # if not just skip these entries
        if not (isinstance(impl_cache, tuple)
                and len(impl_cache) == 2
                and isinstance(impl_cache[0], type(disp))):
            continue
        fndisp, args = impl_cache
        if not getattr(fndisp, 'overloads', False):
            continue
        cres, = list(fndisp.overloads.values())
        print_times(cres, args)
| {
"content_hash": "568d59d6505ff72bdd37e0fbdb0ae631",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 119,
"avg_line_length": 34.15259259259259,
"alnum_prop": 0.6374007721337787,
"repo_name": "IntelLabs/hpat",
"id": "7a4c54deadfbc8fdb871fd19482d7b1e726108b1",
"size": "24567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdc/utilities/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2045"
},
{
"name": "C",
"bytes": "5555"
},
{
"name": "C++",
"bytes": "306500"
},
{
"name": "CMake",
"bytes": "933"
},
{
"name": "Dockerfile",
"bytes": "4859"
},
{
"name": "Makefile",
"bytes": "517"
},
{
"name": "Python",
"bytes": "1552168"
},
{
"name": "Shell",
"bytes": "4347"
}
],
"symlink_target": ""
} |
class WrapCallable(object):
    """
    With python3 the unittest framework calls the __getattr__ method of the callable
    with "__name__" parameter and this causes problems if the callable is something that
    implements __getattr__ but doesn't expect "__name__" as a parameter there.

    Wrapping the callable in this plain object shields it from those
    attribute probes while keeping it callable.
    """
    def __init__(self, callable_):
        # the underlying object all calls are delegated to
        self.callable = callable_

    def __call__(self, *args, **kwargs):
        return self.callable(*args, **kwargs)
| {
"content_hash": "5ac43d02da39cd9351ec2b01a57c3907",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 87,
"avg_line_length": 41.18181818181818,
"alnum_prop": 0.6490066225165563,
"repo_name": "pasztorpisti/json-cfg",
"id": "e390845d4e505a7fd48d34b33208f4275a91b7a2",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101660"
}
],
"symlink_target": ""
} |
# Sphinx configuration ("conf.py") for the 2degrees API client docs.
# Standard sphinx-quickstart layout; commented-out lines are the
# generator's defaults, kept for reference.
import sys
import os
import shlex
# NOTE(review): sys, os and shlex are only needed by the commented-out
# sys.path manipulation below; they are otherwise unused here.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = '2degrees API client'
copyright = '2015, 2degrees'
author = '2degrees'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0a1'
# The full version, including alpha/beta/rc tags.
release = '1.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# HTML builder settings; only the theme, static path and help basename
# deviate from the sphinx-quickstart defaults.
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = '2degreesAPIclientdoc'
# -- Options for LaTeX output ---------------------------------------------
# LaTeX / man-page / Texinfo builder settings plus the intersphinx mapping;
# everything except the document tuples is at quickstart defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, '2degreesAPIclient.tex', '2degrees API client Documentation',
   '2degrees', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, '2degreesapiclient', '2degrees API client Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, '2degreesAPIclient', '2degrees API client Documentation',
   author, '2degreesAPIclient', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| {
"content_hash": "f79ff4fb46b591d4e578ec4812943cb6",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 79,
"avg_line_length": 32.50359712230216,
"alnum_prop": 0.7076139884904825,
"repo_name": "2degrees/twapi-connection",
"id": "64b67604090dfd279cd275d35bfd66380403e076",
"size": "9491",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45039"
}
],
"symlink_target": ""
} |
import voodoo.gen.protocols.BaseSocket.Messages as Messages
import sys
def _retrieve_class(complete_class_name):
copy_of_complete_class_name = complete_class_name
while copy_of_complete_class_name != '' and not sys.modules.has_key(copy_of_complete_class_name):
copy_of_complete_class_name = copy_of_complete_class_name[:copy_of_complete_class_name.rfind('.')]
if copy_of_complete_class_name == '':
return None
module = sys.modules[copy_of_complete_class_name]
class_name = complete_class_name[len(copy_of_complete_class_name)+1:]
if not hasattr(module,class_name):
return None
return getattr(module,class_name)
# Stubs of client methods to dynamically generate
# All of them must have the same name format:
#
# _prefix_stub
#
# Where prefix can be:
# "": The method does what it must do (its action)
# "call_begin" Designed for asynchronous communication (not used for the moment)
# "call_is_running" Designed for asynchronous communication (not used for the moment)
# "call_get_result" Designed for asynchronous communication (not used for the moment)
def _generate_stub(METHOD_NAME):
    # Builds the synchronous client-side stub for METHOD_NAME.  The literal
    # tokens METHOD_NAME / DOCUMENTATION in the docstring below are
    # placeholders that generate_base() rewrites at class-creation time,
    # so the docstring text must stay exactly as written.
    # TODO: The docstring related to the protocol is always BaseSocket
    def _stub(self, *parameters, **kparameters):
        """ Dynamically generated method. Protocol: BaseSocket.
            Method name: METHOD_NAME. Documentation: DOCUMENTATION """
        # Pack the call into a wire message, exchange it over the socket
        # and unpack the reply; the connection is always closed via the
        # finally clause, even when send/receive raises.
        formatter = Messages.MessageFormatter()
        message_to = formatter.pack_call(METHOD_NAME, *parameters, **kparameters)
        self._server.connect()
        try:
            self._server.send(message_to)
            message_from = self._server.receive()
        finally:
            self._server.disconnect()
        result = formatter.unpack_result(message_from)
        return result.answer()
    return _stub
def _generate_call_begin_stub(METHOD_NAME):
    # Placeholder generator for asynchronous "begin call" stubs.
    # Not used for the moment but required by ClientSkel.
    # The METHOD_NAME/DOCUMENTATION tokens in the docstring are rewritten
    # by generate_base() and must not be edited.
    # TODO: The docstring related to the protocol is always BaseSocket
    def _call_begin_stub(self,*parameters,**kparameters):
        """ Dynamically generated method. Protocol: BaseSocket.
            Method name: METHOD_NAME. Documentation: DOCUMENTATION """
        pass
    return _call_begin_stub
def _generate_call_is_running_stub(METHOD_NAME):
    # Placeholder generator for asynchronous "is the call still running"
    # stubs.  Not used for the moment by ClientSkel.
    # The METHOD_NAME/DOCUMENTATION tokens in the docstring are rewritten
    # by generate_base() and must not be edited.
    # TODO: The docstring related to the protocol is always BaseSocket
    def _call_is_running_stub(self,server_key,block):
        """ Dynamically generated method. Protocol: BaseSocket.
            Method name: METHOD_NAME. Documentation: DOCUMENTATION """
        pass
    return _call_is_running_stub
def _generate_call_get_result_stub(METHOD_NAME):
    # Placeholder generator for asynchronous "fetch call result" stubs.
    # Not used for the moment by ClientSkel.
    # The METHOD_NAME/DOCUMENTATION tokens in the docstring are rewritten
    # by generate_base() and must not be edited.
    # TODO: The docstring related to the protocol is always BaseSocket
    def _call_get_result_stub(self,server_key):
        """ Dynamically generated method. Protocol: BaseSocket.
            Method name: METHOD_NAME. Documentation: DOCUMENTATION """
        pass
    return _call_get_result_stub
# Tuple with the stub pointers of the stubs to generate.
# generate_base() iterates over this tuple and derives each attribute
# name from the generator function's own name ("_generate_<prefix_>stub").
stubs = (
            _generate_stub,
            _generate_call_begin_stub,
            _generate_call_is_running_stub,
            _generate_call_get_result_stub
    )
def generate_base(methods, ClientSocket):
    """Attach dynamically generated stub methods to ClientSocket.

    :param methods: sequence of method names, or a dict mapping
                    method name -> documentation string.
    :param ClientSocket: class that receives one attribute per generated
                         stub, named "<prefix><method_name>".
    :return: the same ClientSocket class, now carrying the stubs.
    """
    # Adding properly the testing method to check availability.
    # list() works for both cases: dict -> its keys, sequence -> a copy.
    all_methods = list(methods)
    all_methods.append('test_me')
    # Generating stubs dynamically.
    for method_name in all_methods:
        # Each method can have many stubs (with different prefixes)
        for stub in stubs:
            func = stub(method_name)
            # Fill in the placeholders the stub generators left in the
            # docstring.
            doc = func.__doc__ if func.__doc__ is not None else ''
            doc = doc.replace('METHOD_NAME', method_name)
            # BUGFIX: the original tested isinstance(all_methods, dict),
            # which is always False (all_methods is a list by this point),
            # so DOCUMENTATION was never substituted.  The lookup must go
            # against the original 'methods' mapping; 'test_me' is not in
            # it, hence the membership guard.
            if isinstance(methods, dict) and method_name in methods:
                doc = doc.replace('DOCUMENTATION', methods[method_name])
            func.__doc__ = doc
            # Taking "prefix_" from "_generate_<prefix_>stub".
            # (__name__ is the portable spelling of Python 2's func_name.)
            stub_prefix = stub.__name__[len('_generate_'):]
            stub_prefix = stub_prefix[:stub_prefix.rfind('stub')]
            func_name = stub_prefix + method_name
            func.__name__ = func_name
            setattr(ClientSocket, func_name, func)
    return ClientSocket
| {
"content_hash": "e7a3342a0b324b54637add519cf043cb",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 132,
"avg_line_length": 41.60377358490566,
"alnum_prop": 0.6598639455782312,
"repo_name": "ganeshgore/myremolab",
"id": "e3e3500f736b86bc030f94160f54a13756e0e79b",
"size": "4795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/src/voodoo/gen/protocols/BaseSocket/ClientBaseSocket.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "C#",
"bytes": "265761"
},
{
"name": "CSS",
"bytes": "39653"
},
{
"name": "Java",
"bytes": "689284"
},
{
"name": "JavaScript",
"bytes": "74198"
},
{
"name": "PHP",
"bytes": "97324"
},
{
"name": "Python",
"bytes": "5335681"
},
{
"name": "Shell",
"bytes": "794"
},
{
"name": "VHDL",
"bytes": "1372"
}
],
"symlink_target": ""
} |
import re
# Demo: validate each candidate against a US local phone number pattern
# (exactly three digits, a dash, then four digits, anchored at both ends).
# NOTE: uses Python 2 print statements; this script does not run under
# Python 3 as written.
for test_string in ['555-1212', 'ILL-EGAL']:
    if re.match(r'^\d{3}-\d{4}$', test_string):
        print test_string, 'is a valid US local phone number'
    else:
        print test_string, 'rejected'
| {
"content_hash": "dc3d911378f4d06232b49c7cc07f0be4",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 61,
"avg_line_length": 35.5,
"alnum_prop": 0.6056338028169014,
"repo_name": "lizhuoli1126/MarkdownScript",
"id": "6ce9825f98c052096e753cc0725740fcca8dfb59",
"size": "272",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Oneliner/tests/6lines.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95601"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
} |
import os
import unittest
from document import DjangoDocument
class Talk(DjangoDocument):
    """Minimal test document: a single unicode 'topic' field."""
    structure = {'topic': unicode}
class CrazyOne(DjangoDocument):
    """Test document that overrides only verbose_name; the plural form
    should be derived automatically by the metaclass."""
    class Meta:
        verbose_name = u"Crazy One"
    structure = {'name': unicode}
class CrazyTwo(DjangoDocument):
    """Test document overriding both verbose_name and its plural."""
    class Meta:
        verbose_name = u"Crazy Two"
        verbose_name_plural = u"Crazies Two"
    structure = {'names': unicode}
class LighteningTalk(Talk):
    """Subclass of Talk used to test inherited documents; adds a boolean
    field with a default value."""
    structure = {'has_slides': bool}
    default_values = {'has_slides': True}
class DocumentTest(unittest.TestCase):
    """Tests the Django-style _meta attribute, the pk shortcut and the
    Django signal integration that DjangoDocument's metaclass provides."""

    def setUp(self):
        # Register all test documents against a throwaway database.
        from shortcut import connection
        connection.register([Talk, CrazyOne, CrazyTwo, LighteningTalk])
        self.connection = connection
        self.database = connection['django_mongokit_test_database']

    def tearDown(self):
        # Drop the scratch database so each test starts clean.
        self.connection.drop_database('django_mongokit_test_database')

    def test_meta_creation(self):
        """the class Talk define above should have been given an attribute
        '_meta' by the metaclass that registers it"""
        klass = Talk
        self.assertTrue(klass._meta)
        self.assertFalse(hasattr(klass._meta, 'abstract'))
        self.assertEqual(klass._meta.verbose_name, u"Talk")
        self.assertEqual(klass._meta.verbose_name_plural, u"Talks")
        self.assertEqual(klass._meta.app_label, u"__main__") # test runner
        self.assertEqual(klass._meta.model_name, u"Talk")
        self.assertEqual(klass._meta.pk.attname, '_id')
        repr_ = repr(klass._meta)
        # <Meta Talk: 'Talk', 'Talks'>
        self.assertEqual(repr_.count('Talk'), 3)
        self.assertEqual(repr_.count('Talks'), 1)

    def test_meta_creation_overwriting_verbose_name(self):
        # Only verbose_name overridden; plural derived from it.
        klass = CrazyOne
        self.assertTrue(klass._meta)
        self.assertEqual(klass._meta.verbose_name, u"Crazy One")
        self.assertEqual(klass._meta.verbose_name_plural, u"Crazy Ones")
        self.assertEqual(klass._meta.model_name, u"CrazyOne")

    def test_meta_creation_overwriting_verbose_name_and_plural(self):
        # Both verbose_name and verbose_name_plural explicitly overridden.
        klass = CrazyTwo
        self.assertTrue(klass._meta)
        self.assertEqual(klass._meta.verbose_name, u"Crazy Two")
        self.assertEqual(klass._meta.verbose_name_plural, u"Crazies Two")
        self.assertEqual(klass._meta.model_name, u"CrazyTwo")

    def test_subclassed_document(self):
        # Subclasses get their own _meta with a CamelCase-split name.
        klass = LighteningTalk
        self.assertTrue(klass._meta)
        self.assertEqual(klass._meta.verbose_name, u"Lightening Talk")
        self.assertEqual(klass._meta.verbose_name_plural, u"Lightening Talks")
        self.assertEqual(klass._meta.model_name, u"LighteningTalk")

    def test_pk_shortcut(self):
        # create an instance an expect to get the ID as a string
        collection = self.database.talks
        talk = collection.Talk()
        # Before saving there is no '_id', so .pk raises KeyError.
        self.assertRaises(KeyError, lambda t: t.pk, talk)
        talk['topic'] = u"Something"
        talk.save()
        self.assertTrue(talk['_id'])
        self.assertTrue(talk.pk)
        self.assertTrue(isinstance(talk.pk, str))
        self.assertEqual(talk.pk, str(talk['_id']))
        # The pk property is read-only; assigning must raise ValueError.
        def setter(inst, forced_id):
            inst.pk = forced_id # will fail
        self.assertRaises(ValueError, setter, talk, 'bla')

    def test_signals(self):
        # Verify Django's pre/post save/delete signals fire for documents,
        # including the 'created' flag on post_save.
        _fired = []
        def trigger_pre_delete(sender, instance, **__):
            if sender is LighteningTalk:
                if isinstance(instance, LighteningTalk):
                    _fired.append('pre_delete')
        def trigger_post_delete(sender, instance, **__):
            if sender is LighteningTalk:
                if isinstance(instance, LighteningTalk):
                    _fired.append('post_delete')
        def trigger_pre_save(sender, instance, raw=None, **__):
            if sender is LighteningTalk:
                if isinstance(instance, LighteningTalk):
                    # pre_save fires before an _id has been assigned.
                    if not getattr(instance, '_id', None):
                        _fired.append('pre_save')
        def trigger_post_save(sender, instance, raw=None, created=False, **__):
            assert created in (True, False), "created is supposed to be a bool"
            if sender is LighteningTalk:
                if isinstance(instance, LighteningTalk):
                    if created:
                        _fired.append('post_save created')
                    else:
                        _fired.append('post_save not created')
                    if '_id' in instance:
                        _fired.append('post_save')
        from django.db.models import signals
        signals.pre_delete.connect(trigger_pre_delete, sender=LighteningTalk)
        signals.post_delete.connect(trigger_post_delete, sender=LighteningTalk)
        signals.pre_save.connect(trigger_pre_save, sender=LighteningTalk)
        signals.post_save.connect(trigger_post_save, sender=LighteningTalk)
        collection = self.database.talks
        talk = collection.LighteningTalk()
        talk['topic'] = u"Bla"
        talk.save()
        self.assertTrue('pre_save' in _fired)
        self.assertTrue('post_save' in _fired)
        self.assertTrue('post_save created' in _fired)
        self.assertTrue('post_save not created' not in _fired)
        talk.delete()
        self.assertTrue('pre_delete' in _fired)
        self.assertTrue('post_delete' in _fired)
        # Re-saving an existing document must report created=False.
        talk['topic'] = u"Different"
        talk.save()
        self.assertTrue('post_save not created' in _fired)
class ShortcutTestCase(unittest.TestCase):
    """Tests for the module-level helpers in shortcut.py."""

    def test_get_database(self):
        # get_database() must return a database bound to the shared
        # connection, whether or not the connection is passed explicitly.
        from shortcut import get_database, connection
        db = get_database()
        self.assertEqual(db.connection, connection)
        db = get_database(connection)
        self.assertEqual(db.connection, connection)

    def test_get_version(self):
        # get_version() must match the contents of the bundled version.txt.
        from shortcut import get_version
        version = get_version()
        self.assertEqual(
            version,
            open(os.path.join(os.path.dirname(__file__),
                              'version.txt')).read()
        )
class MongoDBBaseTestCase(unittest.TestCase):
    """Tests for the 'mongodb' Django database backend: loading it,
    the connection wrapper, and test-database creation/teardown.
    All tests no-op on Django <1.2, which lacks multi-db support."""

    def test_load_backend(self):
        # The 'mongodb' alias must resolve and the backend must import.
        try:
            from django.db import connections
        except ImportError:
            # Django <1.2
            return  # :(
        self.assertTrue('mongodb' in connections)
        from django.db.utils import load_backend
        backend = load_backend('django_mongokit.mongodb')
        self.assertTrue(backend is not None)

    def test_database_wrapper(self):
        # The wrapper must expose a .connection whose object supports the
        # autocommit attribute Django expects.
        try:
            from django.db import connections
        except ImportError:
            # Django <1.2
            return  # :(
        connection = connections['mongodb']
        self.assertTrue(hasattr(connection, 'connection')) # stupid name!
        # needed attribute
        self.assertTrue(hasattr(connection.connection, 'autocommit'))

    def test_create_test_database(self):
        # Full lifecycle: create_test_db renames to a 'test_' database,
        # the db only materializes on first query, destroy_test_db
        # restores the name and drops the test db.
        from django.conf import settings
        try:
            assert 'mongodb' in settings.DATABASES
        except AttributeError:
            # Django <1.2
            return  # :(
        old_database_name = settings.DATABASES['mongodb']['NAME']
        assert 'test_' not in old_database_name
        # pretend we're the Django 'test' command
        from django.db import connections
        connection = connections['mongodb']
        connection.creation.create_test_db()
        test_database_name = settings.DATABASES['mongodb']['NAME']
        self.assertTrue('test_' in test_database_name)
        from mongokit import Connection
        con = Connection()
        # the test database isn't created till it's needed
        self.assertTrue(test_database_name not in con.database_names())
        # creates it
        db = con[settings.DATABASES['mongodb']['NAME']]
        coll = db.test_collection_name
        # do a query on the collection to force the database to be created
        list(coll.find())
        test_database_name = settings.DATABASES['mongodb']['NAME']
        self.assertTrue(test_database_name in con.database_names())
        connection.creation.destroy_test_db(old_database_name)
        self.assertTrue('test_' not in settings.DATABASES['mongodb']['NAME'])
        self.assertTrue(test_database_name not in con.database_names())
        # this should work even though it doesn't need to do anything
        connection.close()

    def test_create_test_database_by_specific_bad_name(self):
        # A TEST_NAME without the mandatory 'test_' prefix must be rejected.
        from django.conf import settings
        try:
            assert 'mongodb' in settings.DATABASES
        except AttributeError:
            # Django <1.2
            return
        settings.DATABASES['mongodb']['TEST_NAME'] = "muststartwith__test_"
        from django.db import connections
        connection = connections['mongodb']
        # why doesn't this work?!?!
        #from mongodb.base import DatabaseError
        #self.assertRaises(DatabaseError, connection.creation.create_test_db)
        self.assertRaises(Exception, connection.creation.create_test_db)

    def test_create_test_database_by_specific_good_name(self):
        # An explicit, well-formed TEST_NAME must be honored end to end.
        from django.conf import settings
        try:
            assert 'mongodb' in settings.DATABASES
        except AttributeError:
            # Django <1.2
            return
        settings.DATABASES['mongodb']['TEST_NAME'] = "test_mustard"
        old_database_name = settings.DATABASES['mongodb']['NAME']
        from django.db import connections
        connection = connections['mongodb']
        connection.creation.create_test_db()
        test_database_name = settings.DATABASES['mongodb']['NAME']
        self.assertTrue('test_' in test_database_name)
        from mongokit import Connection
        con = Connection()
        # the test database isn't created till it's needed
        self.assertTrue(test_database_name not in con.database_names())
        # creates it
        db = con[settings.DATABASES['mongodb']['NAME']]
        coll = db.test_collection_name
        # do a query on the collection to force the database to be created
        list(coll.find())
        test_database_name = settings.DATABASES['mongodb']['NAME']
        self.assertTrue(test_database_name in con.database_names())
        connection.creation.destroy_test_db(old_database_name)
        self.assertTrue('test_mustard' not in
                        settings.DATABASES['mongodb']['NAME'])
        self.assertTrue(test_database_name not in con.database_names())
#
# DocumentForm tests follow
#
import datetime
from django_mongokit.forms import DocumentForm
from django_mongokit.forms import fields as mongokit_fields
from django import forms
class DetailedTalk(DjangoDocument):
    """
    A detailed talk document for testing automated form creation.
    """
    structure = {
        'created_on': datetime.datetime,
        'topic': unicode,
        'when': datetime.datetime,
        'tags': list,
        'duration': float,
    }
    # created_on defaults to the save-time UTC timestamp (callable default).
    default_values = {
        'created_on': datetime.datetime.utcnow
    }
    required_fields = ['topic', 'when', 'duration']
class BasicTalkForm(DocumentForm):
    """
    A basic form, without customized behavior.
    All fields are generated automatically from DetailedTalk.structure.
    """
    class Meta:
        document = DetailedTalk
class BasicDocumentFormTest(unittest.TestCase):
    "Test the basic form construction without customization"

    def setUp(self):
        # Fresh scratch database and an unbound form instance per test.
        from shortcut import connection
        self.connection = connection
        self.database = self.connection['django_mongokit_test_database']
        self.now = datetime.datetime.utcnow()
        self.form = BasicTalkForm(collection=self.database.test_collection)

    def tearDown(self):
        self.connection.drop_database('django_mongokit_test_database')

    def test_all_fields_created(self):
        "Test all fields created for basic form, in no particular order."
        self.assertEquals(set(self.form.fields.keys()),
                set(['created_on', 'topic', 'when', 'tags', 'duration']))
        # Each document field type must map to the expected form field.
        self.assertEquals(self.form.fields['created_on'].__class__,
                forms.fields.DateTimeField)
        self.assertEquals(self.form.fields['topic'].__class__,
                forms.fields.CharField)
        self.assertEquals(self.form.fields['when'].__class__,
                forms.fields.DateTimeField)
        self.assertEquals(self.form.fields['tags'].__class__,
                mongokit_fields.JsonListField)
        self.assertEquals(self.form.fields['duration'].__class__,
                forms.fields.FloatField)

    def test_required_set_correctly(self):
        "Test required set correctly for basic form."
        # Form 'required' must mirror DetailedTalk.required_fields.
        for field_name, field in self.form.fields.items():
            if field_name in DetailedTalk.required_fields:
                self.assertTrue(
                    field.required,
                    "%s should be required" % field_name
                )
            else:
                self.assertEquals(
                    field.required,
                    False,
                    "%s should not be required" % field_name
                )

    def test_initial_values_set_correctly(self):
        "Test the default value for created_on was set for basic form."
        # Compare at second granularity via ctime() to dodge microsecond skew.
        self.assertEquals(self.form.fields['created_on'].initial.ctime(),
                          self.now.ctime())

    def test_submit_with_good_values(self):
        "Test saving a basic form with good values."
        posted_form = BasicTalkForm({
            'topic': 'science!',
            'when': '3/10/2010',
            'tags': '["science", "brains", "sf"]', # JSON
            'duration': '45',
        }, collection=self.database.test_collection)
        self.assertTrue(posted_form.is_valid())
        obj = posted_form.save()
        self.assertEquals(obj['topic'], 'science!')
        self.assertEquals(obj['when'], datetime.datetime(2010, 3, 10, 0, 0))
        self.assertEquals(obj['tags'], ['science', 'brains', 'sf'])
        self.assertEquals(obj['duration'], 45)

    def test_submit_form_with_invalid_json(self):
        "Test saving a basic form with bad JSON."
        posted_form = BasicTalkForm({
            'topic': 'science!',
            'when': '3/10/2010',
            'tags': '["science", "brains", "sf"', # INVALID JSON
            'duration': '45',
        }, collection=self.database.test_collection)
        self.assertEquals(posted_form.is_valid(), False)
        # The JSON parse error must surface on the 'tags' field.
        self.assertTrue(posted_form.errors['tags'])
        self.assertTrue(posted_form.errors['tags'][0].startswith(
            u'Expecting '))

    def test_submit_empty_form(self):
        "Test submitting an empty basic form shows proper errors."
        posted_form = BasicTalkForm({
            'topic': '',
            'when': '',
            'tags': '',
            'duration': '',
        }, collection=self.database.test_collection)
        self.assertEquals(posted_form.is_valid(), False)
        # In order of form specification.
        self.assertEquals(posted_form.errors.keys(),
                          ['topic', 'duration', 'when'])
        self.assertEquals(posted_form.errors.values(), [
            [u'This field is required.'],
            [u'This field is required.'],
            [u'This field is required.']])
class DetailedTalkForm(DocumentForm):
    """
    A form that customizes a field and some custom validation tags.
    """
    # Overrides the auto-generated JsonListField with a plain CharField.
    tags = forms.CharField(max_length=250, required=True)

    def clean_tags(self):
        # Convert a comma separated string into a list of stripped tags.
        value = self.cleaned_data['tags']
        return [tag.strip() for tag in value.split(',')]

    def clean_when(self):
        # Truncate the parsed datetime to midnight.
        w = self.cleaned_data['when']
        when = datetime.datetime(w.year, w.month, w.day, 0, 0, 0)
        return when

    class Meta:
        document = DetailedTalk
        # Explicit field order; 'created_on' is deliberately excluded.
        fields = ['topic', 'when', 'tags', 'duration']
class CustomizedDocumentFormTest(unittest.TestCase):
    "Test form customization"

    def setUp(self):
        # Fresh scratch database and an unbound customized form per test.
        from shortcut import connection
        self.connection = connection
        self.database = self.connection['django_mongokit_test_database']
        self.form = DetailedTalkForm(collection=self.database.test_collection)

    def tearDown(self):
        self.connection.drop_database('django_mongokit_test_database')

    def test_all_fields_created(self):
        "Test that fields are created in order specified in form."
        self.assertEquals(self.form.fields.keys(),
                          ['topic', 'when', 'tags', 'duration'])
        # 'tags' is now a CharField because the form overrides it.
        self.assertEquals([fld.__class__ for fld in self.form.fields.values()],
                [forms.fields.CharField, forms.fields.DateTimeField,
                 forms.fields.CharField, forms.fields.FloatField])

    def test_required_set_correctly(self):
        "Test that required values set correctly, even when overridden."
        self.assertEquals(self.form.fields['topic'].required, True)
        self.assertEquals(self.form.fields['when'].required, True)
        self.assertEquals(self.form.fields['tags'].required, True)
        self.assertEquals(self.form.fields['duration'].required, True)

    def test_submit_form_with_correct_values(self):
        "Test custom form submit."
        posted_form = DetailedTalkForm({
            'topic': 'science!',
            'when': '3/10/2010',
            'tags': 'science, brains, sf', # Comma Separated List
            'duration': '45',
        }, collection=self.database.test_collection)
        self.assertTrue(posted_form.is_valid())
        obj = posted_form.save()
        self.assertEquals(obj['topic'], 'science!')
        # clean_when() truncates to midnight; clean_tags() splits the CSV.
        self.assertEquals(obj['when'], datetime.datetime(2010, 3, 10, 0, 0))
        self.assertEquals(obj['tags'], ['science', 'brains', 'sf'])
        self.assertEquals(obj['duration'], 45)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "06b6e7fa0af33941a8accdf8d0857d65",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 79,
"avg_line_length": 36.79417879417879,
"alnum_prop": 0.6147587297999774,
"repo_name": "peterbe/django-mongokit",
"id": "e42abfe49579afef4b0dc059ad82817573db7f35",
"size": "17698",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django_mongokit/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "628"
},
{
"name": "Python",
"bytes": "73525"
},
{
"name": "Shell",
"bytes": "229"
}
],
"symlink_target": ""
} |
import random
import string
import numpy as np
class DataProvider(object):
""""Provides a vector representation for the given ASCII string"""
def __init__(self, num_samples, max_str_len=32, alphabet=None, batch_size=32):
self.num_samples = num_samples
self.max_str_len = max_str_len # we will generate strings in the range 1 char to max_str_len chars
self.chars = alphabet
self.batch_size = batch_size
self.batch_num = 0 # batch number that the iterator is tracking
self.inputs, self.seqlen, self.d_inputs, self.d_seqlen, self.targets_e, self.targets_d = self.generate_ds()
self.pad_value = 0 # we will do 0 padding after seqlen
self.max_seq_len = 32
return
def get_embeddings(self):
# define the embedding matrix to map decoder symbols to a vector representation
num_symbols = 128 # total number of distinct symbols of decoder
embeddings = []
for symbol in range(num_symbols):
vec = [0] * num_symbols
vec[symbol] = 1
embeddings.append(vec)
return embeddings
def next(self):
# maxlen is determined by the max seq len found in the batch
get_batch = lambda my_list, start, size: my_list[(start * size) :(start*size) + size]
inputs = get_batch(self.inputs, self.batch_num, self.batch_size)
seqlen = get_batch(self.seqlen, self.batch_num, self.batch_size)
d_inputs = get_batch(self.d_inputs, self.batch_num, self.batch_size)
d_seqlen = get_batch(self.d_seqlen, self.batch_num, self.batch_size)
targets_e = get_batch(self.targets_e, self.batch_num, self.batch_size)
targets_d = get_batch(self.targets_d, self.batch_num, self.batch_size)
self.batch_num += 1
maxlen = self.get_batch_maxlen(targets_d) # get the max seqlen of the given batch
for x, xd, target in zip(inputs, d_inputs, targets_d):
diff = maxlen - len(target) # amount of padding needed
for _ in range(diff):
vec = [0] * 128
vec[self.pad_value] = 1
target.append(vec)
xd.append(vec)
x.append(vec)
assert seqlen == d_seqlen, "In our dataset seqlen and d_seqlen should b same"
return inputs, seqlen, d_inputs, d_seqlen, targets_e, targets_d
def get_batch_maxlen(self, targets_d):
mlen = 0
for seq in targets_d:
#print "len seq: ", len(seq)
if len(seq) > mlen:
mlen = len(seq)
return mlen
def reset(self):
self.batch_num = 0
return
def generate_random_strings(self):
inputs = []
if self.chars == None:
self.chars = [chr(x) for x in range(ord("0"), ord("9"))] #range("0", "9") #string.printable
for _sample in range(self.num_samples):
wlen = random.randint(1, self.max_str_len - 1)
word = ""
for _ in range(wlen):
word += random.choice(self.chars)
inputs.append(word)
return inputs
    def generate_ds(self):
        """Build the encoder/decoder dataset from random strings.

        For each word: encoder input is the sequence of one-hot (by ord)
        char vectors; the encoder target is the first char; the decoder
        input is the same sequence; the decoder target is the sequence
        shifted left by one with an EOS (index 0) vector appended.

        Returns a 6-tuple:
            (e_inputs, seqlen, d_inputs, seqlen_d, e_targets, d_targets)
        """
        input_strs = self.generate_random_strings()
        e_inputs = []
        e_targets = [] # targets for the encoder
        d_inputs = []
        d_targets = []
        seqlen = []
        seqlen_d = []
        eos_vec = [0] * 128 # one-hot end-of-sequence marker
        eos_vec[0] = 1 # we use 0 as the terminator
        for word in input_strs: # for each word in the given input list
            w_input = [] # word input is a seq of char reps
            if len(word) == 0:
                # NOTE(review): a 0-length word would make w_input[0] below
                # raise IndexError before the assert fires; the generator
                # always emits length >= 1, so this is only a diagnostic.
                print "got a 0 len word: ", word
            for c in word: # for each char
                vec = [0] * 128 # create a 1-h vec for Softmax trg
                vec[ord(c)] = 1
                w_input.append(vec)
            # Shallow copies: encoder and decoder share the same inner
            # one-hot vectors but get independent outer lists.
            e_inputs.append(w_input[:])
            e_targets.append([w_input[0]]) # the first char will be the encoder's output
            seqlen.append(len(w_input))
            assert len(w_input) >= 1, "w_input should have a len greater than eq 1, got %d" % (len(w_input))
            d_inputs.append(w_input[:])
            tgt = w_input[1:]
            tgt.append(eos_vec)
            d_targets.append(tgt)
            seqlen_d.append(len(w_input))
        assert len(e_inputs) == len(d_inputs), "number of inputs to encoder %d should equal %d decoder" % (len(e_inputs), len(d_inputs))
        assert len(e_inputs) == len(e_targets), "number of inputs to encoder %d should equal %d encoder targets" % (len(e_inputs), len(e_targets))
        assert len(seqlen) == len(seqlen_d), "seqlen %d should equal %d decoder seqlen for our dataset" % (len(e_inputs), len(d_inputs))
        return e_inputs, seqlen, d_inputs, seqlen_d, e_targets, d_targets
if __name__ == "__main__":
sr = DataProvider(16, 10) # num samples, max len
e_inputs, seqlen, d_inputs, d_seqlen, d_targets = sr.next()
print d_inputs[:1]
print len(e_inputs[0]), len(e_inputs[0][0]), len(d_inputs[0]), len(d_inputs[0][0]), | {
"content_hash": "cb55a60c80a7f60d83edcc3e3e3b3bc0",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 146,
"avg_line_length": 44.13793103448276,
"alnum_prop": 0.5666015625,
"repo_name": "ananthpn/tensorflow",
"id": "f50b134bb623a9c60cea3cb4c2d955661cfd7393",
"size": "5823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seq2seq/data_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17422"
}
],
"symlink_target": ""
} |
import json
import time
import platform
import socket
import datetime
#import random # Used to generate sample data; comment out this line if real data is used
import requests
import urllib3 # Used to disable warnings about insecure SSL (optional)
# Import any special packages needed for a particular hardware platform,
# for example, for a Raspberry PI,
# import RPi.GPIO as GPIO
# This automatically initizalizes the robotics cape; to install this library, see
# https://github.com/mcdeoliveira/rcpy
import rcpy
# ************************************************************************
# Specify constant values (names, target URLs, et cetera) needed by the script
# ************************************************************************
# Specify the name of this device, or simply use the hostname; this is the name
# of the PI AF Element that will be created, and it'll be included in the names
# of PI Points that get created as well
#DEVICE_NAME = (socket.gethostname()) + ""
DEVICE_NAME = "BBBlue Robot Controller 01"
# Specify a device location (optional); this will be added as a static
# string attribute to the AF Element that is created
DEVICE_LOCATION = "IoT Test Lab"
# Specify the name of the Assets type message; this will also end up becoming
# part of the name of the PI AF Element template that is created; for example, this could be
# "AssetsType_RaspberryPI" or "AssetsType_Dragonboard"
# You will want to make this different for each general class of IoT module that you use
ASSETS_MESSAGE_TYPE_NAME = DEVICE_NAME + "_assets_type"
#ASSETS_MESSAGE_TYPE_NAME = "assets_type" + "IoT Device Model 74656" # An example
# Similarly, specify the name of for the data values type; this should likewise be unique
# for each general class of IoT device--for example, if you were running this
# script on two different devices, each with different numbers and kinds of sensors,
# you'd specify a different data values message type name
# when running the script on each device. If both devices were the same,
# you could use the same DATA_VALUES_MESSAGE_TYPE_NAME
DATA_VALUES_MESSAGE_TYPE_NAME = DEVICE_NAME + "_data_values_type"
#DATA_VALUES_MESSAGE_TYPE_NAME = "data_values_type" + "IoT Device Model 74656" # An example
# Store the id of the container that will be used to receive live data values
DATA_VALUES_CONTAINER_ID = DEVICE_NAME + "_data_values_container"
# Specify the number of seconds to sleep in between value messages
NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES = 2
# Specify whether you're sending data to OSIsoft cloud services or not
SEND_DATA_TO_OSISOFT_CLOUD_SERVICES = False
# Specify the address of the destination endpoint; it should be of the form
# http://<host/ip>:<port>/ingress/messages
# For example, "https://myservername:8118/ingress/messages"
TARGET_URL = "https://lopezpiserver:777/ingress/messages"
# !!! Note: if sending data to OSIsoft cloud services,
# uncomment the below line in order to set the target URL to the OCS OMF endpoint:
#TARGET_URL = "https://qi-data.osisoft.com/api/omf"
# Specify the producer token, a unique token used to identify and authorize a given OMF producer. Consult the OSIsoft Cloud Services or PI Connector Relay documentation for further information.
PRODUCER_TOKEN = "OMFv1"
#PRODUCER_TOKEN = "778408" # An example
# !!! Note: if sending data to OSIsoft cloud services, the producer token should be the
# security token obtained for a particular Tenant and Publisher; see
# http://qi-docs.readthedocs.io/en/latest/OMF_Ingress_Specification.html#headers
#PRODUCER_TOKEN = ""
# ************************************************************************
# Specify options for sending web requests to the target
# ************************************************************************
# If self-signed certificates are used (true by default),
# do not verify HTTPS SSL certificates; normally, leave this as is
VERIFY_SSL = False
# Specify the timeout, in seconds, for sending web requests
# (if it takes longer than this to send a message, an error will be thrown)
WEB_REQUEST_TIMEOUT_SECONDS = 30
# ************************************************************************
# Helper function: run any code needed to initialize local sensors, if necessary for this hardware
# ************************************************************************
# Below is where you can initialize any global variables that are needed by your applicatio;
# certain sensors, for example, will require global interface or sensor variables
# myExampleInterfaceKitGlobalVar = None
# The following function is where you can insert specific initialization code to set up
# sensors for a particular IoT module or platform
def initialize_sensors():
    """Prepare the BeagleBone Blue's on-board sensors for reading.

    Puts the rcpy runtime into the RUNNING state, then initializes the
    MPU-9250 IMU with the magnetometer enabled (order matters: the rcpy
    state is set before touching the IMU).  Failures are logged rather
    than raised so the rest of the script can still attempt to run.
    """
    print("\n--- Sensors initializing...")
    try:
        # For other platforms, platform-specific setup would go here; e.g.
        # on a Raspberry Pi, to set up pins 4 and 5, you would add:
        #GPIO.setmode(GPIO.BCM)
        #GPIO.setup(4, GPIO.IN)
        #GPIO.setup(5, GPIO.IN)
        # Set state to rcpy.RUNNING
        rcpy.set_state(rcpy.RUNNING)
        # Activate the magnetometer on the BeagleBone Blue
        rcpy.mpu9250.initialize(enable_magnetometer = True)
        print("--- Sensors initialized!")
    except Exception as ex:
        # Log any error, if it occurs; sensor reads will likely fail later,
        # but each read is also wrapped in its own error handling.
        print(str(datetime.datetime.now()) + " Error when initializing sensors: " + str(ex))
# ************************************************************************
# Helper function: REQUIRED: create a JSON message that contains sensor data values
# ************************************************************************
# The following function you can customize to allow this script to send along any
# number of different data values, so long as the values that you send here match
# up with the values defined in the "DataValuesType" OMF message type (see the next section)
# In this example, this function simply generates two random values for the sensor values,
# but here is where you could change this function to reference a library that actually
# reads from sensors attached to the device that's running the script
def create_data_values_message():
    """Build a one-element OMF data message holding the current sensor readings.

    Reads the BeagleBone Blue's on-board IMU (accelerometer, gyroscope,
    magnetometer) and its temperature sensor, then wraps the values in the
    OMF "values" payload for the data-values container.

    Returns:
        list: a single-entry list with the containerid and one timestamped
        reading per sensor channel.
    """
    # Read the sensors in the same order as before: temperature first
    # (converted Celsius -> Fahrenheit), then the combined IMU sample.
    board_temperature = rcpy.mpu9250.read_imu_temp() * 9/5 + 32
    imu_sample = rcpy.mpu9250.read()
    accel = imu_sample['accel']  # m/s^2; indexes 0/1/2 are X/Y/Z
    gyro = imu_sample['gyro']    # degrees per second
    mag = imu_sample['mag']      # microteslas
    standard_gravity = 9.80665   # divide acceleration by this to get Gs
    # ISO-8601 UTC timestamp; the trailing "Z" marks it as UTC for OMF.
    timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
    reading = {
        "Time": timestamp,
        "X-acceleration": accel[0]/standard_gravity,
        "Y-acceleration": accel[1]/standard_gravity,
        "Z-acceleration": accel[2]/standard_gravity,
        "X-rotation": gyro[0],
        "Y-rotation": gyro[1],
        "Z-rotation": gyro[2],
        "X-magnetic field": mag[0],
        "Y-magnetic field": mag[1],
        "Z-magnetic field": mag[2],
        "Board Temperature": board_temperature
    }
    return [
        {
            "containerid": DATA_VALUES_CONTAINER_ID,
            "values": [reading]
        }
    ]
# ************************************************************************
# Helper function: REQUIRED: wrapper function for sending an HTTPS message
# ************************************************************************
# Define a helper function to allow easily sending web request messages;
# this function can later be customized to allow you to port this script to other languages.
# All it does is take in a data object and a message type, and it sends an HTTPS
# request to the target OMF endpoint
def send_omf_message_to_endpoint(action, message_type, message_json):
    """Send a single OMF message to the configured endpoint via HTTPS POST.

    Args:
        action: OMF action verb; this script only uses "create", but the
            parameter allows "update"/"delete" for future use.
        message_type: OMF message type ("Type", "Container", or "Data").
        message_json: JSON-serializable payload for the message body.

    Errors are caught and logged rather than raised so the send loop keeps
    running; a successful request returns HTTP 200 or 202.
    """
    try:
        # Assemble headers that carry the producer token, message type,
        # action, format, and OMF version expected by the endpoint
        web_request_header = {
            'producertoken': PRODUCER_TOKEN,
            'messagetype': message_type,
            'action': action,
            'messageformat': 'JSON',
            'omfversion': '1.0'
        }
        # Debug aid: log the outgoing message (comment out to reduce noise).
        # (Fixed: removed a stray trailing semicolon; the old comment said
        # to "uncomment" this line even though it was already active.)
        print('\nOutgoing message: ' + json.dumps(message_json))
        # Send the request, and collect the response; json.dumps is used to
        # properly format the message JSON so that it can be sent as a web request
        response = requests.post(
            TARGET_URL,
            headers=web_request_header,
            data=json.dumps(message_json),
            verify=VERIFY_SSL,
            timeout=WEB_REQUEST_TIMEOUT_SECONDS
        )
        # Print a debug message, if desired; note: you should receive a
        # response code 200 or 202 if the request was successful!
        print(
            'Response from sending a message of type ' +
            '"{0}" with action "{1}": {2} {3}'.format(
                message_type,
                action,
                response.status_code,
                response.text
            )
        )
    except Exception as ex:
        # Log any error, if it occurs, without aborting the caller
        print(str(datetime.datetime.now()) + " Error during web request: " + str(ex))
# ************************************************************************
# Turn off HTTPS warnings, if desired
# (if the default certificate configuration was used by the PI Connector)
# ************************************************************************
# Suppress insecure HTTPS warnings, if an untrusted certificate is used by the target endpoint
# Remove if targeting trusted targets
try:
if not VERIFY_SSL:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings()
except Exception as ex:
# Log any error, if it occurs
print(str(datetime.datetime.now()) + " Possible non-fatal error when disabling SSL validation: " + str(ex))
print(
'\n--- Setup: targeting endpoint "' + TARGET_URL + '"...' +
'\n--- Now sending types, defining containers, and creating assets and links...' +
'\n--- (Note: a successful message will return a 20X response code.)\n'
)
# ************************************************************************
# Create a JSON packet to define the types of streams that will be sent
# ************************************************************************
DYNAMIC_TYPES_MESSAGE_JSON = [
# ************************************************************************
# There are several different message types that will be used by this script, but
# you can customize this script for your own needs by modifying the types:
# First, you can modify the "AssetsType", which will allow you to customize which static
# attributes are added to the new PI AF Element that will be created, and second,
# you can modify the "DataValuesType", which will allow you to customize this script to send
# additional sensor values, in addition to (or instead of) the two shown here
# This values type is going to be used to send real-time values; feel free to rename the
# values from "Raw Sensor Reading 1" to, say, "Temperature", or "Pressure"
# Note:
# all keywords ("id", "type", "classification", etc. are case sensitive!)
# For a list of the specific keywords used in these messages,
# see http://omf-docs.readthedocs.io/
{
"id": DATA_VALUES_MESSAGE_TYPE_NAME,
"type": "object",
"classification": "dynamic",
"properties": {
"Time": {
"format": "date-time",
"type": "string",
"isindex": True
},
#"Raw Sensor Reading 1": {"type": "number"},
#"Raw Sensor Reading 2": {"type": "number"}
"X-acceleration": {"type": "number", "description": "in Gs"},
"Y-acceleration": {"type": "number", "description": "in Gs"},
"Z-acceleration": {"type": "number", "description": "in Gs"},
"X-rotation": {"type": "number", "description": "in degrees per second"},
"Y-rotation": {"type": "number", "description": "in degrees per second"},
"Z-rotation": {"type": "number", "description": "in degrees per second"},
"X-magnetic field": {"type": "number", "description": "in microteslas"},
"Y-magnetic field": {"type": "number", "description": "in microteslas"},
"Z-magnetic field": {"type": "number", "description": "in microteslas"},
"Board Temperature": {"type": "number", "description": "in Fahrenheit"}
# For example, to allow you to send a string-type live data value,
# such as "Status", you would add
#"Status": {
# "type": "string"
#}
}
}
]
# ************************************************************************
# Send the DYNAMIC types message, so that these types can be referenced in all later messages
# ************************************************************************
send_omf_message_to_endpoint("create", "Type", DYNAMIC_TYPES_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
STATIC_TYPES_MESSAGE_JSON = [
# This asset type is used to define a PI AF Element that will be created;
# this type also defines two static string attributes that will be created
# as well; feel free to rename these or add additional
# static attributes for each Element (PI Point attributes will be added later)
# The name of this type will also end up being part of the name of the PI AF Element template
# that is automatically created
{
"id": ASSETS_MESSAGE_TYPE_NAME,
"type": "object",
"classification": "static",
"properties": {
"Name": {
"type": "string",
"isindex": True
},
"Device Type": {
"type": "string"
},
"Location": {
"type": "string"
},
"Data Ingress Method": {
"type": "string"
}
# For example, to add a number-type static
# attribute for the device model, you would add
# "Model": {
# "type": "number"
#}
}
}
]
# ************************************************************************
# Send the STATIC types message, so that these types can be referenced in all later messages
# ************************************************************************
send_omf_message_to_endpoint("create", "Type", STATIC_TYPES_MESSAGE_JSON)
# ************************************************************************
# Create a JSON packet to define containerids and the type
# (using the types listed above) for each new data events container
# ************************************************************************
# The device name that you specified earlier will be used as the AF Element name!
NEW_AF_ELEMENT_NAME = DEVICE_NAME
CONTAINERS_MESSAGE_JSON = [
{
"id": DATA_VALUES_CONTAINER_ID,
"typeid": DATA_VALUES_MESSAGE_TYPE_NAME
}
]
# ************************************************************************
# Send the container message, to instantiate this particular container;
# we can now directly start sending data to it using its Id
# ************************************************************************
send_omf_message_to_endpoint("create", "Container", CONTAINERS_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
# ************************************************************************
# Create a JSON packet to containing the asset and
# linking data for the PI AF asset that will be made
# ************************************************************************
# Here is where you can specify values for the static PI AF attributes;
# in this case, we're auto-populating the Device Type,
# but you can manually hard-code in values if you wish
# we also add the LINKS to be made, which will both position the new PI AF
# Element, so it will show up in AF, and will associate the PI Points
# that will be created with that Element
ASSETS_AND_LINKS_MESSAGE_JSON = [
{
# This will end up creating a new PI AF Element with
# this specific name and static attribute values
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"values": [
{
"Name": NEW_AF_ELEMENT_NAME,
"Device Type": (
platform.machine() + " - " + platform.platform() + " - " + platform.processor()
),
"Location": DEVICE_LOCATION,
"Data Ingress Method": "OMF"
}
]
},
{
"typeid": "__Link",
"values": [
# This first link will locate such a newly created AF Element under
# the root PI element targeted by the PI Connector in your target AF database
# This was specfied in the Connector Relay Admin page; note that a new
# parent element, with the same name as the PRODUCER_TOKEN, will also be made
{
"Source": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": "_ROOT"
},
"Target": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": NEW_AF_ELEMENT_NAME
}
},
# This second link will map new PI Points (created by messages
# sent to the data values container) to a newly create element
{
"Source": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": NEW_AF_ELEMENT_NAME
},
"Target": {
"containerid": DATA_VALUES_CONTAINER_ID
}
}
]
}
]
# ************************************************************************
# Send the message to create the PI AF asset; it won't appear in PI AF,
# though, because it hasn't yet been positioned...
# ************************************************************************
send_omf_message_to_endpoint("create", "Data", ASSETS_AND_LINKS_MESSAGE_JSON)
# ************************************************************************
# Initialize sensors prior to sending data (if needed), using the function defined earlier
# ************************************************************************
initialize_sensors()
# ************************************************************************
# Finally, loop indefinitely, sending random events
# conforming to the value type that we defined earlier
# ************************************************************************
print(
'\n--- Now sending live data every ' + str(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES) +
' second(s) for device "' + NEW_AF_ELEMENT_NAME + '"... (press CTRL+C to quit at any time)\n'
)
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
print(
'--- (Look for a new AF Element named "' + NEW_AF_ELEMENT_NAME + '".)\n'
)
while True:
# Call the custom function that builds a JSON object that
# contains new data values; see the beginning of this script
VALUES_MESSAGE_JSON = create_data_values_message()
# Send the JSON message to the target URL
send_omf_message_to_endpoint("create", "Data", VALUES_MESSAGE_JSON)
# Send the next message after the required interval
time.sleep(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES)
| {
"content_hash": "ba9fda7f459949fedd5b7dbfe72cccd7",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 193,
"avg_line_length": 47.903930131004365,
"alnum_prop": 0.5641750227894257,
"repo_name": "osisoft/OMF-Samples",
"id": "a278821e31ec13f8d57b3b9905b9c237a350eb8f",
"size": "23066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Community Samples/Python3/SendOMFDataToPISystemFromBeagleBoneBlue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "334323"
},
{
"name": "C#",
"bytes": "95862"
},
{
"name": "C++",
"bytes": "85685"
},
{
"name": "Elixir",
"bytes": "12930"
},
{
"name": "Go",
"bytes": "18179"
},
{
"name": "HTML",
"bytes": "22086"
},
{
"name": "Java",
"bytes": "24815"
},
{
"name": "JavaScript",
"bytes": "21017"
},
{
"name": "Makefile",
"bytes": "398"
},
{
"name": "PowerShell",
"bytes": "31155"
},
{
"name": "Python",
"bytes": "187683"
}
],
"symlink_target": ""
} |
import datetime
import math
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg,
Case,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
F,
FloatField,
IntegerField,
Max,
Min,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
Value,
Variance,
When,
)
from django.db.models.expressions import Func, RawSQL
from django.db.models.functions import (
Cast,
Coalesce,
Greatest,
Now,
Pi,
TruncDate,
TruncHour,
)
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
class NowUTC(Now):
    """Now() variant that evaluates to the current timestamp in UTC.

    Defaults to the SQL-standard CURRENT_TIMESTAMP template; backends that
    need a dedicated expression for an explicitly-UTC "now" supply it via
    the test_now_utc_template database feature.
    """
    template = "CURRENT_TIMESTAMP"
    output_field = DateTimeField()
    def as_sql(self, compiler, connection, **extra_context):
        # Swap in the backend-specific UTC template when the test backend
        # declares one; otherwise fall through to CURRENT_TIMESTAMP.
        if connection.features.test_now_utc_template:
            extra_context["template"] = connection.features.test_now_utc_template
        return super().as_sql(compiler, connection, **extra_context)
class AggregateTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        """Create the shared fixture used by every aggregation test.

        Built once per class: 9 authors with a friends m2m graph, 5
        publishers (two with durations), 6 books spread across publishers
        and authors, and 3 stores with overlapping book inventories — so
        joins of varying multiplicity are exercised.
        """
        # Authors and their friendship graph.
        cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
        cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
        cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
        cls.a4 = Author.objects.create(name="James Bennett", age=29)
        cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
        cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
        cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
        cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
        cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
        cls.a1.friends.add(cls.a2, cls.a4)
        cls.a2.friends.add(cls.a1, cls.a7)
        cls.a4.friends.add(cls.a1)
        cls.a5.friends.add(cls.a6, cls.a7)
        cls.a6.friends.add(cls.a5, cls.a7)
        cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
        cls.a8.friends.add(cls.a9)
        cls.a9.friends.add(cls.a8)
        # Publishers; only the first two have a duration set.
        cls.p1 = Publisher.objects.create(
            name="Apress", num_awards=3, duration=datetime.timedelta(days=1)
        )
        cls.p2 = Publisher.objects.create(
            name="Sams", num_awards=1, duration=datetime.timedelta(days=2)
        )
        cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
        cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
        cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
        # Books; note p5 deliberately has no books (NULL aggregates).
        cls.b1 = Book.objects.create(
            isbn="159059725",
            name="The Definitive Guide to Django: Web Development Done Right",
            pages=447,
            rating=4.5,
            price=Decimal("30.00"),
            contact=cls.a1,
            publisher=cls.p1,
            pubdate=datetime.date(2007, 12, 6),
        )
        cls.b2 = Book.objects.create(
            isbn="067232959",
            name="Sams Teach Yourself Django in 24 Hours",
            pages=528,
            rating=3.0,
            price=Decimal("23.09"),
            contact=cls.a3,
            publisher=cls.p2,
            pubdate=datetime.date(2008, 3, 3),
        )
        cls.b3 = Book.objects.create(
            isbn="159059996",
            name="Practical Django Projects",
            pages=300,
            rating=4.0,
            price=Decimal("29.69"),
            contact=cls.a4,
            publisher=cls.p1,
            pubdate=datetime.date(2008, 6, 23),
        )
        cls.b4 = Book.objects.create(
            isbn="013235613",
            name="Python Web Development with Django",
            pages=350,
            rating=4.0,
            price=Decimal("29.69"),
            contact=cls.a5,
            publisher=cls.p3,
            pubdate=datetime.date(2008, 11, 3),
        )
        cls.b5 = Book.objects.create(
            isbn="013790395",
            name="Artificial Intelligence: A Modern Approach",
            pages=1132,
            rating=4.0,
            price=Decimal("82.80"),
            contact=cls.a8,
            publisher=cls.p3,
            pubdate=datetime.date(1995, 1, 15),
        )
        cls.b6 = Book.objects.create(
            isbn="155860191",
            name=(
                "Paradigms of Artificial Intelligence Programming: Case Studies in "
                "Common Lisp"
            ),
            pages=946,
            rating=5.0,
            price=Decimal("75.00"),
            contact=cls.a8,
            publisher=cls.p4,
            pubdate=datetime.date(1991, 10, 15),
        )
        cls.b1.authors.add(cls.a1, cls.a2)
        cls.b2.authors.add(cls.a3)
        cls.b3.authors.add(cls.a4)
        cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
        cls.b5.authors.add(cls.a8, cls.a9)
        cls.b6.authors.add(cls.a8)
        # Stores with overlapping inventories of the books above.
        s1 = Store.objects.create(
            name="Amazon.com",
            original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
            friday_night_closing=datetime.time(23, 59, 59),
        )
        s2 = Store.objects.create(
            name="Books.com",
            original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
            friday_night_closing=datetime.time(23, 59, 59),
        )
        s3 = Store.objects.create(
            name="Mamma and Pappa's Books",
            original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
            friday_night_closing=datetime.time(21, 30),
        )
        s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
        s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
        s3.books.add(cls.b3, cls.b4, cls.b6)
    def test_empty_aggregate(self):
        """aggregate() with no arguments returns an empty dict."""
        self.assertEqual(Author.objects.aggregate(), {})
    def test_aggregate_in_order_by(self):
        """Ordering by an un-annotated aggregate raises FieldError."""
        msg = (
            "Using an aggregate in order_by() without also including it in "
            "annotate() is not allowed: Avg(F(book__rating)"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Author.objects.values("age").order_by(Avg("book__rating"))
    def test_single_aggregate(self):
        """A lone aggregate gets the auto-generated '<field>__<func>' key."""
        vals = Author.objects.aggregate(Avg("age"))
        self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
    def test_multiple_aggregates(self):
        """Several aggregates over the same field coexist in one call."""
        vals = Author.objects.aggregate(Sum("age"), Avg("age"))
        self.assertEqual(
            vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}
        )
    def test_filter_aggregate(self):
        """Aggregation only covers the rows left after filter()."""
        vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
        self.assertEqual(vals, {"age__sum": 254})
    def test_related_aggregate(self):
        """Aggregates can traverse m2m, reverse m2m, and FK relations."""
        vals = Author.objects.aggregate(Avg("friends__age"))
        self.assertEqual(vals, {"friends__age__avg": Approximate(34.07, places=2)})
        vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
        self.assertEqual(vals, {"authors__age__avg": Approximate(38.2857, places=2)})
        vals = Author.objects.filter(name__contains="a").aggregate(Avg("book__rating"))
        self.assertEqual(vals, {"book__rating__avg": 4.0})
        vals = Book.objects.aggregate(Sum("publisher__num_awards"))
        self.assertEqual(vals, {"publisher__num_awards__sum": 30})
        vals = Publisher.objects.aggregate(Sum("book__price"))
        self.assertEqual(vals, {"book__price__sum": Decimal("270.27")})
    def test_aggregate_multi_join(self):
        """Aggregates can span two relations (e.g. store -> book -> author)."""
        vals = Store.objects.aggregate(Max("books__authors__age"))
        self.assertEqual(vals, {"books__authors__age__max": 57})
        vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
        self.assertEqual(vals, {"book__publisher__num_awards__min": 1})
    def test_aggregate_alias(self):
        """A keyword argument overrides the auto-generated result key."""
        vals = Store.objects.filter(name="Amazon.com").aggregate(
            amazon_mean=Avg("books__rating")
        )
        self.assertEqual(vals, {"amazon_mean": Approximate(4.08, places=2)})
    def test_aggregate_transform(self):
        """Field transforms (here __month) work inside an aggregate."""
        vals = Store.objects.aggregate(min_month=Min("original_opening__month"))
        self.assertEqual(vals, {"min_month": 3})
    def test_aggregate_join_transform(self):
        """A transform on a joined field (book__pubdate__year) aggregates."""
        vals = Publisher.objects.aggregate(min_year=Min("book__pubdate__year"))
        self.assertEqual(vals, {"min_year": 1991})
    def test_annotate_basic(self):
        """annotate() with no args is a no-op; Avg annotates each row."""
        self.assertQuerysetEqual(
            Book.objects.annotate().order_by("pk"),
            [
                "The Definitive Guide to Django: Web Development Done Right",
                "Sams Teach Yourself Django in 24 Hours",
                "Practical Django Projects",
                "Python Web Development with Django",
                "Artificial Intelligence: A Modern Approach",
                "Paradigms of Artificial Intelligence Programming: Case Studies in "
                "Common Lisp",
            ],
            lambda b: b.name,
        )
        books = Book.objects.annotate(mean_age=Avg("authors__age"))
        b = books.get(pk=self.b1.pk)
        self.assertEqual(
            b.name, "The Definitive Guide to Django: Web Development Done Right"
        )
        self.assertEqual(b.mean_age, 34.5)
    def test_annotate_defer(self):
        """Annotations coexist with defer(); deferred fields still load lazily."""
        qs = (
            Book.objects.annotate(page_sum=Sum("pages"))
            .defer("name")
            .filter(pk=self.b1.pk)
        )
        rows = [
            (
                self.b1.id,
                "159059725",
                447,
                "The Definitive Guide to Django: Web Development Done Right",
            )
        ]
        self.assertQuerysetEqual(
            qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name)
        )
    def test_annotate_defer_select_related(self):
        """Annotations work together with defer() and select_related()."""
        qs = (
            Book.objects.select_related("contact")
            .annotate(page_sum=Sum("pages"))
            .defer("name")
            .filter(pk=self.b1.pk)
        )
        rows = [
            (
                self.b1.id,
                "159059725",
                447,
                "Adrian Holovaty",
                "The Definitive Guide to Django: Web Development Done Right",
            )
        ]
        self.assertQuerysetEqual(
            qs.order_by("pk"),
            rows,
            lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name),
        )
    def test_annotate_m2m(self):
        """Avg and Count annotations across the Book.authors m2m relation."""
        books = (
            Book.objects.filter(rating__lt=4.5)
            .annotate(Avg("authors__age"))
            .order_by("name")
        )
        self.assertQuerysetEqual(
            books,
            [
                ("Artificial Intelligence: A Modern Approach", 51.5),
                ("Practical Django Projects", 29.0),
                ("Python Web Development with Django", Approximate(30.3, places=1)),
                ("Sams Teach Yourself Django in 24 Hours", 45.0),
            ],
            lambda b: (b.name, b.authors__age__avg),
        )
        books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
        self.assertQuerysetEqual(
            books,
            [
                ("Artificial Intelligence: A Modern Approach", 2),
                (
                    "Paradigms of Artificial Intelligence Programming: Case Studies in "
                    "Common Lisp",
                    1,
                ),
                ("Practical Django Projects", 1),
                ("Python Web Development with Django", 3),
                ("Sams Teach Yourself Django in 24 Hours", 1),
                ("The Definitive Guide to Django: Web Development Done Right", 2),
            ],
            lambda b: (b.name, b.num_authors),
        )
    def test_backwards_m2m_annotate(self):
        """Annotations across the reverse m2m (Author -> book)."""
        authors = (
            Author.objects.filter(name__contains="a")
            .annotate(Avg("book__rating"))
            .order_by("name")
        )
        self.assertQuerysetEqual(
            authors,
            [
                ("Adrian Holovaty", 4.5),
                ("Brad Dayley", 3.0),
                ("Jacob Kaplan-Moss", 4.5),
                ("James Bennett", 4.0),
                ("Paul Bissex", 4.0),
                ("Stuart Russell", 4.0),
            ],
            lambda a: (a.name, a.book__rating__avg),
        )
        authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
        self.assertQuerysetEqual(
            authors,
            [
                ("Adrian Holovaty", 1),
                ("Brad Dayley", 1),
                ("Jacob Kaplan-Moss", 1),
                ("James Bennett", 1),
                ("Jeffrey Forcier", 1),
                ("Paul Bissex", 1),
                ("Peter Norvig", 2),
                ("Stuart Russell", 1),
                ("Wesley J. Chun", 1),
            ],
            lambda a: (a.name, a.num_books),
        )
    def test_reverse_fkey_annotate(self):
        """Aggregates traverse a forward FK (Book -> publisher) and a reverse
        FK (Publisher -> book)."""
        books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
        self.assertQuerysetEqual(
            books,
            [
                ("Artificial Intelligence: A Modern Approach", 7),
                (
                    "Paradigms of Artificial Intelligence Programming: Case Studies in "
                    "Common Lisp",
                    9,
                ),
                ("Practical Django Projects", 3),
                ("Python Web Development with Django", 7),
                ("Sams Teach Yourself Django in 24 Hours", 1),
                ("The Definitive Guide to Django: Web Development Done Right", 3),
            ],
            lambda b: (b.name, b.publisher__num_awards__sum),
        )
        publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
        # A publisher with no books aggregates to None, not 0.
        self.assertQuerysetEqual(
            publishers,
            [
                ("Apress", Decimal("59.69")),
                ("Jonno's House of Books", None),
                ("Morgan Kaufmann", Decimal("75.00")),
                ("Prentice Hall", Decimal("112.49")),
                ("Sams", Decimal("23.09")),
            ],
            lambda p: (p.name, p.book__price__sum),
        )
    def test_annotate_values(self):
        """annotate() composes with values() in either order, and the
        annotation is included in the resulting dicts."""
        books = list(
            Book.objects.filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values()
        )
        self.assertEqual(
            books,
            [
                {
                    "contact_id": self.a1.id,
                    "id": self.b1.id,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": (
                        "The Definitive Guide to Django: Web Development Done Right"
                    ),
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": self.p1.id,
                    "rating": 4.5,
                }
            ],
        )
        # Selecting a subset of fields keeps an explicitly named annotation.
        books = (
            Book.objects.filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values("pk", "isbn", "mean_age")
        )
        self.assertEqual(
            list(books),
            [
                {
                    "pk": self.b1.pk,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                }
            ],
        )
        # values() naming only a regular field excludes the annotation.
        books = (
            Book.objects.filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values("name")
        )
        self.assertEqual(
            list(books),
            [{"name": "The Definitive Guide to Django: Web Development Done Right"}],
        )
        # values() *before* annotate() still yields the annotation in the dict.
        books = (
            Book.objects.filter(pk=self.b1.pk)
            .values()
            .annotate(mean_age=Avg("authors__age"))
        )
        self.assertEqual(
            list(books),
            [
                {
                    "contact_id": self.a1.id,
                    "id": self.b1.id,
                    "isbn": "159059725",
                    "mean_age": 34.5,
                    "name": (
                        "The Definitive Guide to Django: Web Development Done Right"
                    ),
                    "pages": 447,
                    "price": Approximate(Decimal("30")),
                    "pubdate": datetime.date(2007, 12, 6),
                    "publisher_id": self.p1.id,
                    "rating": 4.5,
                }
            ],
        )
        # values("rating") before annotate() groups the aggregates per rating.
        books = (
            Book.objects.values("rating")
            .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
            .order_by("rating")
        )
        self.assertEqual(
            list(books),
            [
                {
                    "rating": 3.0,
                    "n_authors": 1,
                    "mean_age": 45.0,
                },
                {
                    "rating": 4.0,
                    "n_authors": 6,
                    "mean_age": Approximate(37.16, places=1),
                },
                {
                    "rating": 4.5,
                    "n_authors": 2,
                    "mean_age": 34.5,
                },
                {
                    "rating": 5.0,
                    "n_authors": 1,
                    "mean_age": 57.0,
                },
            ],
        )
        # Aggregation over a self-referential m2m (Author.friends).
        authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
        self.assertQuerysetEqual(
            authors,
            [
                ("Adrian Holovaty", 32.0),
                ("Brad Dayley", None),
                ("Jacob Kaplan-Moss", 29.5),
                ("James Bennett", 34.0),
                ("Jeffrey Forcier", 27.0),
                ("Paul Bissex", 31.0),
                ("Peter Norvig", 46.0),
                ("Stuart Russell", 57.0),
                ("Wesley J. Chun", Approximate(33.66, places=1)),
            ],
            lambda a: (a.name, a.friends__age__avg),
        )
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]["sql"]
self.assertIn("SELECT COUNT(*) ", sql)
def test_count_distinct_expression(self):
aggs = Book.objects.aggregate(
distinct_ratings=Count(
Case(When(pages__gt=300, then="rating")), distinct=True
),
)
self.assertEqual(aggs["distinct_ratings"], 4)
def test_distinct_on_aggregate(self):
for aggregate, expected_result in (
(Avg, 4.125),
(Count, 4),
(Sum, 16.5),
):
with self.subTest(aggregate=aggregate.__name__):
books = Book.objects.aggregate(
ratings=aggregate("rating", distinct=True)
)
self.assertEqual(books["ratings"], expected_result)
    def test_non_grouped_annotation_not_in_group_by(self):
        """
        An annotation not included in values() before an aggregate should be
        excluded from the group by clause.
        """
        # xprice is annotated but never selected via values(), so grouping is
        # by rating only — both rating=4.0 rows collapse into one group.
        qs = (
            Book.objects.annotate(xprice=F("price"))
            .filter(rating=4.0)
            .values("rating")
            .annotate(count=Count("publisher_id", distinct=True))
            .values("count", "rating")
            .order_by("count")
        )
        self.assertEqual(list(qs), [{"rating": 4.0, "count": 2}])
    def test_grouped_annotation_in_group_by(self):
        """
        An annotation included in values() before an aggregate should be
        included in the group by clause.
        """
        # Grouping by (rating, xprice) keeps the two rating=4.0 rows distinct,
        # unlike test_non_grouped_annotation_not_in_group_by.
        qs = (
            Book.objects.annotate(xprice=F("price"))
            .filter(rating=4.0)
            .values("rating", "xprice")
            .annotate(count=Count("publisher_id", distinct=True))
            .values("count", "rating")
            .order_by("count")
        )
        self.assertEqual(
            list(qs),
            [
                {"rating": 4.0, "count": 1},
                {"rating": 4.0, "count": 2},
            ],
        )
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count("book__id")))
implicit = list(Author.objects.annotate(Count("book")))
self.assertCountEqual(explicit, implicit)
    def test_annotate_ordering(self):
        """Ordering by an aggregate annotation works ascending and descending."""
        books = (
            Book.objects.values("rating")
            .annotate(oldest=Max("authors__age"))
            .order_by("oldest", "rating")
        )
        self.assertEqual(
            list(books),
            [
                {"rating": 4.5, "oldest": 35},
                {"rating": 3.0, "oldest": 45},
                {"rating": 4.0, "oldest": 57},
                {"rating": 5.0, "oldest": 57},
            ],
        )
        # Same grouping, reversed ordering.
        books = (
            Book.objects.values("rating")
            .annotate(oldest=Max("authors__age"))
            .order_by("-oldest", "-rating")
        )
        self.assertEqual(
            list(books),
            [
                {"rating": 5.0, "oldest": 57},
                {"rating": 4.0, "oldest": 57},
                {"rating": 3.0, "oldest": 45},
                {"rating": 4.5, "oldest": 35},
            ],
        )
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Avg("num_authors")
)
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration", output_field=DurationField())),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration")),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum("duration", output_field=DurationField())),
{"duration__sum": datetime.timedelta(days=3)},
)
    def test_sum_distinct_aggregate(self):
        """
        Sum on a distinct() QuerySet should aggregate only the distinct items.
        """
        # The m2m join yields one author twice (3 rows); distinct() collapses
        # the duplicate down to 2 authors.
        authors = Author.objects.filter(book__in=[self.b5, self.b6])
        self.assertEqual(authors.count(), 3)
        distinct_authors = authors.distinct()
        self.assertEqual(distinct_authors.count(), 2)
        # Selected author ages are 57 and 46
        age_sum = distinct_authors.aggregate(Sum("age"))
        self.assertEqual(age_sum["age__sum"], 103)
    def test_filtering(self):
        """Filtering on aggregate annotations, alone and combined with plain
        relation filters, including the effect of filter()/annotate() order."""
        p = Publisher.objects.create(name="Expensive Publisher", num_awards=0)
        Book.objects.create(
            name="ExpensiveBook1",
            pages=1,
            isbn="111",
            rating=3.5,
            price=Decimal("1000"),
            publisher=p,
            contact_id=self.a1.id,
            pubdate=datetime.date(2008, 12, 1),
        )
        Book.objects.create(
            name="ExpensiveBook2",
            pages=1,
            isbn="222",
            rating=4.0,
            price=Decimal("1000"),
            publisher=p,
            contact_id=self.a1.id,
            pubdate=datetime.date(2008, 12, 2),
        )
        Book.objects.create(
            name="ExpensiveBook3",
            pages=1,
            isbn="333",
            rating=4.5,
            price=Decimal("35"),
            publisher=p,
            contact_id=self.a1.id,
            pubdate=datetime.date(2008, 12, 3),
        )
        # Filter on the aggregate annotation alone.
        publishers = (
            Publisher.objects.annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1)
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers,
            ["Apress", "Prentice Hall", "Expensive Publisher"],
            lambda p: p.name,
        )
        # A plain relation filter returns one row per matching book, hence
        # "Apress" appearing twice below.
        publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by(
            "pk"
        )
        self.assertQuerysetEqual(
            publishers,
            [
                "Apress",
                "Apress",
                "Sams",
                "Prentice Hall",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )
        # annotate() before filter(): the count covers all of a publisher's
        # books, not just the cheap ones — three publishers qualify.
        publishers = (
            Publisher.objects.annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers,
            ["Apress", "Prentice Hall", "Expensive Publisher"],
            lambda p: p.name,
        )
        # filter() before annotate(): the count only covers the filtered
        # (cheap) books — only Apress qualifies.
        publishers = (
            Publisher.objects.filter(book__price__lt=Decimal("40.0"))
            .annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1)
            .order_by("pk")
        )
        self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
        # range / in / isnull lookups also work against aggregate annotations.
        publishers = (
            Publisher.objects.annotate(num_books=Count("book"))
            .filter(num_books__range=[1, 3])
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers,
            [
                "Apress",
                "Sams",
                "Prentice Hall",
                "Morgan Kaufmann",
                "Expensive Publisher",
            ],
            lambda p: p.name,
        )
        publishers = (
            Publisher.objects.annotate(num_books=Count("book"))
            .filter(num_books__range=[1, 2])
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers,
            ["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"],
            lambda p: p.name,
        )
        publishers = (
            Publisher.objects.annotate(num_books=Count("book"))
            .filter(num_books__in=[1, 3])
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers,
            ["Sams", "Morgan Kaufmann", "Expensive Publisher"],
            lambda p: p.name,
        )
        # Count() never yields NULL, so isnull=True matches nothing.
        publishers = Publisher.objects.annotate(num_books=Count("book")).filter(
            num_books__isnull=True
        )
        self.assertEqual(len(publishers), 0)
    def test_annotation(self):
        """Filtering on aggregate annotations across m2m, reverse m2m and
        reverse FK relations, alone and mixed with relation filters."""
        vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
        self.assertEqual(vals, {"friends__id__count": 2})
        books = (
            Book.objects.annotate(num_authors=Count("authors__name"))
            .filter(num_authors__exact=2)
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            books,
            [
                "The Definitive Guide to Django: Web Development Done Right",
                "Artificial Intelligence: A Modern Approach",
            ],
            lambda b: b.name,
        )
        authors = (
            Author.objects.annotate(num_friends=Count("friends__id", distinct=True))
            .filter(num_friends=0)
            .order_by("pk")
        )
        self.assertQuerysetEqual(authors, ["Brad Dayley"], lambda a: a.name)
        publishers = (
            Publisher.objects.annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1)
            .order_by("pk")
        )
        self.assertQuerysetEqual(
            publishers, ["Apress", "Prentice Hall"], lambda p: p.name
        )
        # filter() before annotate() restricts what gets counted.
        publishers = (
            Publisher.objects.filter(book__price__lt=Decimal("40.0"))
            .annotate(num_books=Count("book__id"))
            .filter(num_books__gt=1)
        )
        self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
        # Relation filter and aggregate filter can be combined in one call.
        books = Book.objects.annotate(num_authors=Count("authors__id")).filter(
            authors__name__contains="Norvig", num_authors__gt=1
        )
        self.assertQuerysetEqual(
            books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name
        )
    def test_more_aggregation(self):
        """aggregate() on top of a filtered, annotated queryset."""
        # Add Norvig as an author of a second book so two books match below.
        a = Author.objects.get(name__contains="Norvig")
        b = Book.objects.get(name__contains="Done Right")
        b.authors.add(a)
        b.save()
        vals = (
            Book.objects.annotate(num_authors=Count("authors__id"))
            .filter(authors__name__contains="Norvig", num_authors__gt=1)
            .aggregate(Avg("rating"))
        )
        self.assertEqual(vals, {"rating__avg": 4.25})
    def test_even_more_aggregate(self):
        """exclude() against an aggregate annotation, plus Min/Max over
        date and time fields."""
        publishers = (
            Publisher.objects.annotate(
                earliest_book=Min("book__pubdate"),
            )
            # Publishers with no books have a NULL Min() and are excluded.
            .exclude(earliest_book=None)
            .order_by("earliest_book")
            .values(
                "earliest_book",
                "num_awards",
                "id",
                "name",
            )
        )
        self.assertEqual(
            list(publishers),
            [
                {
                    "earliest_book": datetime.date(1991, 10, 15),
                    "num_awards": 9,
                    "id": self.p4.id,
                    "name": "Morgan Kaufmann",
                },
                {
                    "earliest_book": datetime.date(1995, 1, 15),
                    "num_awards": 7,
                    "id": self.p3.id,
                    "name": "Prentice Hall",
                },
                {
                    "earliest_book": datetime.date(2007, 12, 6),
                    "num_awards": 3,
                    "id": self.p1.id,
                    "name": "Apress",
                },
                {
                    "earliest_book": datetime.date(2008, 3, 3),
                    "num_awards": 1,
                    "id": self.p2.id,
                    "name": "Sams",
                },
            ],
        )
        vals = Store.objects.aggregate(
            Max("friday_night_closing"), Min("original_opening")
        )
        self.assertEqual(
            vals,
            {
                "friday_night_closing__max": datetime.time(23, 59, 59),
                "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
            },
        )
    def test_annotate_values_list(self):
        """annotate() composes with values_list() in all its forms (multiple
        fields, single field, annotation only, flat=True)."""
        books = (
            Book.objects.filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values_list("pk", "isbn", "mean_age")
        )
        self.assertEqual(list(books), [(self.b1.id, "159059725", 34.5)])
        books = (
            Book.objects.filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values_list("isbn")
        )
        self.assertEqual(list(books), [("159059725",)])
        books = (
            Book.objects.filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values_list("mean_age")
        )
        self.assertEqual(list(books), [(34.5,)])
        books = (
            Book.objects.filter(pk=self.b1.pk)
            .annotate(mean_age=Avg("authors__age"))
            .values_list("mean_age", flat=True)
        )
        self.assertEqual(list(books), [34.5])
        # values_list() before annotate() groups the Count by price.
        books = (
            Book.objects.values_list("price")
            .annotate(count=Count("price"))
            .order_by("-count", "price")
        )
        self.assertEqual(
            list(books),
            [
                (Decimal("29.69"), 2),
                (Decimal("23.09"), 1),
                (Decimal("30"), 1),
                (Decimal("75"), 1),
                (Decimal("82.8"), 1),
            ],
        )
    def test_dates_with_aggregation(self):
        """
        .dates() returns a distinct set of dates when applied to a
        QuerySet with aggregation.

        Refs #18056. Previously, .dates() would return distinct (date_kind,
        aggregation) sets, in this case (year, num_authors), so 2008 would be
        returned twice because there are books from 2008 with a different
        number of authors.
        """
        dates = Book.objects.annotate(num_authors=Count("authors")).dates(
            "pubdate", "year"
        )
        # Each publication year appears exactly once.
        self.assertSequenceEqual(
            dates,
            [
                datetime.date(1991, 1, 1),
                datetime.date(1995, 1, 1),
                datetime.date(2007, 1, 1),
                datetime.date(2008, 1, 1),
            ],
        )
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values("rating").aggregate(max_rating=Max("rating"))
self.assertEqual(max_rating["max_rating"], 5)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id"))
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3})
    def test_ticket17424(self):
        """
        Doing exclude() on a foreign model after annotate() doesn't crash.
        """
        all_books = list(Book.objects.values_list("pk", flat=True).order_by("pk"))
        annotated_books = Book.objects.order_by("pk").annotate(one=Count("id"))
        # The value doesn't matter, we just need any negative
        # constraint on a related model that's a noop.
        excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
        # Try to generate query tree
        str(excluded_books.query)
        self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
        # Check internal state
        # NOTE(review): join_type is None here, which in the ORM's alias map
        # marks the base (non-joined) table — confirm against Query internals.
        self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
        self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Aggregation over sliced queryset works correctly.
"""
qs = Book.objects.order_by("-rating")[0:3]
vals = qs.aggregate(average_top3_rating=Avg("rating"))["average_top3_rating"]
self.assertAlmostEqual(vals, 4.5, places=2)
    def test_ticket11881(self):
        """
        Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
        select_related() stuff.
        """
        qs = (
            Book.objects.select_for_update()
            .order_by("pk")
            .select_related("publisher")
            .annotate(max_pk=Max("pk"))
        )
        with CaptureQueriesContext(connection) as captured_queries:
            qs.aggregate(avg_pk=Avg("max_pk"))
        # Exactly one query runs; the inner queryset's extras must not
        # leak into its SQL.
        self.assertEqual(len(captured_queries), 1)
        qstr = captured_queries[0]["sql"].lower()
        self.assertNotIn("for update", qstr)
        forced_ordering = connection.ops.force_no_ordering()
        if forced_ordering:
            # If the backend needs to force an ordering we make sure it's
            # the only "ORDER BY" clause present in the query.
            self.assertEqual(
                re.findall(r"order by (\w+)", qstr),
                [", ".join(f[1][0] for f in forced_ordering).lower()],
            )
        else:
            self.assertNotIn("order by", qstr)
        # No JOINs either: select_related() was dropped from the subquery.
        self.assertEqual(qstr.count(" join "), 0)
    def test_decimal_max_digits_has_no_effect(self):
        """Sum over a DecimalField may exceed the field's max_digits; ten
        9999.98 prices sum to the 7-digit 99999.80."""
        Book.objects.all().delete()
        a1 = Author.objects.first()
        p1 = Publisher.objects.first()
        thedate = timezone.now()
        for i in range(10):
            Book.objects.create(
                isbn="abcde{}".format(i),
                name="none",
                pages=10,
                rating=4.0,
                price=9999.98,
                contact=a1,
                publisher=p1,
                pubdate=thedate,
            )
        book = Book.objects.aggregate(price_sum=Sum("price"))
        self.assertEqual(book["price_sum"], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, "fail is not an aggregate expression"):
Book.objects.aggregate(fail=F("price"))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(
val=Max(Value(2), output_field=IntegerField())
).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
    def test_annotation_expressions(self):
        """Sum(F() + F()) and Sum() + Sum() produce the same annotations."""
        authors = Author.objects.annotate(
            combined_ages=Sum(F("age") + F("friends__age"))
        ).order_by("name")
        authors2 = Author.objects.annotate(
            combined_ages=Sum("age") + Sum("friends__age")
        ).order_by("name")
        for qs in (authors, authors2):
            self.assertQuerysetEqual(
                qs,
                [
                    ("Adrian Holovaty", 132),
                    # An author with no friends aggregates to None.
                    ("Brad Dayley", None),
                    ("Jacob Kaplan-Moss", 129),
                    ("James Bennett", 63),
                    ("Jeffrey Forcier", 128),
                    ("Paul Bissex", 120),
                    ("Peter Norvig", 103),
                    ("Stuart Russell", 103),
                    ("Wesley J. Chun", 176),
                ],
                lambda a: (a.name, a.combined_ages),
            )
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum("age") / Count("*"))
a2 = Author.objects.aggregate(av_age=Sum("age") / Count("age"))
a3 = Author.objects.aggregate(av_age=Avg("age"))
self.assertEqual(a1, {"av_age": 37})
self.assertEqual(a2, {"av_age": 37})
self.assertEqual(a3, {"av_age": Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price")))[
"avg_price"
]
self.assertIsInstance(v, Decimal)
self.assertEqual(v, Approximate(Decimal("47.39"), places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price") + 2) * 3)
self.assertEqual(p1, {"avg_price": Approximate(Decimal("148.18"), places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price") + 2 * 3)
self.assertEqual(p2, {"avg_price": Approximate(Decimal("53.39"), places=2)})
    def test_combine_different_types(self):
        """Mixing field types in arithmetic requires an explicit output_field."""
        msg = (
            "Cannot infer type of '+' expression involving these types: FloatField, "
            "DecimalField. You must set output_field."
        )
        qs = Book.objects.annotate(sums=Sum("rating") + Sum("pages") + Sum("price"))
        with self.assertRaisesMessage(FieldError, msg):
            qs.first()
        # Re-evaluating the same queryset raises again — the error is not
        # cached away by the first attempt.
        with self.assertRaisesMessage(FieldError, msg):
            qs.first()
        # With an explicit output_field the result is coerced accordingly.
        b1 = Book.objects.annotate(
            sums=Sum(F("rating") + F("pages") + F("price"), output_field=IntegerField())
        ).get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)
        b2 = Book.objects.annotate(
            sums=Sum(F("rating") + F("pages") + F("price"), output_field=FloatField())
        ).get(pk=self.b4.pk)
        self.assertEqual(b2.sums, 383.69)
        b3 = Book.objects.annotate(
            sums=Sum(F("rating") + F("pages") + F("price"), output_field=DecimalField())
        ).get(pk=self.b4.pk)
        self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
    def test_complex_aggregations_require_kwarg(self):
        """Composite aggregate expressions must be passed as keyword arguments
        since no default alias can be derived for them."""
        with self.assertRaisesMessage(
            TypeError, "Complex annotations require an alias"
        ):
            Author.objects.annotate(Sum(F("age") + F("friends__age")))
        with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
            Author.objects.aggregate(Sum("age") / Count("age"))
        with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
            Author.objects.aggregate(Sum(1))
    def test_aggregate_over_complex_annotation(self):
        """aggregate() can reference (and do arithmetic on) a composite
        annotation, alone or mixed with other aggregates."""
        qs = Author.objects.annotate(combined_ages=Sum(F("age") + F("friends__age")))
        age = qs.aggregate(max_combined_age=Max("combined_ages"))
        self.assertEqual(age["max_combined_age"], 176)
        age = qs.aggregate(max_combined_age_doubled=Max("combined_ages") * 2)
        self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
        age = qs.aggregate(
            max_combined_age_doubled=Max("combined_ages") + Max("combined_ages")
        )
        self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
        age = qs.aggregate(
            max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
            sum_combined_age=Sum("combined_ages"),
        )
        self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
        self.assertEqual(age["sum_combined_age"], 954)
        age = qs.aggregate(
            max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
            sum_combined_age_doubled=Sum("combined_ages") + Sum("combined_ages"),
        )
        self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
        self.assertEqual(age["sum_combined_age_doubled"], 954 * 2)
    def test_values_annotation_with_expression(self):
        """Annotations mixing aggregates with F() references work on a
        values() queryset."""
        # ensure the F() is promoted to the group by clause
        qs = Author.objects.values("name").annotate(another_age=Sum("age") + F("age"))
        a = qs.get(name="Adrian Holovaty")
        self.assertEqual(a["another_age"], 68)
        qs = qs.annotate(friend_count=Count("friends"))
        a = qs.get(name="Adrian Holovaty")
        self.assertEqual(a["friend_count"], 2)
        # friends__age joins a multivalued relation, producing one row per
        # friend (hence two rows for the same author below).
        qs = (
            qs.annotate(combined_age=Sum("age") + F("friends__age"))
            .filter(name="Adrian Holovaty")
            .order_by("-combined_age")
        )
        self.assertEqual(
            list(qs),
            [
                {
                    "name": "Adrian Holovaty",
                    "another_age": 68,
                    "friend_count": 1,
                    "combined_age": 69,
                },
                {
                    "name": "Adrian Holovaty",
                    "another_age": 68,
                    "friend_count": 1,
                    "combined_age": 63,
                },
            ],
        )
        vals = qs.values("name", "combined_age")
        self.assertEqual(
            list(vals),
            [
                {"name": "Adrian Holovaty", "combined_age": 69},
                {"name": "Adrian Holovaty", "combined_age": 63},
            ],
        )
def test_annotate_values_aggregate(self):
alias_age = (
Author.objects.annotate(age_alias=F("age"))
.values(
"age_alias",
)
.aggregate(sum_age=Sum("age_alias"))
)
age = Author.objects.values("age").aggregate(sum_age=Sum("age"))
self.assertEqual(alias_age["sum_age"], age["sum_age"])
def test_annotate_over_annotate(self):
author = (
Author.objects.annotate(age_alias=F("age"))
.annotate(sum_age=Sum("age_alias"))
.get(name="Adrian Holovaty")
)
other_author = Author.objects.annotate(sum_age=Sum("age")).get(
name="Adrian Holovaty"
)
self.assertEqual(author.sum_age, other_author.sum_age)
    def test_aggregate_over_aggregate(self):
        """Referencing one aggregate from another inside the same
        aggregate() call is rejected."""
        msg = "Cannot compute Avg('age'): 'age' is an aggregate"
        with self.assertRaisesMessage(FieldError, msg):
            # The "age" alias bound by Sum shadows the field, so the
            # Avg(F("age")) reference resolves to an aggregate.
            Author.objects.annotate(age_alias=F("age"),).aggregate(
                age=Sum(F("age")),
                avg_age=Avg(F("age")),
            )
    def test_annotated_aggregate_over_annotated_aggregate(self):
        """Annotating an aggregate of another aggregate annotation is
        rejected, for built-in and custom aggregate classes alike."""
        with self.assertRaisesMessage(
            FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"
        ):
            Book.objects.annotate(Max("id")).annotate(Sum("id__max"))
        # Custom Max that discards all but its first source expression when
        # compiled to SQL.
        class MyMax(Max):
            def as_sql(self, compiler, connection):
                self.set_source_expressions(self.get_source_expressions()[0:1])
                return super().as_sql(compiler, connection)
        with self.assertRaisesMessage(
            FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"
        ):
            Book.objects.annotate(Max("id")).annotate(my_max=MyMax("id__max", "price"))
    def test_multi_arg_aggregate(self):
        """A multi-argument aggregate counts as "complex" and therefore
        requires an explicit alias."""
        # Custom Max that keeps only its first source expression at SQL time
        # (compiled on a copy so the original instance stays intact).
        class MyMax(Max):
            output_field = DecimalField()
            def as_sql(self, compiler, connection):
                copy = self.copy()
                copy.set_source_expressions(copy.get_source_expressions()[0:1])
                return super(MyMax, copy).as_sql(compiler, connection)
        with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
            Book.objects.aggregate(MyMax("pages", "price"))
        with self.assertRaisesMessage(
            TypeError, "Complex annotations require an alias"
        ):
            Book.objects.annotate(MyMax("pages", "price"))
        # With an alias the multi-argument aggregate is accepted.
        Book.objects.aggregate(max_field=MyMax("pages", "price"))
    def test_add_implementation(self):
        """Vendor-specific as_<vendor>() hooks can override how an aggregate
        renders to SQL, fully or by tweaking the template substitutions."""
        class MySum(Sum):
            pass
        # test completely changing how the output is rendered
        def lower_case_function_override(self, compiler, connection):
            sql, params = compiler.compile(self.source_expressions[0])
            substitutions = {
                "function": self.function.lower(),
                "expressions": sql,
                "distinct": "",
            }
            substitutions.update(self.extra)
            return self.template % substitutions, params
        setattr(MySum, "as_" + connection.vendor, lower_case_function_override)
        qs = Book.objects.annotate(
            sums=MySum(
                F("rating") + F("pages") + F("price"), output_field=IntegerField()
            )
        )
        # Lower-case "sum(" proves the override was picked for this vendor.
        self.assertEqual(str(qs.query).count("sum("), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)
        # test changing the dict and delegating
        def lower_case_function_super(self, compiler, connection):
            self.extra["function"] = self.function.lower()
            return super(MySum, self).as_sql(compiler, connection)
        setattr(MySum, "as_" + connection.vendor, lower_case_function_super)
        qs = Book.objects.annotate(
            sums=MySum(
                F("rating") + F("pages") + F("price"), output_field=IntegerField()
            )
        )
        self.assertEqual(str(qs.query).count("sum("), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)
        # test overriding all parts of the template
        def be_evil(self, compiler, connection):
            substitutions = {"function": "MAX", "expressions": "2", "distinct": ""}
            substitutions.update(self.extra)
            return self.template % substitutions, ()
        setattr(MySum, "as_" + connection.vendor, be_evil)
        qs = Book.objects.annotate(
            sums=MySum(
                F("rating") + F("pages") + F("price"), output_field=IntegerField()
            )
        )
        self.assertEqual(str(qs.query).count("MAX("), 1)
        b1 = qs.get(pk=self.b4.pk)
        # MAX(2) over any group is just 2.
        self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values("rating").aggregate(
double_max_rating=Max("rating") + Max("rating")
)
self.assertEqual(max_rating["double_max_rating"], 5 * 2)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id") + 5)
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3 + 5})
    def test_expression_on_aggregation(self):
        """Database functions (Greatest) can wrap aggregates and mix them with
        F() references, then be filtered against other fields."""
        qs = (
            Publisher.objects.annotate(
                price_or_median=Greatest(
                    Avg("book__rating", output_field=DecimalField()), Avg("book__price")
                )
            )
            .filter(price_or_median__gte=F("num_awards"))
            .order_by("num_awards")
        )
        self.assertQuerysetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
        qs2 = (
            Publisher.objects.annotate(
                rating_or_num_awards=Greatest(
                    Avg("book__rating"), F("num_awards"), output_field=FloatField()
                )
            )
            .filter(rating_or_num_awards__gt=F("num_awards"))
            .order_by("num_awards")
        )
        self.assertQuerysetEqual(qs2, [1, 3], lambda v: v.num_awards)
    def test_arguments_must_be_expressions(self):
        """aggregate() rejects non-expression arguments, listing every
        offender in the error message."""
        msg = "QuerySet.aggregate() received non-expression(s): %s."
        with self.assertRaisesMessage(TypeError, msg % FloatField()):
            Book.objects.aggregate(FloatField())
        with self.assertRaisesMessage(TypeError, msg % True):
            Book.objects.aggregate(is_book=True)
        # Valid expressions mixed in don't suppress the error.
        with self.assertRaisesMessage(
            TypeError, msg % ", ".join([str(FloatField()), "True"])
        ):
            Book.objects.aggregate(FloatField(), Avg("price"), is_book=True)
    def test_aggregation_subquery_annotation(self):
        """Subquery annotations are excluded from the GROUP BY if they are
        not explicitly grouped against."""
        latest_book_pubdate_qs = (
            Book.objects.filter(publisher=OuterRef("pk"))
            .order_by("-pubdate")
            .values("pubdate")[:1]
        )
        publisher_qs = Publisher.objects.annotate(
            latest_book_pubdate=Subquery(latest_book_pubdate_qs),
        ).annotate(count=Count("book"))
        with self.assertNumQueries(1) as ctx:
            list(publisher_qs)
        # Two SELECTs total: the outer query plus the (single) subquery.
        self.assertEqual(ctx[0]["sql"].count("SELECT"), 2)
        # The GROUP BY should not be by alias either.
        self.assertEqual(ctx[0]["sql"].lower().count("latest_book_pubdate"), 1)
    def test_aggregation_subquery_annotation_exists(self):
        """exists() works on a queryset combining a Subquery annotation with
        an aggregate annotation in a single annotate() call."""
        latest_book_pubdate_qs = (
            Book.objects.filter(publisher=OuterRef("pk"))
            .order_by("-pubdate")
            .values("pubdate")[:1]
        )
        publisher_qs = Publisher.objects.annotate(
            latest_book_pubdate=Subquery(latest_book_pubdate_qs),
            count=Count("book"),
        )
        self.assertTrue(publisher_qs.exists())
    def test_aggregation_filter_exists(self):
        """Converting a HAVING-filtered aggregation to an EXISTS query keeps
        its single grouping column."""
        publishers_having_more_than_one_book_qs = (
            Book.objects.values("publisher")
            .annotate(cnt=Count("isbn"))
            .filter(cnt__gt=1)
        )
        query = publishers_having_more_than_one_book_qs.query.exists(
            using=connection.alias
        )
        # pre_sql_setup() returns (select, klass_info, group_by) — only the
        # grouping columns are inspected here.
        _, _, group_by = query.get_compiler(connection=connection).pre_sql_setup()
        self.assertEqual(len(group_by), 1)
    def test_aggregation_exists_annotation(self):
        """An Exists() annotation alongside an aggregate annotation still
        yields one row per publisher."""
        published_books = Book.objects.filter(publisher=OuterRef("pk"))
        publisher_qs = Publisher.objects.annotate(
            published_book=Exists(published_books),
            count=Count("book"),
        ).values_list("name", flat=True)
        self.assertCountEqual(
            list(publisher_qs),
            [
                "Apress",
                "Morgan Kaufmann",
                "Jonno's House of Books",
                "Prentice Hall",
                "Sams",
            ],
        )
    def test_aggregation_subquery_annotation_values(self):
        """
        Subquery annotations and external aliases are excluded from the GROUP
        BY if they are not selected.
        """
        books_qs = (
            Book.objects.annotate(
                first_author_the_same_age=Subquery(
                    Author.objects.filter(
                        age=OuterRef("contact__friends__age"),
                    )
                    .order_by("age")
                    .values("id")[:1],
                )
            )
            # The subquery annotation is filtered on but *not* selected by the
            # final values() call below.
            .filter(
                publisher=self.p1,
                first_author_the_same_age__isnull=False,
            )
            .annotate(
                min_age=Min("contact__friends__age"),
            )
            .values("name", "min_age")
            .order_by("name")
        )
        self.assertEqual(
            list(books_qs),
            [
                {"name": "Practical Django Projects", "min_age": 34},
                {
                    "name": (
                        "The Definitive Guide to Django: Web Development Done Right"
                    ),
                    "min_age": 29,
                },
            ],
        )
    def test_aggregation_subquery_annotation_values_collision(self):
        """A Subquery annotation named after a field on the related model
        ("rating") can still be grouped and aggregated over."""
        books_rating_qs = Book.objects.filter(
            publisher=OuterRef("pk"),
            price=Decimal("29.69"),
        ).values("rating")
        publisher_qs = (
            Publisher.objects.filter(
                book__contact__age__gt=20,
                name=self.p1.name,
            )
            .annotate(
                rating=Subquery(books_rating_qs),
                contacts_count=Count("book__contact"),
            )
            .values("rating")
            .annotate(total_count=Count("rating"))
        )
        self.assertEqual(
            list(publisher_qs),
            [
                {"rating": 4.0, "total_count": 2},
            ],
        )
    @skipUnlessDBFeature("supports_subqueries_in_group_by")
    def test_aggregation_subquery_annotation_multivalued(self):
        """
        Subquery annotations must be included in the GROUP BY if they use
        potentially multivalued relations (contain the LOOKUP_SEP).
        """
        subquery_qs = Author.objects.filter(
            pk=OuterRef("pk"),
            # book__name is the multivalued outer reference that forces the
            # subquery into the GROUP BY.
            book__name=OuterRef("book__name"),
        ).values("pk")
        author_qs = Author.objects.annotate(
            subquery_id=Subquery(subquery_qs),
        ).annotate(count=Count("book"))
        self.assertEqual(author_qs.count(), Author.objects.count())
    def test_aggregation_order_by_not_selected_annotation_values(self):
        """Ordering by an annotation that values_list() does not select works
        for string and F()-expression orderings alike."""
        result_asc = [
            self.b4.pk,
            self.b3.pk,
            self.b1.pk,
            self.b2.pk,
            self.b5.pk,
            self.b6.pk,
        ]
        result_desc = result_asc[::-1]
        tests = [
            ("min_related_age", result_asc),
            ("-min_related_age", result_desc),
            (F("min_related_age"), result_asc),
            (F("min_related_age").asc(), result_asc),
            (F("min_related_age").desc(), result_desc),
        ]
        for ordering, expected_result in tests:
            with self.subTest(ordering=ordering):
                books_qs = (
                    Book.objects.annotate(
                        min_age=Min("authors__age"),
                    )
                    .annotate(
                        # Fall back to the contact's age when a book has no
                        # authors (NULL Min()).
                        min_related_age=Coalesce("min_age", "contact__age"),
                    )
                    .order_by(ordering)
                    .values_list("pk", flat=True)
                )
                self.assertEqual(list(books_qs), expected_result)
    @skipUnlessDBFeature("supports_subqueries_in_group_by")
    def test_group_by_subquery_annotation(self):
        """
        Subquery annotations are included in the GROUP BY if they are
        grouped against.
        """
        long_books_count_qs = (
            Book.objects.filter(
                publisher=OuterRef("pk"),
                pages__gt=400,
            )
            .values("publisher")
            .annotate(count=Count("pk"))
            .values("count")
        )
        # All three spellings — Subquery wrapper, queryset, raw Query — must
        # be accepted as a values_list() grouping expression.
        groups = [
            Subquery(long_books_count_qs),
            long_books_count_qs,
            long_books_count_qs.query,
        ]
        for group in groups:
            with self.subTest(group=group.__class__.__name__):
                long_books_count_breakdown = Publisher.objects.values_list(
                    group,
                ).annotate(total=Count("*"))
                self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4})
    @skipUnlessDBFeature("supports_subqueries_in_group_by")
    def test_group_by_exists_annotation(self):
        """
        Exists annotations are included in the GROUP BY if they are
        grouped against.
        """
        long_books_qs = Book.objects.filter(
            publisher=OuterRef("pk"),
            pages__gt=800,
        )
        has_long_books_breakdown = Publisher.objects.values_list(
            Exists(long_books_qs),
        ).annotate(total=Count("*"))
        self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3})
    @skipUnlessDBFeature("supports_subqueries_in_group_by")
    def test_aggregation_subquery_annotation_related_field(self):
        """A Subquery annotation referencing a related field via OuterRef can
        be filtered on and combined with an aggregate annotation."""
        # Publisher deliberately shares its name with author a9 so the
        # name-matching subquery below finds exactly one book.
        publisher = Publisher.objects.create(name=self.a9.name, num_awards=2)
        book = Book.objects.create(
            isbn="159059999",
            name="Test book.",
            pages=819,
            rating=2.5,
            price=Decimal("14.44"),
            contact=self.a9,
            publisher=publisher,
            pubdate=datetime.date(2019, 12, 6),
        )
        book.authors.add(self.a5, self.a6, self.a7)
        books_qs = (
            Book.objects.annotate(
                contact_publisher=Subquery(
                    Publisher.objects.filter(
                        pk=OuterRef("publisher"),
                        name=OuterRef("contact__name"),
                    ).values("name")[:1],
                )
            )
            .filter(
                contact_publisher__isnull=False,
            )
            .annotate(count=Count("authors"))
        )
        self.assertSequenceEqual(books_qs, [book])
        # FIXME: GROUP BY doesn't need to include a subquery with
        # non-multivalued JOINs, see Col.possibly_multivalued (refs #31150):
        # with self.assertNumQueries(1) as ctx:
        #     self.assertSequenceEqual(books_qs, [book])
        # self.assertEqual(ctx[0]['sql'].count('SELECT'), 2)
    @skipUnlessDBFeature("supports_subqueries_in_group_by")
    def test_aggregation_nested_subquery_outerref(self):
        """A doubly-nested OuterRef resolves against the outermost query."""
        publisher_with_same_name = Publisher.objects.filter(
            id__in=Subquery(
                Publisher.objects.filter(
                    # OuterRef(OuterRef(...)) refers to the Book queryset two
                    # levels up, not to the immediately enclosing Publisher one.
                    name=OuterRef(OuterRef("publisher__name")),
                ).values("id"),
            ),
        ).values(publisher_count=Count("id"))[:1]
        books_breakdown = Book.objects.annotate(
            publisher_count=Subquery(publisher_with_same_name),
            authors_count=Count("authors"),
        ).values_list("publisher_count", flat=True)
        self.assertSequenceEqual(books_breakdown, [1] * 6)
    def test_filter_in_subquery_or_aggregation(self):
        """
        Filtering against an aggregate requires the usage of the HAVING clause.
        If such a filter is unionized to a non-aggregate one the latter will
        also need to be moved to the HAVING clause and have its grouping
        columns used in the GROUP BY.
        When this is done with a subquery the specialized logic in charge of
        using outer reference columns to group should be used instead of the
        subquery itself as the latter might return multiple rows.
        """
        # The disjunction mixes an aggregate predicate with a subquery one,
        # forcing both into HAVING.
        authors = Author.objects.annotate(
            Count("book"),
        ).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors")))
        self.assertQuerysetEqual(authors, Author.objects.all(), ordered=False)
    def test_aggregation_random_ordering(self):
        """Random() is not included in the GROUP BY when used for ordering."""
        authors = Author.objects.annotate(contact_count=Count("book")).order_by("?")
        # ordered=False because order_by("?") shuffles the rows.
        self.assertQuerysetEqual(
            authors,
            [
                ("Adrian Holovaty", 1),
                ("Jacob Kaplan-Moss", 1),
                ("Brad Dayley", 1),
                ("James Bennett", 1),
                ("Jeffrey Forcier", 1),
                ("Paul Bissex", 1),
                ("Wesley J. Chun", 1),
                ("Stuart Russell", 1),
                ("Peter Norvig", 2),
            ],
            lambda a: (a.name, a.contact_count),
            ordered=False,
        )
    def test_empty_result_optimization(self):
        """Aggregating over .none() is answered without hitting the database."""
        with self.assertNumQueries(0):
            self.assertEqual(
                Publisher.objects.none().aggregate(
                    sum_awards=Sum("num_awards"),
                    books_count=Count("book"),
                ),
                {
                    "sum_awards": None,
                    "books_count": 0,
                },
            )
        # Expression without empty_result_set_value forces queries to be
        # executed even if they would return an empty result set.
        raw_books_count = Func("book", function="COUNT")
        raw_books_count.contains_aggregate = True
        with self.assertNumQueries(1):
            self.assertEqual(
                Publisher.objects.none().aggregate(
                    sum_awards=Sum("num_awards"),
                    books_count=raw_books_count,
                ),
                {
                    "sum_awards": None,
                    "books_count": 0,
                },
            )
    def test_coalesced_empty_result_set(self):
        """
        Coalesce() with a constant fallback preserves the .none() optimization;
        a database expression fallback still requires one query.
        """
        with self.assertNumQueries(0):
            self.assertEqual(
                Publisher.objects.none().aggregate(
                    sum_awards=Coalesce(Sum("num_awards"), 0),
                )["sum_awards"],
                0,
            )
        # Multiple expressions.
        with self.assertNumQueries(0):
            self.assertEqual(
                Publisher.objects.none().aggregate(
                    sum_awards=Coalesce(Sum("num_awards"), None, 0),
                )["sum_awards"],
                0,
            )
        # Nested coalesce.
        with self.assertNumQueries(0):
            self.assertEqual(
                Publisher.objects.none().aggregate(
                    sum_awards=Coalesce(Coalesce(Sum("num_awards"), None), 0),
                )["sum_awards"],
                0,
            )
        # Expression coalesce.
        with self.assertNumQueries(1):
            self.assertIsInstance(
                Store.objects.none().aggregate(
                    latest_opening=Coalesce(
                        Max("original_opening"),
                        RawSQL("CURRENT_TIMESTAMP", []),
                    ),
                )["latest_opening"],
                datetime.datetime,
            )
    def test_aggregation_default_unsupported_by_count(self):
        """Count() rejects the default argument (it never returns NULL)."""
        msg = "Count does not allow default."
        with self.assertRaisesMessage(TypeError, msg):
            Count("age", default=0)
    def test_aggregation_default_unset(self):
        """Without a default, aggregates over an empty set return None."""
        for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
            with self.subTest(Aggregate):
                # age__gt=100 matches no authors in the fixtures.
                result = Author.objects.filter(age__gt=100).aggregate(
                    value=Aggregate("age"),
                )
                self.assertIsNone(result["value"])
    def test_aggregation_default_zero(self):
        """default=0 replaces the None result of an empty aggregation."""
        for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
            with self.subTest(Aggregate):
                result = Author.objects.filter(age__gt=100).aggregate(
                    value=Aggregate("age", default=0),
                )
                self.assertEqual(result["value"], 0)
    def test_aggregation_default_integer(self):
        """A non-zero integer default is returned for an empty aggregation."""
        for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
            with self.subTest(Aggregate):
                result = Author.objects.filter(age__gt=100).aggregate(
                    value=Aggregate("age", default=21),
                )
                self.assertEqual(result["value"], 21)
    def test_aggregation_default_expression(self):
        """The default may be a database expression, not just a literal."""
        for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
            with self.subTest(Aggregate):
                result = Author.objects.filter(age__gt=100).aggregate(
                    value=Aggregate("age", default=Value(5) * Value(7)),
                )
                self.assertEqual(result["value"], 35)
    def test_aggregation_default_group_by(self):
        """Aggregate defaults also apply per-group in annotated querysets."""
        qs = (
            Publisher.objects.values("name")
            .annotate(
                books=Count("book"),
                pages=Sum("book__pages", default=0),
            )
            # Only the bookless publisher; its Sum falls back to the default.
            .filter(books=0)
        )
        self.assertSequenceEqual(
            qs,
            [{"name": "Jonno's House of Books", "books": 0, "pages": 0}],
        )
    def test_aggregation_default_compound_expression(self):
        """An aggregate with a default can be part of a larger expression."""
        # Scale rating to a percentage; default to 50% if no books published.
        formula = Avg("book__rating", default=2.5) * 20.0
        queryset = Publisher.objects.annotate(rating=formula).order_by("name")
        self.assertSequenceEqual(
            queryset.values("name", "rating"),
            [
                {"name": "Apress", "rating": 85.0},
                {"name": "Jonno's House of Books", "rating": 50.0},
                {"name": "Morgan Kaufmann", "rating": 100.0},
                {"name": "Prentice Hall", "rating": 80.0},
                {"name": "Sams", "rating": 60.0},
            ],
        )
    def test_aggregation_default_using_time_from_python(self):
        """A datetime.time default is used for rows excluded by the filter."""
        expr = Min(
            "store__friday_night_closing",
            filter=~Q(store__name="Amazon.com"),
            default=datetime.time(17),
        )
        if connection.vendor == "mysql":
            # Workaround for #30224 for MySQL 8.0+ & MariaDB.
            expr.default = Cast(expr.default, TimeField())
        queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
        self.assertSequenceEqual(
            queryset.values("isbn", "oldest_store_opening"),
            [
                {"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
                {
                    "isbn": "013790395",
                    "oldest_store_opening": datetime.time(23, 59, 59),
                },
                {"isbn": "067232959", "oldest_store_opening": datetime.time(17)},
                {"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
                {
                    "isbn": "159059725",
                    "oldest_store_opening": datetime.time(23, 59, 59),
                },
                {"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
            ],
        )
    def test_aggregation_default_using_time_from_database(self):
        """The default may be a database-evaluated time expression."""
        now = timezone.now().astimezone(datetime.timezone.utc)
        expr = Min(
            "store__friday_night_closing",
            filter=~Q(store__name="Amazon.com"),
            # Truncated to the hour so the expected value below is predictable.
            default=TruncHour(NowUTC(), output_field=TimeField()),
        )
        queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
        self.assertSequenceEqual(
            queryset.values("isbn", "oldest_store_opening"),
            [
                {"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
                {
                    "isbn": "013790395",
                    "oldest_store_opening": datetime.time(23, 59, 59),
                },
                {"isbn": "067232959", "oldest_store_opening": datetime.time(now.hour)},
                {"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
                {
                    "isbn": "159059725",
                    "oldest_store_opening": datetime.time(23, 59, 59),
                },
                {"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
            ],
        )
    def test_aggregation_default_using_date_from_python(self):
        """A datetime.date default is used for publishers with no books."""
        expr = Min("book__pubdate", default=datetime.date(1970, 1, 1))
        if connection.vendor == "mysql":
            # Workaround for #30224 for MySQL 5.7+ & MariaDB.
            expr.default = Cast(expr.default, DateField())
        queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
        self.assertSequenceEqual(
            queryset.values("name", "earliest_pubdate"),
            [
                {"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
                {
                    "name": "Jonno's House of Books",
                    "earliest_pubdate": datetime.date(1970, 1, 1),
                },
                {
                    "name": "Morgan Kaufmann",
                    "earliest_pubdate": datetime.date(1991, 10, 15),
                },
                {
                    "name": "Prentice Hall",
                    "earliest_pubdate": datetime.date(1995, 1, 15),
                },
                {"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
            ],
        )
    def test_aggregation_default_using_date_from_database(self):
        """The default may be a database-evaluated date expression."""
        now = timezone.now().astimezone(datetime.timezone.utc)
        expr = Min("book__pubdate", default=TruncDate(NowUTC()))
        queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
        self.assertSequenceEqual(
            queryset.values("name", "earliest_pubdate"),
            [
                {"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
                {"name": "Jonno's House of Books", "earliest_pubdate": now.date()},
                {
                    "name": "Morgan Kaufmann",
                    "earliest_pubdate": datetime.date(1991, 10, 15),
                },
                {
                    "name": "Prentice Hall",
                    "earliest_pubdate": datetime.date(1995, 1, 15),
                },
                {"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
            ],
        )
    def test_aggregation_default_using_datetime_from_python(self):
        """A datetime.datetime default is used for rows excluded by the filter."""
        expr = Min(
            "store__original_opening",
            filter=~Q(store__name="Amazon.com"),
            default=datetime.datetime(1970, 1, 1),
        )
        if connection.vendor == "mysql":
            # Workaround for #30224 for MySQL 8.0+ & MariaDB.
            expr.default = Cast(expr.default, DateTimeField())
        queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
        self.assertSequenceEqual(
            queryset.values("isbn", "oldest_store_opening"),
            [
                {
                    "isbn": "013235613",
                    "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
                },
                {
                    "isbn": "013790395",
                    "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
                },
                {
                    "isbn": "067232959",
                    "oldest_store_opening": datetime.datetime(1970, 1, 1),
                },
                {
                    "isbn": "155860191",
                    "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
                },
                {
                    "isbn": "159059725",
                    "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
                },
                {
                    "isbn": "159059996",
                    "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
                },
            ],
        )
    def test_aggregation_default_using_datetime_from_database(self):
        """The default may be a database-evaluated datetime expression."""
        now = timezone.now().astimezone(datetime.timezone.utc)
        expr = Min(
            "store__original_opening",
            filter=~Q(store__name="Amazon.com"),
            # Truncated to the hour so the expected value below is predictable.
            default=TruncHour(NowUTC(), output_field=DateTimeField()),
        )
        queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
        self.assertSequenceEqual(
            queryset.values("isbn", "oldest_store_opening"),
            [
                {
                    "isbn": "013235613",
                    "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
                },
                {
                    "isbn": "013790395",
                    "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
                },
                {
                    "isbn": "067232959",
                    "oldest_store_opening": now.replace(
                        minute=0, second=0, microsecond=0, tzinfo=None
                    ),
                },
                {
                    "isbn": "155860191",
                    "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
                },
                {
                    "isbn": "159059725",
                    "oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
                },
                {
                    "isbn": "159059996",
                    "oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
                },
            ],
        )
    def test_aggregation_default_using_duration_from_python(self):
        """A datetime.timedelta default works for DurationField aggregates."""
        result = Publisher.objects.filter(num_awards__gt=3).aggregate(
            value=Sum("duration", default=datetime.timedelta(0)),
        )
        self.assertEqual(result["value"], datetime.timedelta(0))
    def test_aggregation_default_using_duration_from_database(self):
        """A database-computed duration (Now() - Now()) works as a default."""
        result = Publisher.objects.filter(num_awards__gt=3).aggregate(
            value=Sum("duration", default=Now() - Now()),
        )
        self.assertEqual(result["value"], datetime.timedelta(0))
    def test_aggregation_default_using_decimal_from_python(self):
        """A Decimal default works for DecimalField aggregates."""
        result = Book.objects.filter(rating__lt=3.0).aggregate(
            value=Sum("price", default=Decimal("0.00")),
        )
        self.assertEqual(result["value"], Decimal("0.00"))
    def test_aggregation_default_using_decimal_from_database(self):
        """A database function (Pi()) works as a DecimalField default."""
        result = Book.objects.filter(rating__lt=3.0).aggregate(
            value=Sum("price", default=Pi()),
        )
        # places=6 tolerates backend-dependent float-to-Decimal rounding.
        self.assertAlmostEqual(result["value"], Decimal.from_float(math.pi), places=6)
    def test_aggregation_default_passed_another_aggregate(self):
        """Another aggregate expression may serve as an aggregate's default."""
        result = Book.objects.aggregate(
            value=Sum("price", filter=Q(rating__lt=3.0), default=Avg("pages") / 10.0),
        )
        self.assertAlmostEqual(result["value"], Decimal("61.72"), places=2)
    def test_aggregation_default_after_annotation(self):
        """Defaults work when aggregating over an annotated expression."""
        result = Publisher.objects.annotate(
            double_num_awards=F("num_awards") * 2,
        ).aggregate(value=Sum("double_num_awards", default=0))
        self.assertEqual(result["value"], 40)
    def test_aggregation_default_not_in_aggregate(self):
        """A default on an annotation doesn't affect an unrelated aggregate."""
        result = Publisher.objects.annotate(
            avg_rating=Avg("book__rating", default=2.5),
        ).aggregate(Sum("num_awards"))
        self.assertEqual(result["num_awards__sum"], 20)
    def test_exists_none_with_aggregate(self):
        """Exists() over .none() can coexist with an aggregate annotation."""
        qs = Book.objects.annotate(
            count=Count("id"),
            exists=Exists(Author.objects.none()),
        )
        self.assertEqual(len(qs), 6)
    def test_alias_sql_injection(self):
        """Aggregate aliases that could escape identifier quoting are rejected."""
        crafted_alias = """injected_name" from "aggregation_author"; --"""
        msg = (
            "Column aliases cannot contain whitespace characters, quotation marks, "
            "semicolons, or SQL comments."
        )
        with self.assertRaisesMessage(ValueError, msg):
            Author.objects.aggregate(**{crafted_alias: Avg("age")})
    def test_exists_extra_where_with_aggregate(self):
        """Exists() with an always-false extra() WHERE works alongside Count()."""
        qs = Book.objects.annotate(
            count=Count("id"),
            exists=Exists(Author.objects.extra(where=["1=0"])),
        )
        self.assertEqual(len(qs), 6)
| {
"content_hash": "ef570c7f661d837df657ac40537dae92",
"timestamp": "",
"source": "github",
"line_count": 2077,
"max_line_length": 88,
"avg_line_length": 36.68367838228214,
"alnum_prop": 0.5157365602687947,
"repo_name": "auvipy/django",
"id": "c6d73e80429dfdfa185055b4aca0e244e0a04145",
"size": "76192",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "tests/aggregation/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87610"
},
{
"name": "HTML",
"bytes": "236871"
},
{
"name": "JavaScript",
"bytes": "146241"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16014747"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
"""An API for reversible (bijective) transformations of random variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
class _Bijector(object):
  """An interface for transforming a `Distribution` `Tensor`.
  Recall that a `Distribution` `Tensor` has dimensions which have `sample`,
  `batch`, and `event` semantics. (See `DistributionShape` for more details.)
  A `Bijector` implements a bijective, differentiable function by transforming
  an input `Tensor`. The output `Tensor` shape is constrained by the input
  `sample`, `batch`, and `event` shape.  A `Bijector` is characterized by three
  operations:
  (1) Forward Evaluation
      Useful for turning one random outcome into another random outcome from a
      different distribution.
  (2) Inverse Evaluation
      Useful for "reversing" a transformation to compute one probability in
      terms of another.
  (3) (log o det o Jacobian o inverse)(x)
      "The log of the determinant of the matrix of all first-order partial
      derivatives of the inverse function."
      Useful for inverting a transformation to compute one probability in terms
      of another. Geometrically, the det(Jacobian) is the volume of the
      transformation and is used to scale the probability.
  By convention, transformations of random variables are named in terms of the
  forward transformation. The forward transformation creates samples, the
  inverse is useful for computing probabilities.
  Example Use:
    Basic properties:
    ```python
    x = ... # A tensor.
    # Evaluate forward transformation.
    fwd_x = my_bijector.forward(x)
    x == my_bijector.inverse(fwd_x)
    x != my_bijector.forward(fwd_x)  # Not equal because g(x) != g(g(x)).
    ```
    Computing a log-likelihood:
    ```python
    def transformed_log_pdf(bijector, log_pdf, x):
      return (bijector.inverse_log_det_jacobian(x) +
              log_pdf(bijector.inverse(x)))
    ```
    Transforming a random outcome:
    ```python
    def transformed_sample(bijector, x):
      return bijector.forward(x)
    ```
  Example transformations:
    "Exponential"
      ```
      Y = g(X) = exp(X)
      X ~ Normal(0, 1)  # Univariate.
      ```
      Implies:
      ```
      g^{-1}(Y) = log(Y)
      |Jacobian(g^{-1})(y)| = 1 / y
      Y ~ LogNormal(0, 1), i.e.,
      prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
                = (1 / y) Normal(log(y); 0, 1)
      ```
    "ShiftAndScale"
      ```
      Y = g(X) = sqrtSigma * X + mu
      X ~ MultivariateNormal(0, I_d)
      ```
      Implies:
      ```
      g^{-1}(Y) = inv(sqrtSigma) * (Y - mu)
      |Jacobian(g^{-1})(y)| = det(inv(sqrtSigma))
      Y ~ MultivariateNormal(mu, sqrtSigma) , i.e.,
      prob(Y=y) = |Jacobian(g^{-1})(y)| * prob(X=g^{-1}(y))
                = det(sqrtSigma)^(-d) *
                  MultivariateNormal(inv(sqrtSigma) * (y - mu); 0, I_d)
      ```
  Example of why a `Bijector` needs to understand sample, batch, event
  partitioning:
    Consider the `Exp` `Bijector` applied to a `Tensor` which has sample, batch,
    and event (S, B, E) shape semantics. Suppose
    the `Tensor`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`.
    For `Exp`, the shape of the `Tensor` returned by `forward` and `inverse` is
    unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by
    `inverse_log_det_jacobian` is `[4, 2]` because the Jacobian is a reduction
    over the event dimensions.
  Subclass Requirements:
    Subclasses are expected to implement `_forward` and one or both of:
      - `_inverse`, `_inverse_log_det_jacobian`,
      - `_inverse_and_inverse_log_det_jacobian`.
    If computation can be shared among `_inverse` and
    `_inverse_log_det_jacobian` it is preferable to implement
    `_inverse_and_inverse_log_det_jacobian`. This usually reduces
    graph-construction overhead because a `Distribution`'s implementation of
    `log_prob` will need to evaluate both the inverse Jacobian as well as the
    inverse function.
    If an additional use case needs just `inverse` or just
    `inverse_log_det_jacobian` then he or she may also wish to implement these
    functions to avoid computing the `inverse_log_det_jacobian` or the
    `inverse`, respectively.
  """
  # TODO(b/30476956): Try to remove constructor dependence on ndims.
  def __init__(self,
               batch_ndims=None,
               event_ndims=None,
               parameters=None,
               is_constant_jacobian=False,
               validate_args=False,
               dtype=None,
               name=None):
    """Constructs Bijector.
    A `Bijector` transforms random variables into new random variables.
    Examples:
    ```python
    # Create the Y = g(X) = X transform which operates on 4-Tensors of vectors.
    identity = Identity(batch_ndims=4, event_ndims=1)
    # Create the Y = g(X) = exp(X) transform which operates on matrices.
    exp = Exp(batch_ndims=0, event_ndims=2)
    ```
    See `Bijector` subclass docstring for more details and specific examples.
    Args:
      batch_ndims: number of dimensions associated with batch coordinates.
      event_ndims: number of dimensions associated with event coordinates.
      parameters: Dictionary of parameters used by this `Bijector`
      is_constant_jacobian: `Boolean` indicating that the Jacobian is not a
        function of the input.
      validate_args: `Boolean`, default `False`.  Whether to validate input with
        asserts. If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      dtype: `tf.dtype` supported by this `Bijector`. `None` means dtype is not
        enforced.
      name: The name to give Ops created by the initializer.
    """
    if batch_ndims is None or event_ndims is None:
      self._shaper = None  # Apparently subclass will create.
    else:
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          validate_args=validate_args)
    self._parameters = parameters or {}
    self._is_constant_jacobian = is_constant_jacobian
    self._validate_args = validate_args
    self._dtype = dtype
    # Fall back on the subclass name so every bijector has a usable name scope.
    self._name = name or type(self).__name__
  @property
  def shaper(self):
    """Returns shape object used to manage shape constraints."""
    return self._shaper
  @property
  def parameters(self):
    """Returns this `Bijector`'s parameters as a name/value dictionary."""
    return self._parameters
  @property
  def is_constant_jacobian(self):
    """Returns true iff the Jacobian is not a function of x.
    Note: Jacobian is either constant for both forward and inverse or neither.
    Returns:
      `Boolean`.
    """
    return self._is_constant_jacobian
  @property
  def validate_args(self):
    """Returns True if Tensor arguments will be validated."""
    return self._validate_args
  @property
  def dtype(self):
    """dtype of `Tensor`s transformable by this distribution."""
    return self._dtype
  @property
  def name(self):
    """Returns the string name of this `Bijector`."""
    return self._name
  def forward(self, x, name="forward"):
    """Returns the forward `Bijector` evaluation, i.e., Y = g(X).
    Args:
      x: `Tensor`. The input to the "forward" evaluation.
      name: The name to give this op.
    Returns:
      `Tensor`.
    Raises:
      TypeError: if `self.dtype` is specified and `x.dtype` is not
        `self.dtype`.
      AttributeError: if `_forward` is not implemented.
    """
    with self._name_scope(name, [x]):
      x = ops.convert_to_tensor(x, name="x")
      self._maybe_assert_dtype(x)
      return self._forward(x)
  def inverse(self, x, name="inverse"):
    """Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
    Args:
      x: `Tensor`. The input to the "inverse" evaluation.
      name: The name to give this op.
    Returns:
      `Tensor`.
    Raises:
      TypeError: if `self.dtype` is specified and `x.dtype` is not
        `self.dtype`.
      AttributeError: if neither `_inverse` nor
        `_inverse_and_inverse_log_det_jacobian` are implemented.
    """
    with self._name_scope(name, [x]):
      x = ops.convert_to_tensor(x, name="x")
      self._maybe_assert_dtype(x)
      try:
        return self._inverse(x)
      except AttributeError:
        # Since _inverse was not implemented, try to see if it's implemented
        # by the _inverse_and_inverse_log_det_jacobian member.
        return self._inverse_and_inverse_log_det_jacobian(x)[0]
  def inverse_log_det_jacobian(self, x, name="inverse_log_det_jacobian"):
    """Returns the (log o det o Jacobian o inverse)(x).
    Mathematically, returns: log(det(dY/dX g^{-1}))(Y).
    Note that forward_log_det_jacobian is the negative of this function. (See
    is_constant_jacobian for related proof.)
    Args:
      x: `Tensor`. The input to the "inverse" Jacobian evaluation.
      name: The name to give this op.
    Returns:
      `Tensor`.
    Raises:
      TypeError: if `self.dtype` is specified and `x.dtype` is not
        `self.dtype`.
      AttributeError: if neither `_inverse_log_det_jacobian` nor
        `_inverse_and_inverse_log_det_jacobian` are implemented.
    """
    with self._name_scope(name, [x]):
      x = ops.convert_to_tensor(x, name="x")
      self._maybe_assert_dtype(x)
      try:
        return self._inverse_log_det_jacobian(x)
      except AttributeError:
        # Since _inverse_log_det_jacobian was not implemented, try to see if
        # it's implemented by the _inverse_and_inverse_log_det_jacobian member.
        return self._inverse_and_inverse_log_det_jacobian(x)[1]
  def inverse_and_inverse_log_det_jacobian(
      self, x, name="inverse_and_inverse_log_det_jacobian"):
    """Returns both the inverse evaluation and inverse_log_det_jacobian.
    Enables possibly more efficient calculation when both inverse and
    corresponding Jacobian are needed.
    See `inverse()`, `inverse_log_det_jacobian()` for more details.
    Args:
      x: `Tensor`. The input to the "inverse" Jacobian evaluation.
      name: The name to give this op.
    Returns:
      `Tensor`.
    Raises:
      TypeError: if `self.dtype` is specified and `x.dtype` is not
        `self.dtype`.
      AttributeError: if neither `_inverse_and_inverse_log_det_jacobian` nor
        {`_inverse`, `_inverse_log_det_jacobian`} are implemented.
    """
    with self._name_scope(name, [x]):
      x = ops.convert_to_tensor(x, name="x")
      self._maybe_assert_dtype(x)
      try:
        return self._inverse_and_inverse_log_det_jacobian(x)
      except AttributeError:
        # Since _inverse_and_inverse_log_det_jacobian was not implemented, try
        # to see if we can separately use _inverse and
        # _inverse_log_det_jacobian members.
        return self._inverse(x), self._inverse_log_det_jacobian(x)
  @contextlib.contextmanager
  def _name_scope(self, name=None, values=None):
    """Helper function to standardize op scope."""
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=(
          (values or []) + list(self.parameters.values()))) as scope:
        yield scope
  def _maybe_assert_dtype(self, x):
    """Helper to check dtype when self.dtype is known."""
    if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:
      # Bug fix: the format arguments were previously swapped
      # ((self.dtype, x.dtype)), so the message reported the expected dtype
      # as the input's and vice versa.
      raise TypeError("Input had dtype %s but expected %s." %
                      (x.dtype, self.dtype))
class _Identity(_Bijector):
  """Bijector which computes Y = g(X) = X.
  Example Use:
  ```python
  # Create the Y=g(X)=X transform which is intended for Tensors with 1 batch
  # ndim and 1 event ndim (i.e., vector of vectors).
  identity = Identity(batch_ndims=1, event_ndims=1)
  x = [[1., 2],
       [3, 4]]
  x == identity.forward(x) == identity.inverse(x)
  ```
  """
  def __init__(self, validate_args=False, name="Identity"):
    """Constructs the identity `Bijector`.
    Args:
      validate_args: `Boolean`, default `False`. Whether to validate input with
        asserts.
      name: The name to give Ops created by the initializer.
    """
    # The base constructor already records is_constant_jacobian=True; the
    # redundant re-assignment of self._is_constant_jacobian that used to
    # follow this call has been removed.
    super(_Identity, self).__init__(
        batch_ndims=0,
        event_ndims=0,
        is_constant_jacobian=True,
        validate_args=validate_args,
        name=name)
  def _forward(self, x):
    # g(x) = x.
    return x
  def _inverse(self, x):
    # g^{-1}(x) = x.
    return x
  def _inverse_log_det_jacobian(self, x):
    # The Jacobian of the identity map is I, so log|det| is 0 everywhere.
    return constant_op.constant(0., dtype=x.dtype)
class _Exp(_Bijector):
  """Bijector which computes Y = g(X) = exp(X).
  Example Use:
  ```python
  # Create the Y=g(X)=exp(X) transform which works only on Tensors with 1
  # batch ndim and 2 event ndims (i.e., vector of matrices).
  exp = Exp(batch_ndims=1, event_ndims=2)
  x = [[[1., 2],
         [3, 4]],
        [[5, 6],
         [7, 8]]]
  exp(x) == exp.forward(x)
  log(x) == exp.inverse(x)
  ```
  Note: the exp(.) is applied element-wise but the Jacobian is a reduction
  over the event space.
  """
  # TODO(b/30476956): Try to remove constructor dependence on ndims.
  def __init__(self,
               event_ndims=0,
               validate_args=False,
               name="Exp"):
    """Constructs the exp `Bijector`.
    Args:
      event_ndims: number of dimensions associated with event coordinates.
      validate_args: `Boolean`, default `False`. Whether to validate input.
      name: The name to give Ops created by the initializer.
    """
    super(_Exp, self).__init__(
        batch_ndims=0,
        event_ndims=event_ndims,
        validate_args=validate_args,
        name=name)
  def _forward(self, x):
    # g(x) = exp(x), element-wise.
    return math_ops.exp(x)
  def _inverse(self, x):
    # g^{-1}(x) = log(x), element-wise.
    return math_ops.log(x)
  def _inverse_log_det_jacobian(self, x):
    # log|det J(g^{-1})(x)| = -sum(log(x)) reduced over event dims.
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    _, _, event_dims = self.shaper.get_dims(x)
    return -math_ops.reduce_sum(math_ops.log(x), reduction_indices=event_dims)
  def _inverse_and_inverse_log_det_jacobian(self, x):
    if self.shaper is None:
      raise ValueError("Jacobian cannot be computed with unknown event_ndims")
    # Compute y = log(x) once and reuse it for both the inverse and the
    # Jacobian reduction.
    y = math_ops.log(x)
    _, _, event_dims = self.shaper.get_dims(x)
    return y, -math_ops.reduce_sum(y, reduction_indices=event_dims)
class _ShiftAndScale(_Bijector):
  """Bijector which computes Y = g(X; loc, scale) = scale * X + loc.
  Example Use:
  ```python
  # No batch, scalar.
  mu = 0     # shape=[]
  sigma = 1  # shape=[]
  b = ShiftAndScale(loc=mu, scale=sigma)
  # b.shaper.batch_ndims == 0
  # b.shaper.event_ndims == 0
  # One batch, scalar.
  mu = ...    # shape=[b], b>0
  sigma = ... # shape=[b], b>0
  b = ShiftAndScale(loc=mu, scale=sigma)
  # b.shaper.batch_ndims == 1
  # b.shaper.event_ndims == 0
  # No batch, multivariate.
  mu = ...    # shape=[d],    d>0
  sigma = ... # shape=[d, d], d>0
  b = ShiftAndScale(loc=mu, scale=sigma, event_ndims=1)
  # b.shaper.batch_ndims == 0
  # b.shaper.event_ndims == 1
  # (B1*B2*...*Bb)-batch, multivariate.
  mu = ...    # shape=[B1,...,Bb, d],    b>0, d>0
  sigma = ... # shape=[B1,...,Bb, d, d], b>0, d>0
  b = ShiftAndScale(loc=mu, scale=sigma, event_ndims=1)
  # b.shaper.batch_ndims == b
  # b.shaper.event_ndims == 1
  # Mu is broadcast:
  mu = 1
  sigma = [I, I]  # I is a 3x3 identity matrix.
  b = ShiftAndScale(loc=mu, scale=sigma, event_ndims=1)
  x = numpy.ones(S + sigma.shape)
  b.forward(x) # == x + 1
  ```
  """
  def __init__(self,
               loc,
               scale,
               event_ndims=0,
               validate_args=False,
               name="ShiftAndScale"):
    """Constructs the shift-and-scale `Bijector`.
    Args:
      loc: `Tensor` used as the "shift" (mu) term.
      scale: `Tensor` used as the "scale" (sigma) term; must share loc's dtype.
      event_ndims: 0D `int32` `Tensor`, number of event dimensions.
      validate_args: `Boolean`, default `False`. Whether to validate input.
      name: The name to give Ops created by the initializer.
    Raises:
      TypeError: if loc/scale dtypes differ or event_ndims is not int32.
    """
    # _name_scope() reads self.name and self.parameters, so both must be
    # initialized before it is entered.
    self._parameters = {}
    self._name = name
    with self._name_scope("init", values=[loc, scale, event_ndims]):
      self._loc = ops.convert_to_tensor(loc, name="loc")
      self._scale = ops.convert_to_tensor(scale, name="scale")
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      if self.loc.dtype.base_dtype != self.scale.dtype.base_dtype:
        raise TypeError("%s.dtype=%s does not match %s.dtype=%s" %
                        (self.loc.name, self.loc.dtype, self.scale.name,
                         self.scale.dtype))
      if event_ndims.dtype.base_dtype != dtypes.int32.base_dtype:
        raise TypeError("%s.dtype=%s does not match %s" %
                        (event_ndims.name, event_ndims.dtype, dtypes.int32))
      self._scale, batch_ndims = self._process_scale(self.scale, event_ndims)
      super(_ShiftAndScale, self).__init__(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          parameters={"loc": self.loc, "scale": self.scale},
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name)
  def _process_scale(self, scale, event_ndims):
    """Helper to __init__ which gets scale in batch-ready form.
    This function expands dimensions of `scale` according to the following
    table:
                     event_ndims
    scale.ndims   0            1
              0  [1]+S+[1,1]   "silent error"
              1  [ ]+S+[1,1]   "silent error"
              2  [ ]+S+[1,1]   [1]+S+[ ]
              3  [ ]+S+[1,1]   [ ]+S+[ ]
            ...  (same)        (same)
    The idea is that we want to convert `scale` into something which can always
    work for, say, the left-hand argument of `batch_matmul`.
    Args:
      scale: `Tensor`.
      event_ndims: `Tensor` (0D, `int32`).
    Returns:
      scale: `Tensor` with dims expanded according to [above] table.
      batch_ndims: `Tensor` (0D, `int32`). The ndims of the `batch` portion.
    """
    # NOTE(review): the "silent error" cells above are not guarded against
    # here; invalid (scale.ndims, event_ndims) combinations are not rejected.
    ndims = array_ops.rank(scale)
    # Prepend one dim iff (ndims, event_ndims) is (0, 0) or (2, 1) — the rows
    # of the table above that start with [1].
    left = math_ops.select(
        math_ops.reduce_any([
            math_ops.reduce_all([
                math_ops.equal(ndims, 0),
                math_ops.equal(event_ndims, 0)
            ]),
            math_ops.reduce_all([
                math_ops.equal(ndims, 2),
                math_ops.equal(event_ndims, 1)
            ])]), 1, 0)
    # Append two singleton dims for the scalar-event case so scale acts like a
    # 1x1 matrix under batch_matmul.
    right = math_ops.select(math_ops.equal(event_ndims, 0), 2, 0)
    pad = array_ops.concat(0, (
        array_ops.ones([left], dtype=dtypes.int32),
        array_ops.shape(scale),
        array_ops.ones([right], dtype=dtypes.int32)))
    scale = array_ops.reshape(scale, pad)
    batch_ndims = ndims - 2 + right
    return scale, batch_ndims
  @property
  def loc(self):
    # The "shift" (mu) term.
    return self._loc
  @property
  def scale(self):
    # The "scale" (sigma) term, in batch-ready (matrix) form.
    return self._scale
  def _forward(self, x):
    # g(x) = scale @ x + loc, applied over the event dimensions.
    x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
    x = math_ops.batch_matmul(self.scale, x)
    x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
    x += self.loc
    return x
  def _inverse(self, x):
    # g^{-1}(y) = scale^{-1} (y - loc). matrix_triangular_solve implies
    # `scale` is assumed lower-triangular — NOTE(review): confirm callers
    # only pass triangular scale.
    x -= self.loc
    x, sample_shape = self.shaper.make_batch_of_event_sample_matrices(x)
    x = linalg_ops.matrix_triangular_solve(self.scale, x)
    x = self.shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
    return x
  def _inverse_log_det_jacobian(self, x):  # pylint: disable=unused-argument
    # For triangular scale, det == prod(diag), so
    # log|det(J(g^{-1}))| = -sum(log(diag(scale))). Constant in x.
    return -math_ops.reduce_sum(
        math_ops.log(array_ops.matrix_diag_part(self.scale)),
        reduction_indices=[-1])
| {
"content_hash": "642c48f904eeaa5ba15e443d092fcbe9",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 80,
"avg_line_length": 33.173010380622834,
"alnum_prop": 0.6215708772295817,
"repo_name": "MostafaGazar/tensorflow",
"id": "9f69d3cb21d56eca4e49b3de7534dc773fc525fa",
"size": "19863",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/bijector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "88155"
},
{
"name": "C++",
"bytes": "12907224"
},
{
"name": "CMake",
"bytes": "65581"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "42531"
},
{
"name": "HTML",
"bytes": "1171692"
},
{
"name": "Java",
"bytes": "143277"
},
{
"name": "JavaScript",
"bytes": "12972"
},
{
"name": "Jupyter Notebook",
"bytes": "1833435"
},
{
"name": "Makefile",
"bytes": "23390"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "136850"
},
{
"name": "Python",
"bytes": "11873486"
},
{
"name": "Shell",
"bytes": "267196"
},
{
"name": "TypeScript",
"bytes": "675176"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from jsonlibconfig.yacc import parser
from jsonlibconfig.lex import hextoint
def load(f, _hextoint=False):
    """Parse a libconfig document from a readable file object.

    Args:
        f: file-like object opened for reading.
        _hextoint: when True, hexadecimal literals are converted to ints
            (forwarded to ``loads``).

    Returns:
        The parsed document, as returned by ``loads``.
    """
    # f.read() already returns the whole remaining stream in one string; the
    # previous character-by-character re-concatenation loop was a quadratic
    # no-op.
    return loads(f.read(), _hextoint)
def loads(s, _hextoint=False):
    """Parse a libconfig document from the string ``s``.

    Args:
        s: libconfig source text.
        _hextoint: when True, hexadecimal literals are converted to ints.

    Returns:
        The parsed document.
    """
    # Configure the lexer's hex-literal handling before handing the text to
    # the parser.
    hextoint(_hextoint)
    document = parser.parse(s)
    return document
| {
"content_hash": "57c92c8959afcd4e60cb9a148165f0ad",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 38,
"avg_line_length": 20.125,
"alnum_prop": 0.6677018633540373,
"repo_name": "imZack/jsonlibconfig",
"id": "b8ff0c60239915ea1562d08a55b126b78de5bace",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonlibconfig/decoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23201"
}
],
"symlink_target": ""
} |
from telemetry.page import test_expectations
# Valid expectation conditions are:
#
# Operating systems:
# win, xp, vista, win7, mac, leopard, snowleopard, lion, mountainlion,
# mavericks, yosemite, linux, chromeos, android
#
# GPU vendors:
# amd, arm, broadcom, hisilicon, intel, imagination, nvidia, qualcomm,
# vivante
#
# Browser types:
# android-webview-shell, android-content-shell, debug
#
# ANGLE renderer:
# d3d9, d3d11, opengl
#
# Specific GPUs can be listed as a tuple with vendor name and device ID.
# Examples: ('nvidia', 0x1234), ('arm', 'Mali-T604')
# Device IDs must be paired with a GPU vendor.
#
# Sample usage in SetExpectations in subclasses:
# self.Fail('gl-enable-vertex-attrib.html',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Renderer names accepted as user-defined expectation conditions.
ANGLE_MODIFIERS = ['d3d9', 'd3d11', 'opengl']

# Browser types accepted as user-defined expectation conditions.
BROWSER_TYPE_MODIFIERS = [
    'android-webview-shell', 'android-content-shell', 'debug' ]
class _FlakyExpectation(object):
def __init__(self, expectation, max_num_retries):
self.expectation = expectation
self.max_num_retries = max_num_retries
class _FakeSharedPageState(object):
def __init__(self, browser):
self.browser = browser
class GpuTestExpectations(test_expectations.TestExpectations):
  """Test expectations extended with flaky retries, ANGLE-renderer and
  browser-type conditions for GPU tests."""

  def __init__(self):
    # Flaky expectations are kept separately from the base class's list.
    self._flaky_expectations = []
    super(GpuTestExpectations, self).__init__()

  def Flaky(self, url_pattern, conditions=None, bug=None, max_num_retries=2):
    # Registers a 'pass' expectation that may be retried up to
    # max_num_retries times when it matches a page.
    expectation = _FlakyExpectation(self.CreateExpectation(
        'pass', url_pattern, conditions, bug), max_num_retries)
    self._flaky_expectations.append(expectation)

  def GetFlakyRetriesForPage(self, page, browser):
    # Returns the retry budget of the first flaky expectation that applies
    # to this page/browser pair, or 0 when none applies.
    for fe in self._flaky_expectations:
      e = fe.expectation
      if self.ExpectationAppliesToPage(e, _FakeSharedPageState(browser), page):
        return fe.max_num_retries
    return 0

  def IsValidUserDefinedCondition(self, condition):
    # Add support for d3d9, d3d11 and opengl-specific expectations.
    if condition in ANGLE_MODIFIERS:
      return True
    # Add support for browser-type-specific expectations.
    if condition in BROWSER_TYPE_MODIFIERS:
      return True
    return super(GpuTestExpectations,
        self).IsValidUserDefinedCondition(condition)

  def ModifiersApply(self, shared_page_state, expectation):
    if not super(GpuTestExpectations, self).ModifiersApply(
        shared_page_state, expectation):
      return False
    # We'll only get here if the OS and GPU matched the expectation.
    # TODO(kbr): refactor _Expectation to be a public class so that
    # the GPU-specific properties can be moved into a subclass, and
    # run the unit tests from this directory on the CQ and the bots.
    # crbug.com/495868 crbug.com/495870

    # Check for presence of Android WebView.
    browser = shared_page_state.browser
    browser_expectations = [x for x in expectation.user_defined_conditions
                            if x in BROWSER_TYPE_MODIFIERS]
    # An empty browser condition list means "any browser type".
    browser_matches = ((not browser_expectations) or
                       browser.browser_type in browser_expectations)
    if not browser_matches:
      return False
    # Determine the active ANGLE renderer by inspecting the GL renderer
    # string reported in the browser's GPU system info.
    angle_renderer = ''
    gpu_info = None
    if browser.supports_system_info:
      gpu_info = browser.GetSystemInfo().gpu
    if gpu_info and gpu_info.aux_attributes:
      gl_renderer = gpu_info.aux_attributes.get('gl_renderer')
      if gl_renderer:
        if 'Direct3D11' in gl_renderer:
          angle_renderer = 'd3d11'
        elif 'Direct3D9' in gl_renderer:
          angle_renderer = 'd3d9'
        elif 'OpenGL' in gl_renderer:
          angle_renderer = 'opengl'
    angle_expectations = [x for x in expectation.user_defined_conditions
                          if x in ANGLE_MODIFIERS]
    # An empty ANGLE condition list means "any renderer".
    angle_matches = ((not angle_expectations) or
                     angle_renderer in angle_expectations)
    return angle_matches
| {
"content_hash": "3f44b9016c6bd32e07b4776f2406b2ba",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 36.695238095238096,
"alnum_prop": 0.6849208409031923,
"repo_name": "TheTypoMaster/chromium-crosswalk",
"id": "fdae5abd4ecf5357bf673d8b0d07d877203235f1",
"size": "4016",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "content/test/gpu/gpu_tests/gpu_test_expectations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9417055"
},
{
"name": "C++",
"bytes": "240920124"
},
{
"name": "CSS",
"bytes": "938860"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27258381"
},
{
"name": "Java",
"bytes": "14580273"
},
{
"name": "JavaScript",
"bytes": "20507007"
},
{
"name": "Makefile",
"bytes": "70992"
},
{
"name": "Objective-C",
"bytes": "1742904"
},
{
"name": "Objective-C++",
"bytes": "9967587"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "480579"
},
{
"name": "Python",
"bytes": "8519074"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from pypdflite import PDFCursor
def TestCursor(test_dir=None):
    """Smoke-test PDFCursor comparison operators by printing their results.

    Parameters
    ----------
    test_dir : optional
        Unused by the test; given a default so the script can be invoked
        without arguments.
    """
    cursor1 = PDFCursor(20, 30)
    cursor2 = PDFCursor(22, 50)
    # NOTE(review): assumes PDFCursor defines > and == -- confirm in pypdflite.
    print ("Should be False: ", cursor1 > cursor2)
    print ("Should be False: ", cursor1 == cursor2)
if __name__ == '__main__':
    # TestCursor declares a positional parameter; pass a placeholder so the
    # call does not raise TypeError (the value is not used by the test).
    TestCursor(None)
| {
"content_hash": "7de7641245ad4069ec11a4b4836c0d3b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 51,
"avg_line_length": 24.692307692307693,
"alnum_prop": 0.632398753894081,
"repo_name": "katerina7479/pypdflite",
"id": "063e3bb717de3ee738f43d5e26dd4240e49d30e5",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/testcursor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1080"
},
{
"name": "Python",
"bytes": "311418"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
} |
"""
There are a number of spherical balloons spread in two-dimensional space. For
each balloon, provided input is the start and end coordinates of the horizontal
diameter. Since it's horizontal, y-coordinates don't matter and hence the
x-coordinates of start and end of the diameter suffice. Start is always smaller
than end. There will be at most 10^4 balloons.
An arrow can be shot up exactly vertically from different points along the
x-axis. A balloon with xstart and xend bursts by an arrow shot at x if
x_start ≤ x ≤ x_end. There is no limit to the number of arrows that can be shot.
An arrow once shot keeps travelling up infinitely. The problem is to find the
minimum number of arrows that must be shot to burst all balloons.
Example:
Input:
[[10,16], [2,8], [1,6], [7,12]]
Output:
2
Explanation:
One way is to shoot one arrow for example at x = 6 (bursting the balloons [2,8]
and [1,6]) and another arrow at x = 11 (bursting the other two balloons).
"""
import heapq
class Balloon:
    """An interval [s, e]; ordered by its end coordinate for heap use."""

    def __init__(self, s, e):
        self.s, self.e = s, e

    def __lt__(self, other):
        # Python 3 heapq relies on __lt__ (the old __cmp__ hook is gone);
        # comparing end points keeps the earliest-ending balloon on top.
        return self.e < other.e
class Solution:
    def findMinArrowShots(self, points):
        """Minimum number of arrows needed to burst all balloons.

        Greedy: a group of mutually overlapping balloons is burst by one
        arrow shot at the smallest end coordinate of the group.  Scanning
        balloons in order of start, only that smallest end matters, so the
        heap used previously is unnecessary -- a single variable tracking
        the current group's minimum end suffices (O(n log n) sort,
        O(1) extra space).

        :type points: List[List[int]]
        :rtype: int
        """
        if not points:
            return 0
        points.sort(key=lambda p: p[0])
        arrows = 1
        # Smallest end coordinate among balloons covered by the current arrow.
        reach = points[0][1]
        for start, end in points[1:]:
            if start > reach:
                # No overlap with the current group: a new arrow is needed.
                arrows += 1
                reach = end
            else:
                reach = min(reach, end)
        return arrows
if __name__ == "__main__":
    # Worked example from the problem statement: arrows at x=6 and x=11.
    assert Solution().findMinArrowShots([[10,16], [2,8], [1,6], [7,12]]) == 2
| {
"content_hash": "3d9d2b0e105bd6216bc886177a092730",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 27.970588235294116,
"alnum_prop": 0.6219768664563617,
"repo_name": "algorhythms/LeetCode",
"id": "fee91d01388b6e79f21de9c3dec4b6798e826156",
"size": "1925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "452 Minimum Number of Arrows to Burst Balloons.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1444167"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from box.models import Box
from django.shortcuts import get_object_or_404, render, redirect
from django.forms import ModelForm
from django.contrib.auth.decorators import login_required
#ModelForm to create a box
class BoxCreateForm(ModelForm):
    """Form used to create a Box; exposes the tag and storage fields."""
    class Meta:
        model = Box
        fields = ['tag', 'storage']
#ModelForm to update a box, only the storage can change
class BoxUpdateForm(ModelForm):
    """Form used to edit a Box; only the storage field may change."""
    class Meta:
        model = Box
        fields = ['storage']
#List all the boxes
def index(request):
    """Render the listing page with every Box in the database."""
    context = {'box_list': Box.objects.all()}
    return render(request, 'index.html', context)
#Create a box
@login_required
def create(request):
    """Show the creation form; on valid POST, save the Box and go home."""
    form = BoxCreateForm(request.POST or None)
    if not form.is_valid():
        return render(request, 'create.html', {'form': form})
    form.save()
    return redirect('box:index')
#Give details about one box
@login_required
def read(request, box_id):
    """Show the detail page for one Box; 404 when box_id is unknown."""
    box = get_object_or_404(Box, id=box_id)
    return render(request, 'read.html', {'box': box})
#Update a box
@login_required
def update(request, box_id):
    """Edit the storage of an existing Box; 404 when box_id is unknown."""
    instance = get_object_or_404(Box, id=box_id)
    form = BoxUpdateForm(request.POST or None, instance=instance,
                         initial={'storage': instance.storage})
    if not form.is_valid():
        return render(request, 'update.html', {'form': form})
    form.save()
    return redirect('box:read', box_id)
#Delete a box
@login_required
def delete(request):
    """Delete the Box whose id is posted as 'box_id', then go home.

    Expects a POST carrying 'box_id'; raises 404 when the id does not
    match an existing Box.
    """
    get_object_or_404(Box, id=request.POST['box_id']).delete()
    return redirect('box:index')
| {
"content_hash": "73ad7b8748c53ef66b0a755da120ac68",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 94,
"avg_line_length": 30.433962264150942,
"alnum_prop": 0.6788592684438933,
"repo_name": "gallardjm/TUM_WebTech_DjangoExample",
"id": "adb1ca7024820f6de36a9b4f9d905edff907d96c",
"size": "1613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webtech_django1/box/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2962"
},
{
"name": "HTML",
"bytes": "3883"
},
{
"name": "Python",
"bytes": "7693"
}
],
"symlink_target": ""
} |
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps,
                     use_tpu):
  """Creates an optimizer training op.

  Builds a linear-warmup + linear-decay learning-rate schedule around
  AdamWeightDecayOptimizer, clips gradients by global norm, and returns
  a train op that also increments the global step.
  """
  global_step = tf.train.get_or_create_global_step()

  learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)

  # Implements linear decay of the learning rate.
  learning_rate = tf.train.polynomial_decay(
      learning_rate,
      global_step,
      num_train_steps,
      end_learning_rate=0.0,
      power=1.0,
      cycle=False)

  # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
  # learning rate will be `global_step/num_warmup_steps * init_lr`.
  if num_warmup_steps:
    global_steps_int = tf.cast(global_step, tf.int32)
    warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)

    global_steps_float = tf.cast(global_steps_int, tf.float32)
    warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)

    warmup_percent_done = global_steps_float / warmup_steps_float
    warmup_learning_rate = init_lr * warmup_percent_done

    # is_warmup selects between the warmup ramp and the decayed schedule.
    is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
    learning_rate = ((1.0 - is_warmup) * learning_rate +
                     is_warmup * warmup_learning_rate)

  # It is recommended that you use this optimizer for fine tuning, since this
  # is how the model was trained (note that the Adam m/v variables are NOT
  # loaded from init_checkpoint.)
  optimizer = AdamWeightDecayOptimizer(
      learning_rate=learning_rate,
      weight_decay_rate=0.01,
      beta_1=0.9,
      beta_2=0.999,
      epsilon=1e-6,
      exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])

  if use_tpu:
    optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

  tvars = tf.trainable_variables()
  grads = tf.gradients(loss, tvars)

  # This is how the model was pre-trained.
  (grads, _) = tf.clip_by_global_norm(grads, clip_norm=5.0)

  train_op = optimizer.apply_gradients(
      zip(grads, tvars), global_step=global_step)

  # AdamWeightDecayOptimizer.apply_gradients (below) does not increment the
  # global step itself, so it is bumped manually here.
  new_global_step = global_step + 1
  train_op = tf.group(train_op, [global_step.assign(new_global_step)])
  return train_op
class AdamWeightDecayOptimizer(tf.train.Optimizer):
  """A basic Adam optimizer that includes "correct" L2 weight decay."""

  def __init__(self,
               learning_rate,
               weight_decay_rate=0.0,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=1e-6,
               exclude_from_weight_decay=None,
               name="AdamWeightDecayOptimizer"):
    """Constructs a AdamWeightDecayOptimizer.

    exclude_from_weight_decay is a list of regex fragments; any parameter
    whose name matches one of them is exempt from weight decay.
    """
    super(AdamWeightDecayOptimizer, self).__init__(False, name)

    self.learning_rate = learning_rate
    self.weight_decay_rate = weight_decay_rate
    self.beta_1 = beta_1
    self.beta_2 = beta_2
    self.epsilon = epsilon
    self.exclude_from_weight_decay = exclude_from_weight_decay

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """See base class.

    Note: global_step is accepted for interface compatibility but is not
    incremented here; callers must update it themselves.
    """
    assignments = []
    for (grad, param) in grads_and_vars:
      if grad is None or param is None:
        continue

      param_name = self._get_variable_name(param.name)

      # Non-trainable slot variables holding the Adam first/second moments.
      m = tf.get_variable(
          name=param_name + "/adam_m",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())
      v = tf.get_variable(
          name=param_name + "/adam_v",
          shape=param.shape.as_list(),
          dtype=tf.float32,
          trainable=False,
          initializer=tf.zeros_initializer())

      # Standard Adam update.
      next_m = (
          tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
      next_v = (
          tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
                                                    tf.square(grad)))

      update = next_m / (tf.sqrt(next_v) + self.epsilon)

      # Just adding the square of the weights to the loss function is *not*
      # the correct way of using L2 regularization/weight decay with Adam,
      # since that will interact with the m and v parameters in strange ways.
      #
      # Instead we want ot decay the weights in a manner that doesn't interact
      # with the m/v parameters. This is equivalent to adding the square
      # of the weights to the loss with plain (non-momentum) SGD.
      if self._do_use_weight_decay(param_name):
        update += self.weight_decay_rate * param

      update_with_lr = self.learning_rate * update

      next_param = param - update_with_lr

      assignments.extend(
          [param.assign(next_param),
           m.assign(next_m),
           v.assign(next_v)])
    return tf.group(*assignments, name=name)

  def _do_use_weight_decay(self, param_name):
    """Whether to use L2 weight decay for `param_name`."""
    if not self.weight_decay_rate:
      return False
    if self.exclude_from_weight_decay:
      for r in self.exclude_from_weight_decay:
        if re.search(r, param_name) is not None:
          return False
    return True

  def _get_variable_name(self, param_name):
    """Get the variable name from the tensor name."""
    # Strips the trailing ":0"-style output index from a tensor name.
    m = re.match("^(.*):\\d+$", param_name)
    if m is not None:
      param_name = m.group(1)
    return param_name
| {
"content_hash": "00ed674fda32a7cc6281bd2c9ce9c610",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 84,
"avg_line_length": 37.51592356687898,
"alnum_prop": 0.5884550084889644,
"repo_name": "FeiSun/BERT4Rec",
"id": "cbfeb960aae3fc253e73a9f72272e7d57c5b0ee1",
"size": "6505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optimization.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "113557"
},
{
"name": "Shell",
"bytes": "6098"
}
],
"symlink_target": ""
} |
import operator
import euler
from Arista import Arista
# Union-find state shared by the functions below: `padre` maps each vertex
# to its parent, `ranking` holds the union-by-rank rank of each root.
padre = dict()
ranking = dict()
def kruskal(graph):
    """Build a minimum spanning tree of *graph* with Kruskal's algorithm.

    Edges are considered in increasing weight order and accepted when their
    endpoints lie in different union-find components.  The resulting MST is
    then doubled in both directions (see duplicar_camino) so an Eulerian
    tour can later be extracted from it.

    NOTE(review): assumes *graph* exposes iter_vertexes()/iter_edges() and
    edges with src/dst/weight attributes -- confirm against the graph class.
    """
    for vertice in graph.iter_vertexes():
        generar_set(vertice)
    mst = set()
    aristas = list(graph.iter_edges())
    aristas = sorted(aristas, key=operator.attrgetter('weight'))
    for arista in aristas:
        vertice1 = arista.src
        vertice2 = arista.dst
        # Different roots: the edge joins two components, so keep it.
        if buscar_vertice(vertice1) != buscar_vertice(vertice2):
            union(vertice1, vertice2)
            mst.add(arista)
    return duplicar_camino(mst)
def duplicar_camino(mst):
    """Return *mst* plus, for every edge, its reversed twin (same weight).

    Doubling every edge makes each vertex's degree even, which guarantees
    the multigraph admits an Eulerian tour.
    """
    return_mst = set(mst)
    for arista in mst:
        nueva_arista = Arista(arista.dst, arista.src, arista.weight)
        return_mst.add(nueva_arista)
    return return_mst
def generar_set(vertice):
    """Make *vertice* a singleton union-find set (its own parent, rank 0)."""
    padre[vertice] = vertice
    ranking[vertice] = 0
def union(vertice1, vertice2):
    """Merge the union-find sets of the two vertices using union by rank."""
    raizA = buscar_vertice(vertice1)
    raizB = buscar_vertice(vertice2)
    if raizA != raizB:
        if ranking[raizA] > ranking[raizB]:
            padre[raizB] = raizA
        else:
            padre[raizA] = raizB
            # Equal ranks: the surviving root's rank grows by one.
            if ranking[raizA] == ranking[raizB]: ranking[raizB] += 1
def buscar_vertice(vertice):
    """Find the root of *vertice*'s set, compressing the path on the way."""
    if padre[vertice] != vertice:
        padre[vertice] = buscar_vertice(padre[vertice])
    return padre[vertice]
def encontrar_camino_de_euler(graph):
    """Compute an Eulerian tour over *graph* (an iterable of Arista edges).

    The edges are flattened to [src, dst, weight] triples for the euler
    module, and the resulting vertex sequence is mapped back to edges and
    a total weight via procesar_euler.

    Returns a (edge-list, total-weight) tuple.
    """
    # Build the triple list with a comprehension; the counter variable and
    # the `object` name (which shadowed the builtin) were unnecessary.
    mst = [[arista.src, arista.dst, arista.weight] for arista in graph]
    camino_euler = euler.find_eulerian_tour(mst)
    camino_procesado, peso_total = procesar_euler(camino_euler, graph)
    return camino_procesado, peso_total
def obtener_arista(mst, src, dst):
    """Return the first edge in *mst* joining src -> dst, or None."""
    return next(
        (arista for arista in mst
         if (arista.src, arista.dst) == (src, dst)),
        None)
def eliminar_arista(mst, arista_a_eliminar):
    """Remove from *mst* the first edge matching src/dst; return 1 if removed."""
    objetivo = (arista_a_eliminar.src, arista_a_eliminar.dst)
    for arista in mst:
        if (arista.src, arista.dst) == objetivo:
            mst.remove(arista)
            return 1
def procesar_euler(camino, mst):
    """Turn a vertex sequence *camino* into an (edge list, total weight) pair.

    Each consecutive vertex pair is resolved back to an edge of *mst* via
    obtener_arista, accumulating weights along the way.
    """
    retorno = []
    peso_total = 0
    anterior = None
    for vertice in camino:
        # The first vertex only seeds the "previous" pointer.
        if anterior is None :
            anterior = vertice
            continue
        arista = obtener_arista(mst,anterior,vertice)
        retorno.append(arista)
        peso_total += arista.weight
        anterior = vertice
    return retorno, peso_total
| {
"content_hash": "46ba509d9d3ca24be5cdc0f0dbb37d76",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 87,
"avg_line_length": 29.3625,
"alnum_prop": 0.6304810557684121,
"repo_name": "GFibrizo/TPS_7529",
"id": "5c582c5ab83ae4863072e466549eeb51e1f47e26",
"size": "2349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TP3/TSP_aprox/Kruskal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46398"
}
],
"symlink_target": ""
} |
# Method names the metaclass skips when counting calls: counting
# increment_references itself would re-enter the counter, and
# print_warnings would add noise to the stats.
black_list = ["increment_references", "print_warnings"]
class MockGPIOMetaClass(type):
    '''
    This class is meant to be a meta class that is purely responsible
    for incrementing reference counts and logging arguments for the
    purpose of unit testing since MockGPIO will only ever be used
    in an environment that is not production.
    '''
    def __getattribute__(cls, name):
        # Intercept every class-attribute access; functions exposing
        # __func__ (e.g. classmethods) are wrapped so each call is tallied
        # via increment_references before the real function runs.
        attr = object.__getattribute__(cls, name)
        if hasattr(attr, '__func__'):
            def newfunc(*args, **kwargs):
                if name not in black_list:
                    cls.increment_references(name, args)
                result = attr.__func__(cls, *args, **kwargs)
                return result
            return newfunc
        else:
            return attr

    def increment_references(cls, name, args):
        # references[name] is a (call_count, [arg_tuples]) pair: the first
        # call seeds (1, [args]); later calls bump the count and append.
        if name not in cls.references.keys():
            cls.references[name] = (1, [args])
        else:
            original = cls.references[name]
            original[1].append(args)
            cls.references[name] = (original[0]+1, original[1])
class MockGPIO(object):
    '''
    A class to mock out the RPi.GPIO library for local testing
    '''
    # NOTE(review): __metaclass__ is the Python 2 metaclass hook; under
    # Python 3 it is ignored and the reference counting never engages --
    # confirm the target interpreter (py3 spelling:
    # class MockGPIO(metaclass=MockGPIOMetaClass)).
    __metaclass__ = MockGPIOMetaClass

    # Constants mirroring RPi.GPIO's API surface.
    BOARD = "This is a board."
    OUT = 0
    IN = 1
    _warnings = True
    references = {} #read comments on metaclass, this is just for unit testing.

    @classmethod
    def print_warnings(cls, warning_string):
        # Emit the message only while warnings are enabled.
        if MockGPIO._warnings == True:
            print(warning_string)

    @classmethod
    def setmode(cls, board):
        MockGPIO.print_warnings("Setting mode to %s"%board)

    @classmethod
    def setwarnings(cls, toggle):
        MockGPIO._warnings = toggle
        MockGPIO.print_warnings("Setting warnings to %s"%toggle)

    @classmethod
    def setup(cls, pin, mode):
        MockGPIO.print_warnings("Setting pin %d to mode %d"%(pin, mode))

    @classmethod
    def output(cls, pin, value):
        MockGPIO.print_warnings("Setting pin %d to value %s"%(pin,value))
| {
"content_hash": "54bb4c0f0b286a0105faccdf7891669f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 32.54838709677419,
"alnum_prop": 0.6060455896927651,
"repo_name": "hackernight/portal",
"id": "67332972fee3a8e131e18b42417021be355da573",
"size": "2018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hardware/mockgpio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6549"
}
],
"symlink_target": ""
} |
import argparse
import csv
import logging
import re
import os
# Module-level logger following the standard __name__ convention.
logger = logging.getLogger(__name__)
def main():
    """Clean the Quora dataset: collapse newlines/whitespace in every field.

    Reads the raw CSV named on the command line, normalizes whitespace in
    each cell (newlines become spaces, whitespace runs collapse to one
    space), and writes a fully-quoted CSV named <input>_cleaned<ext> into
    the requested output folder.  The input header row is skipped and is
    not reproduced in the output.
    """
    argparser = argparse.ArgumentParser(description=("Clean the Quora dataset "
                                                     "by removing newlines in "
                                                     "the data."))
    argparser.add_argument("dataset_input_path", type=str,
                           help=("The path to the raw Quora "
                                 "dataset to clean."))
    argparser.add_argument("dataset_output_path", type=str,
                           help=("The *folder* to write the "
                                 "cleaned file to. The name will just have "
                                 "_cleaned appended to it, before the "
                                 "extension"))
    config = argparser.parse_args()

    # Get the data
    logger.info("Reading csv at {}".format(config.dataset_input_path))

    # Iterate through the CSV, removing anomalous whitespace
    # and making a list of lists the clean csv.
    logger.info("Cleaning csv")
    clean_rows = []
    # newline="" is required by the csv module so quoted embedded newlines
    # are parsed correctly and no spurious blank lines appear on Windows.
    with open(config.dataset_input_path, newline="") as f:
        reader = csv.reader(f)
        # skip the header
        next(reader)
        for row in reader:
            clean_row = []
            for item in row:
                # normalize whitespace in each string in each row
                item_no_newlines = re.sub(r"\n", " ", item)
                clean_item = re.sub(r"\s+", " ", item_no_newlines)
                clean_row.append(clean_item)
            clean_rows.append(clean_row)

    input_filename_full = os.path.basename(config.dataset_input_path)
    input_filename, input_ext = os.path.splitext(input_filename_full)
    out_path = os.path.join(config.dataset_output_path,
                            input_filename + "_cleaned" + input_ext)
    logger.info("Writing output to {}".format(out_path))
    with open(out_path, "w", newline="") as f:
        writer = csv.writer(f, quoting=csv.QUOTE_ALL)
        writer.writerows(clean_rows)
if __name__ == "__main__":
    # Configure root logging before main() so the module logger's info
    # messages are visible when run as a script.
    logging.basicConfig(format=("%(asctime)s - %(levelname)s - "
                                "%(name)s - %(message)s"),
                        level=logging.INFO)
    main()
| {
"content_hash": "bcd2f139749b1be196741a59bbcbc149",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 37.916666666666664,
"alnum_prop": 0.5292307692307693,
"repo_name": "nelson-liu/paraphrase-id-tensorflow",
"id": "6ae270e6584a5450ac16ae602615bde2a660d293",
"size": "2275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/data/quora/clean_quora_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3473"
},
{
"name": "Python",
"bytes": "344238"
},
{
"name": "Shell",
"bytes": "4169"
}
],
"symlink_target": ""
} |
__author__ = 'Yan'
import pandas
import sklearn.metrics
import statistics
import numpy as np
import matplotlib.pylab as plt
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
# bug fix for display formats to avoid run time errors
pandas.set_option('display.float_format', lambda x:'%.2f'%x)

#load the data
# NOTE(review): the backslash in this relative path is Windows-specific and
# '\s' relies on Python leaving unknown escapes intact -- consider a raw
# string or forward slashes.
data = pandas.read_csv('..\separatedData.csv')

# convert to numeric format (unparseable values become NaN via 'coerce')
data["breastCancer100th"] = pandas.to_numeric(data["breastCancer100th"], errors='coerce')
data["meanSugarPerson"] = pandas.to_numeric(data["meanSugarPerson"], errors='coerce')
data["meanFoodPerson"] = pandas.to_numeric(data["meanFoodPerson"], errors='coerce')
data["meanCholesterol"] = pandas.to_numeric(data["meanCholesterol"], errors='coerce')

# listwise deletion of missing values
sub1 = data[['breastCancer100th', 'meanFoodPerson', 'meanCholesterol', 'meanSugarPerson']].dropna()

# Create the conditions to a new variable named incidence_cancer that will categorize the meanSugarPerson answers
# Global mean incidence used as the binarization threshold below.
meanIncidence = statistics.mean(sub1['breastCancer100th'])
def incidence_cancer (row):
    """Binarize a row's breast-cancer incidence against the global mean.

    Returns 0 at-or-below the module-level meanIncidence, 1 above it.
    (sub1 has already been dropna()'d, so NaN rows do not reach here.)
    """
    if row['breastCancer100th'] <= meanIncidence : return 0 # Incidence of breast cancer is below the
                                                            # average of the incidence of all countries.
    if row['breastCancer100th'] > meanIncidence : return 1  # Incidence of breast cancer is above the average
                                                            # of the incidence of all countries.
# Add the new variable sugar_consumption to subData
sub1['incidence_cancer'] = sub1.apply (lambda row: incidence_cancer (row),axis=1)

#Split into training and testing sets
predictors = sub1[[ 'meanSugarPerson', 'meanFoodPerson', 'meanCholesterol']]
targets = sub1['incidence_cancer']

#Train = 60%, Test = 40%
pred_train, pred_test, tar_train, tar_test = train_test_split(predictors, targets, test_size=.4)

#Build model on training data
classifier=RandomForestClassifier(n_estimators=25)
classifier=classifier.fit(pred_train,tar_train)

predictions=classifier.predict(pred_test)

# Evaluate on the held-out 40% split.
confusion_matrix = sklearn.metrics.confusion_matrix(tar_test,predictions)
accuracy_score = sklearn.metrics.accuracy_score(tar_test, predictions)

print (confusion_matrix)
print (accuracy_score)

# fit an Extra Trees model to the data
model = ExtraTreesClassifier()
model.fit(pred_train,tar_train)

# display the relative importance of each attribute
print(model.feature_importances_)

"""
Running a different number of trees and see the effect
of that on the accuracy of the prediction
"""
trees=range(25)
accuracy=np.zeros(25)

# Refit the forest with 1..25 trees and record test accuracy for each size.
for idx in range(len(trees)):
    classifier=RandomForestClassifier(n_estimators=idx + 1)
    classifier=classifier.fit(pred_train,tar_train)
    predictions=classifier.predict(pred_test)
    accuracy[idx]=sklearn.metrics.accuracy_score(tar_test, predictions)

plt.cla()
plt.plot(trees, accuracy)

print(accuracy)
print(statistics.mean(accuracy))
| {
"content_hash": "1a74b53a13887c9fc3b2c2594a056882",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 113,
"avg_line_length": 37.523809523809526,
"alnum_prop": 0.7201776649746193,
"repo_name": "yan-duarte/yan-duarte.github.io",
"id": "888d94fe0d60b5460a0d98ad24b3dae63ab1225e",
"size": "3152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archives/mlda-assignment2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36491"
},
{
"name": "HTML",
"bytes": "14485"
},
{
"name": "JavaScript",
"bytes": "1280"
},
{
"name": "Jupyter Notebook",
"bytes": "2298339"
},
{
"name": "Python",
"bytes": "48167"
},
{
"name": "Ruby",
"bytes": "3154"
}
],
"symlink_target": ""
} |
""" test parquet compat """
import datetime
import os
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas.util import testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
# setup engines & skips
@pytest.fixture(
    params=[
        pytest.param(
            "fastparquet",
            marks=pytest.mark.skipif(
                not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
            ),
        ),
        pytest.param(
            "pyarrow",
            marks=pytest.mark.skipif(
                not _HAVE_PYARROW, reason="pyarrow is not installed"
            ),
        ),
    ]
)
def engine(request):
    # Parametrized over both engines; each param is skipped when its
    # backing library is missing.
    return request.param


@pytest.fixture
def pa():
    # pyarrow-only tests: skip (rather than fail) when it is absent.
    if not _HAVE_PYARROW:
        pytest.skip("pyarrow is not installed")
    return "pyarrow"


@pytest.fixture
def fp():
    # fastparquet-only tests: skip (rather than fail) when it is absent.
    if not _HAVE_FASTPARQUET:
        pytest.skip("fastparquet is not installed")
    return "fastparquet"


@pytest.fixture
def df_compat():
    # Minimal frame representable by both engines.
    return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
    # Frame restricted to dtypes both engines can round-trip; the
    # commented-out columns are known cross-engine incompatibilities.
    df = pd.DataFrame(
        {
            "a": list("abc"),
            "b": list(range(1, 4)),
            # 'c': np.arange(3, 6).astype('u1'),
            "d": np.arange(4.0, 7.0, dtype="float64"),
            "e": [True, False, True],
            "f": pd.date_range("20130101", periods=3),
            # 'g': pd.date_range('20130101', periods=3,
            #                    tz='US/Eastern'),
            # 'h': pd.date_range('20130101', periods=3, freq='ns')
        }
    )
    return df


@pytest.fixture
def df_full():
    # Wide dtype coverage, including NaN/None/NaT cases.
    return pd.DataFrame(
        {
            "string": list("abc"),
            "string_with_nan": ["a", np.nan, "c"],
            "string_with_none": ["a", None, "c"],
            "bytes": [b"foo", b"bar", b"baz"],
            "unicode": ["foo", "bar", "baz"],
            "int": list(range(1, 4)),
            "uint": np.arange(3, 6).astype("u1"),
            "float": np.arange(4.0, 7.0, dtype="float64"),
            "float_with_nan": [2.0, np.nan, 3.0],
            "bool": [True, False, True],
            "datetime": pd.date_range("20130101", periods=3),
            "datetime_with_nat": [
                pd.Timestamp("20130101"),
                pd.NaT,
                pd.Timestamp("20130103"),
            ],
        }
    )
def check_round_trip(
    df,
    engine=None,
    path=None,
    write_kwargs=None,
    read_kwargs=None,
    expected=None,
    check_names=True,
    repeat=2,
):
    """Verify parquet serializer and deserializer produce the same results.

    Performs a pandas to disk and disk to pandas round trip,
    then compares the 2 resulting DataFrames to verify equality.

    Parameters
    ----------
    df: Dataframe
    engine: str, optional
        'pyarrow' or 'fastparquet'
    path: str, optional
    write_kwargs: dict of str:str, optional
    read_kwargs: dict of str:str, optional
    expected: DataFrame, optional
        Expected deserialization result, otherwise will be equal to `df`
    check_names: list of str, optional
        Closed set of column names to be compared
    repeat: int, optional
        How many times to repeat the test
    """
    write_kwargs = write_kwargs or {"compression": None}
    read_kwargs = read_kwargs or {}

    if expected is None:
        expected = df

    if engine:
        write_kwargs["engine"] = engine
        read_kwargs["engine"] = engine

    def compare(repeat):
        # Repeated round trips catch state leaking between writes.
        for _ in range(repeat):
            df.to_parquet(path, **write_kwargs)
            with catch_warnings(record=True):
                actual = read_parquet(path, **read_kwargs)
            tm.assert_frame_equal(expected, actual, check_names=check_names)

    # When no path was given, round-trip through a self-cleaning temp file.
    if path is None:
        with tm.ensure_clean() as path:
            compare(repeat)
    else:
        compare(repeat)
def test_invalid_engine(df_compat):
    # An unknown engine name must be rejected with ValueError.
    with pytest.raises(ValueError):
        check_round_trip(df_compat, "foo", "bar")


def test_options_py(df_compat, pa):
    # use the set option
    with pd.option_context("io.parquet.engine", "pyarrow"):
        check_round_trip(df_compat)


def test_options_fp(df_compat, fp):
    # use the set option
    with pd.option_context("io.parquet.engine", "fastparquet"):
        check_round_trip(df_compat)


def test_options_auto(df_compat, fp, pa):
    # use the set option
    with pd.option_context("io.parquet.engine", "auto"):
        check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
    # Explicit engine names always resolve to their implementation; "auto"
    # follows the io.parquet.engine option (pyarrow preferred for "auto").
    assert isinstance(get_engine("pyarrow"), PyArrowImpl)
    assert isinstance(get_engine("fastparquet"), FastParquetImpl)

    with pd.option_context("io.parquet.engine", "pyarrow"):
        assert isinstance(get_engine("auto"), PyArrowImpl)
        assert isinstance(get_engine("pyarrow"), PyArrowImpl)
        assert isinstance(get_engine("fastparquet"), FastParquetImpl)

    with pd.option_context("io.parquet.engine", "fastparquet"):
        assert isinstance(get_engine("auto"), FastParquetImpl)
        assert isinstance(get_engine("pyarrow"), PyArrowImpl)
        assert isinstance(get_engine("fastparquet"), FastParquetImpl)

    with pd.option_context("io.parquet.engine", "auto"):
        assert isinstance(get_engine("auto"), PyArrowImpl)
        assert isinstance(get_engine("pyarrow"), PyArrowImpl)
        assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
    # cross-compat with differing reading/writing engines
    # (write with pyarrow, read back with fastparquet)
    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=pa, compression=None)

        result = read_parquet(path, engine=fp)
        tm.assert_frame_equal(result, df)

        # Column subsetting must also work across engines.
        result = read_parquet(path, engine=fp, columns=["a", "d"])
        tm.assert_frame_equal(result, df[["a", "d"]])


def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
    # cross-compat with differing reading/writing engines
    # (write with fastparquet, read back with pyarrow)
    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=fp, compression=None)

        with catch_warnings(record=True):
            result = read_parquet(path, engine=pa)
            tm.assert_frame_equal(result, df)

            result = read_parquet(path, engine=pa, columns=["a", "d"])
            tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
    """Shared helpers for the parquet test classes."""

    def check_error_on_write(self, df, engine, exc):
        # check that we are raising the exception on writing
        with tm.ensure_clean() as path:
            with pytest.raises(exc):
                to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
    """Engine-agnostic round-trip tests, run for both pyarrow and fastparquet."""
    def test_error(self, engine):
        """Writing anything that is not a DataFrame must raise ValueError."""
        for obj in [
            pd.Series([1, 2, 3]),
            1,
            "foo",
            pd.Timestamp("20130101"),
            np.array([1, 2, 3]),
        ]:
            self.check_error_on_write(obj, engine, ValueError)
    def test_columns_dtypes(self, engine):
        """String (unicode) column names survive a round trip."""
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
        # unicode
        df.columns = ["foo", "bar"]
        check_round_trip(df, engine)
    def test_columns_dtypes_invalid(self, engine):
        """Non-string column labels (numeric, bytes, datetime) are rejected."""
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
        # numeric
        df.columns = [0, 1]
        self.check_error_on_write(df, engine, ValueError)
        # bytes
        df.columns = [b"foo", b"bar"]
        self.check_error_on_write(df, engine, ValueError)
        # python object
        df.columns = [
            datetime.datetime(2011, 1, 1, 0, 0),
            datetime.datetime(2011, 1, 1, 1, 1),
        ]
        self.check_error_on_write(df, engine, ValueError)
    @pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
    def test_compression(self, engine, compression):
        """Round trip under each compression codec; skip if the codec is absent."""
        if compression == "snappy":
            pytest.importorskip("snappy")
        elif compression == "brotli":
            pytest.importorskip("brotli")
        df = pd.DataFrame({"A": [1, 2, 3]})
        check_round_trip(df, engine, write_kwargs={"compression": compression})
    def test_read_columns(self, engine):
        # GH18154
        """``read_parquet(columns=...)`` returns only the requested columns."""
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
        expected = pd.DataFrame({"string": list("abc")})
        check_round_trip(
            df, engine, expected=expected, read_kwargs={"columns": ["string"]}
        )
    def test_write_index(self, engine):
        """Several index types round-trip; fastparquet loses index names."""
        check_names = engine != "fastparquet"
        df = pd.DataFrame({"A": [1, 2, 3]})
        check_round_trip(df, engine)
        indexes = [
            [2, 3, 4],
            pd.date_range("20130101", periods=3),
            list("abc"),
            [1, 3, 4],
        ]
        # non-default index
        for index in indexes:
            df.index = index
            check_round_trip(df, engine, check_names=check_names)
        # index with meta-data
        df.index = [0, 1, 2]
        df.index.name = "foo"
        check_round_trip(df, engine)
    def test_write_multiindex(self, pa):
        """A row MultiIndex round-trips (pyarrow only)."""
        # Not supported in fastparquet as of 0.1.3 or older pyarrow version
        engine = pa
        df = pd.DataFrame({"A": [1, 2, 3]})
        index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
        df.index = index
        check_round_trip(df, engine)
    def test_write_column_multiindex(self, engine):
        """MultiIndex columns cannot be serialized and must raise."""
        # column multi-index
        mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
        df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
        self.check_error_on_write(df, engine, ValueError)
    def test_multiindex_with_columns(self, pa):
        """Row MultiIndex combined with column selection on read (pyarrow only)."""
        engine = pa
        dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
        df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
        index1 = pd.MultiIndex.from_product(
            [["Level1", "Level2"], dates], names=["level", "date"]
        )
        # same index but without level names — both variants must round-trip
        index2 = index1.copy(names=None)
        for index in [index1, index2]:
            df.index = index
            check_round_trip(df, engine)
            check_round_trip(
                df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
            )
    def test_write_ignoring_index(self, engine):
        """``index=False`` drops default, custom and multi-indexes from the file."""
        # ENH 20768
        # Ensure index=False omits the index from the written Parquet file.
        df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})
        write_kwargs = {"compression": None, "index": False}
        # Because we're dropping the index, we expect the loaded dataframe to
        # have the default integer index.
        expected = df.reset_index(drop=True)
        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
        # Ignore custom index
        df = pd.DataFrame(
            {"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
        )
        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
        # Ignore multi-indexes as well.
        arrays = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            ["one", "two", "one", "two", "one", "two", "one", "two"],
        ]
        df = pd.DataFrame(
            {"one": [i for i in range(8)], "two": [-i for i in range(8)]}, index=arrays
        )
        expected = df.reset_index(drop=True)
        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
class TestParquetPyArrow(Base):
    """Tests specific to the pyarrow engine."""
    def test_basic(self, pa, df_full):
        """Full-dtype frame plus pyarrow-only types round-trips."""
        df = df_full
        # additional supported types for pyarrow
        df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
        df["bool_with_none"] = [True, None, True]
        check_round_trip(df, pa)
    # TODO: This doesn't fail on all systems; track down which
    @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)", strict=False)
    def test_basic_subset_columns(self, pa, df_full):
        """Column subsetting on read of the full-dtype frame."""
        # GH18628
        df = df_full
        # additional supported types for pyarrow
        df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
        check_round_trip(
            df,
            pa,
            expected=df[["string", "int"]],
            read_kwargs={"columns": ["string", "int"]},
        )
    def test_duplicate_columns(self, pa):
        """Duplicate column names are rejected."""
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
        self.check_error_on_write(df, pa, ValueError)
    def test_unsupported(self, pa):
        """Period, timedelta and mixed-object columns are rejected."""
        # period
        df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
        # pyarrow 0.11 raises ArrowTypeError
        # older pyarrows raise ArrowInvalid
        self.check_error_on_write(df, pa, Exception)
        # timedelta
        df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
        self.check_error_on_write(df, pa, NotImplementedError)
        # mixed python objects
        df = pd.DataFrame({"a": ["a", 1, 2.0]})
        # pyarrow 0.11 raises ArrowTypeError
        # older pyarrows raise ArrowInvalid
        self.check_error_on_write(df, pa, Exception)
    def test_categorical(self, pa):
        """Categorical data round-trips but comes back as object dtype."""
        # supported in >= 0.7.0
        df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
        # de-serialized as object
        expected = df.assign(a=df.a.astype(object))
        check_round_trip(df, pa, expected=expected)
    def test_s3_roundtrip(self, df_compat, s3_resource, pa):
        """Round trip through the (mocked) s3 filesystem."""
        # GH #19134
        check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet")
    def test_partition_cols_supported(self, pa, df_full):
        """``partition_cols`` writes a partitioned dataset readable by pyarrow."""
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        with tm.ensure_clean_dir() as path:
            df.to_parquet(path, partition_cols=partition_cols, compression=None)
            import pyarrow.parquet as pq
            dataset = pq.ParquetDataset(path, validate_schema=False)
            assert len(dataset.partitions.partition_names) == 2
            assert dataset.partitions.partition_names == set(partition_cols)
    def test_empty_dataframe(self, pa):
        """An empty frame round-trips."""
        # GH #27339
        df = pd.DataFrame()
        check_round_trip(df, pa)
class TestParquetFastParquet(Base):
    """Tests specific to the fastparquet engine."""
    @td.skip_if_no("fastparquet", min_version="0.2.1")
    def test_basic(self, fp, df_full):
        """Full-dtype frame plus tz-aware datetime and timedelta round-trips."""
        df = df_full
        df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="US/Eastern")
        df["timedelta"] = pd.timedelta_range("1 day", periods=3)
        check_round_trip(df, fp)
    @pytest.mark.skip(reason="not supported")
    def test_duplicate_columns(self, fp):
        """Duplicate column names are rejected (currently skipped)."""
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
        self.check_error_on_write(df, fp, ValueError)
    def test_bool_with_none(self, fp):
        """Nullable booleans come back as float16 with NaN for missing."""
        df = pd.DataFrame({"a": [True, None, False]})
        expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
        check_round_trip(df, fp, expected=expected)
    def test_unsupported(self, fp):
        """Period and mixed-object columns are rejected."""
        # period
        df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
        self.check_error_on_write(df, fp, ValueError)
        # mixed
        df = pd.DataFrame({"a": ["a", 1, 2.0]})
        self.check_error_on_write(df, fp, ValueError)
    def test_categorical(self, fp):
        """Categorical dtype round-trips unchanged with fastparquet."""
        df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
        check_round_trip(df, fp)
    def test_filter_row_groups(self, fp):
        """Row-group filters on read select matching rows only."""
        d = {"a": list(range(0, 3))}
        df = pd.DataFrame(d)
        with tm.ensure_clean() as path:
            # row_group_offsets=1 puts each row in its own row group
            df.to_parquet(path, fp, compression=None, row_group_offsets=1)
            result = read_parquet(path, fp, filters=[("a", "==", 0)])
        assert len(result) == 1
    def test_s3_roundtrip(self, df_compat, s3_resource, fp):
        """Round trip through the (mocked) s3 filesystem."""
        # GH #19134
        check_round_trip(df_compat, fp, path="s3://pandas-test/fastparquet.parquet")
    def test_partition_cols_supported(self, fp, df_full):
        """``partition_cols`` writes a partitioned dataset."""
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        with tm.ensure_clean_dir() as path:
            df.to_parquet(
                path,
                engine="fastparquet",
                partition_cols=partition_cols,
                compression=None,
            )
            assert os.path.exists(path)
            import fastparquet  # noqa: F811
            actual_partition_cols = fastparquet.ParquetFile(path, False).cats
            assert len(actual_partition_cols) == 2
    def test_partition_on_supported(self, fp, df_full):
        """fastparquet's native ``partition_on`` keyword is honoured."""
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        with tm.ensure_clean_dir() as path:
            df.to_parquet(
                path,
                engine="fastparquet",
                compression=None,
                partition_on=partition_cols,
            )
            assert os.path.exists(path)
            import fastparquet  # noqa: F811
            actual_partition_cols = fastparquet.ParquetFile(path, False).cats
            assert len(actual_partition_cols) == 2
    def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
        """Passing both ``partition_cols`` and ``partition_on`` must raise."""
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        with pytest.raises(ValueError):
            with tm.ensure_clean_dir() as path:
                df.to_parquet(
                    path,
                    engine="fastparquet",
                    compression=None,
                    partition_on=partition_cols,
                    partition_cols=partition_cols,
                )
    def test_empty_dataframe(self, fp):
        """An empty frame round-trips; fastparquet names the written index."""
        # GH #27339
        df = pd.DataFrame()
        expected = df.copy()
        expected.index.name = "index"
        check_round_trip(df, fp, expected=expected)
| {
"content_hash": "238363229729ca378ec177af07d31145",
"timestamp": "",
"source": "github",
"line_count": 580,
"max_line_length": 88,
"avg_line_length": 31.54655172413793,
"alnum_prop": 0.5710772257747172,
"repo_name": "toobaz/pandas",
"id": "a04fb9fd502577bd582269c1b863c58463d0eeb9",
"size": "18297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/io/test_parquet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394843"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "15031623"
},
{
"name": "Shell",
"bytes": "27585"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
"""Crawler that sends notifications as soon as servers
on Kimsufi/OVH become available for purchase"""
import json
import sys
import os
import re
import logging
import importlib
import tornado.ioloop
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPError
from tornado.gen import coroutine
# Python 3 imports
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
# Module-wide logging: timestamped messages, INFO level by default.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
_logger = logging.getLogger(__name__)
# Directory containing this file; used to resolve bundled mapping/config JSON.
CURRENT_PATH = os.path.dirname(__file__)
def parse_json_file(filename):
    """Read *filename* and return its content parsed as JSON.

    On invalid JSON, log an error pointing at an online validator and exit
    the process with status 1.
    """
    with open(filename, 'r') as handle:
        raw = handle.read()
    try:
        return json.loads(raw)
    except ValueError:
        _logger.error(
            "Parsing file %s failed. Check syntax with a JSON validator:"
            "\nhttp://jsonlint.com/?json=%s", filename, quote(raw))
        sys.exit(1)
class Crawler(object):
    """Crawler responsible for fetching availability and monitoring states.

    Polls the OVH availability API on a tornado PeriodicCallback and invokes
    ``state_change_callback(state, message)`` whenever a tracked state flips
    from False to True.
    """
    def __init__(self, state_change_callback):
        # set properties
        self.state_change_callback = state_change_callback
        # load mappings: API reference -> human server type, region -> zones
        self.SERVER_TYPES = parse_json_file(
            os.path.join(CURRENT_PATH, 'mapping/server_types.json'))
        self.REGIONS = parse_json_file(
            os.path.join(CURRENT_PATH, 'mapping/regions.json'))
        # set private vars
        self.API_URL = ("https://ws.ovh.com/dedicated/r2/ws.dispatcher"
                        "/getAvailability2")
        self.STATES = {}            # state_id -> bool (last observed value)
        self.HTTP_ERRORS = []       # consecutive HTTP errors since last success
        self.interval = 8 # seconds between iterations
        self.periodic_cb = None     # set by the caller after construction
        self.ioloop = None          # set by the caller after construction
        self.http_client = AsyncHTTPClient()
    def update_state(self, state, value, message=None):
        """Update state of particular event"""
        # if state is new, init it as False
        if state not in self.STATES:
            self.STATES[state] = False
        # compare new value to old value
        # NOTE(review): identity comparison (`is not`) relies on values being
        # the bool singletons — equality (`!=`) would be safer; confirm.
        if value is not self.STATES[state]:
            _logger.debug("State change - %s: %s", state, value)
            # notify, if state changed from False to True
            if value and not self.STATES[state]:
                self.state_change_callback(state, message)
            # save the new value
            self.STATES[state] = value
    def resume_periodic_cb(self):
        # Restart the periodic crawl after a rate-limit pause.
        _logger.info("Crawler resumed")
        self.periodic_cb.start()
    @coroutine
    def run(self):
        """Run a crawler iteration"""
        progress()
        try:
            # request OVH availability API asynchronously
            resp = yield self.http_client.fetch(self.API_URL,
                                                request_timeout=REQUEST_TIMEOUT)
        except HTTPError as ex:
            # Internal Server Error
            self.HTTP_ERRORS.append(ex)
            # only complain after several consecutive failures
            if len(self.HTTP_ERRORS) > 5:
                if all([e.code == 500 for e in self.HTTP_ERRORS]):
                    _logger.error("Server continiously returns error 500 and "
                                  "may be down, check the status manually: %s",
                                  self.API_URL)
                else:
                    _logger.error("Too many HTTP Errors: %s", self.HTTP_ERRORS)
                self.HTTP_ERRORS = []
            return
        except Exception as gex:
            # Also catch other errors.
            _logger.error("Socket Error: %s", str(gex))
            return
        # successful fetch: reset the consecutive-error window
        if self.HTTP_ERRORS:
            del self.HTTP_ERRORS[:]
        response_json = json.loads(resp.body.decode('utf-8'))
        if response_json.get('error'):
            if response_json['error']['status'] == 451:
                # rate-limited: the error message tells us how long to wait
                match = re.search(r'will be replenished in (\d+) seconds.',
                                  response_json['error'].get('message', ''))
                timeout = int(match.group(1)) if match else 28800
                _logger.error("Rate-limit error, have to pause for %d seconds",
                              timeout)
                self.periodic_cb.stop()
                self.ioloop.call_later(timeout, self.resume_periodic_cb)
                # NOTE(review): doubling self.interval does not reconfigure the
                # already-created PeriodicCallback; confirm intended effect.
                self.interval *= 2
                _logger.info("New request interval: %d seconds", self.interval)
            return
        if not response_json or not response_json['answer']:
            _logger.error("No answer from API: %s", response_json)
            return
        availability = response_json['answer']['availability']
        for item in availability:
            # get server type of availability item
            server_type = self.SERVER_TYPES.get(item['reference'])
            # return if this server type is not in mapping
            if not server_type:
                continue
            # make a flat list of zones where servers of this type are available
            available_zones = set([
                e['zone'] for e in item['zones']
                if e['availability'] not in ['unavailable', 'unknown']])
            _logger.debug('%s is available in %s', server_type, available_zones)
            # iterate over all regions and update availability states
            for region, places in self.REGIONS.items():
                server_available = bool(available_zones.intersection(places))
                state_id = '%s_available_in_%s' % (server_type.lower(),
                                                   region.lower())
                message = {
                    'title': "{0} is available".format(server_type),
                    'text': "Server {server} is available in {region}".format(
                        server=server_type, region=region.capitalize()),
                    'url': "http://www.kimsufi.com/en/index.xml"
                }
                # SoYouStart references get their own shop URL
                if 'sys' in item['reference'] or 'bk' in item['reference']:
                    message['url'] = 'http://www.soyoustart.com/de/essential-server/'
                self.update_state(state_id, server_available, message)
def bell():
    """Ring the terminal bell immediately."""
    stream = sys.stdout
    stream.write('\a')
    stream.flush()
def progress():
    """Print a single dot (no newline) to show crawl activity."""
    stream = sys.stdout
    stream.write('.')
    stream.flush()
if __name__ == "__main__":
    # load user config
    _CONFIG = parse_json_file(os.path.join(CURRENT_PATH, 'config.json'))
    # init notifier: maps the name used in config.json to the dotted path of
    # the implementing class
    _NOTIFIERS = {
        'pushover': 'notifiers.pushover_notifier.PushoverNotifier',
        'email': 'notifiers.email_notifier.EmailNotifier',
        'osx': 'notifiers.osx_notifier.OSXNotifier',
        'popup': 'notifiers.popup_notifier.PopupNotifier',
        'popup_pywin': 'notifiers.popup_pywin_notifier.PopupPywinNotifier',
        'smsapi': 'notifiers.smsapi_notifier.SmsApiNotifier',
        'xmpp': 'notifiers.xmpp_notifier.XMPPNotifier',
        'pushbullet': 'notifiers.pushbullet_notifier.PushbulletNotifier',
        'file': 'notifiers.file_notifier.FileNotifier',
        'freemobile': 'notifiers.freemobile_notifier.FreemobileNotifier',
    }
    # Select notifier, 'email' by default
    if 'notifier' not in _CONFIG:
        _logger.warning("No notifier selected in config, 'email' will be used")
        _CONFIG['notifier'] = 'email'
    # Instantiate notifier class dynamically
    try:
        _NOTIFIER_PATH = _NOTIFIERS[_CONFIG['notifier']]
        _NOTIFIER_FILE, _NOTIFIER_CLASSNAME = _NOTIFIER_PATH.rsplit('.', 1)
        _NOTIFIER_MODULE = importlib.import_module(_NOTIFIER_FILE)
        NOTIFIER = getattr(_NOTIFIER_MODULE, _NOTIFIER_CLASSNAME)(_CONFIG)
    except Exception as ex:
        _logger.exception("Notifier loading failed, check config for errors")
        sys.exit(1)
    # prepare states tracked by the user, e.g. 'ks-1_available_in_europe'
    TRACKED_STATES = []
    for server in _CONFIG['servers']:
        TRACKED_STATES.append(
            '%s_available_in_%s' % (server.lower(), _CONFIG['region'].lower()))
    _logger.info('Tracking states: %s', TRACKED_STATES)
    # define state-change callback to notify the user
    def state_changed(state, message=None):
        """Trigger notifications"""
        message = message or {}
        if state in TRACKED_STATES:
            _logger.info("Will notify: %s", state)
            NOTIFIER.notify(**message)
            bell()
    # Check and set request timeout (read by Crawler.run)
    REQUEST_TIMEOUT = _CONFIG.get('request_timeout', 30)
    # Init the crawler; PeriodicCallback takes the interval in milliseconds
    crawler = Crawler(state_change_callback=state_changed)
    crawler.periodic_cb = tornado.ioloop.PeriodicCallback(
        crawler.run, crawler.interval * 1000)
    crawler.periodic_cb.start()
    # start the IOloop (blocks until interrupted)
    _logger.info("Starting main loop")
    crawler.ioloop = tornado.ioloop.IOLoop.instance()
    try:
        crawler.ioloop.start()
    except KeyboardInterrupt:
        _logger.info("Terminated by user. Bye.")
        sys.exit(0)
| {
"content_hash": "ea2ce8dfa41237067ee25aff103154ec",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 85,
"avg_line_length": 39.736607142857146,
"alnum_prop": 0.590720143804067,
"repo_name": "cyber01/kimsufi-crawler",
"id": "e19bc3bf03cc6189353e94f78138a1c5b48f6872",
"size": "8947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24044"
}
],
"symlink_target": ""
} |
from build_util import *
# Entry point: delegate to build_util to build the named project target.
if __name__ == "__main__":
    build_project_by_name("luabind_plus_test")
| {
"content_hash": "704abb3a0d05692f3585d242f81345f1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 43,
"avg_line_length": 24.25,
"alnum_prop": 0.6391752577319587,
"repo_name": "Napoleon314/luabind-plus",
"id": "5ed6e170d0c6acf8b61dae7fac34cfb1c49d3d63",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "611630"
},
{
"name": "C++",
"bytes": "186049"
},
{
"name": "CMake",
"bytes": "2167"
},
{
"name": "Lua",
"bytes": "1892"
},
{
"name": "Python",
"bytes": "15140"
}
],
"symlink_target": ""
} |
"""Base anchor generator.
The job of the anchor generator is to create (or load) a collection
of bounding boxes to be used as anchors.
Generated anchors are assumed to match some convolutional grid or list of grid
shapes. For example, we might want to generate anchors matching an 8x8
feature map and a 4x4 feature map. If we place 3 anchors per grid location
on the first feature map and 6 anchors per grid location on the second feature
map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total.
To support fully convolutional settings, feature map shapes are passed
dynamically at generation time. The number of anchors to place at each location
is static --- implementations of AnchorGenerator must always be able return
the number of anchors that it uses per location for each feature map.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
class AnchorGenerator(object, metaclass=ABCMeta):
  """Abstract base class for anchor generators."""
  # FIX: the metaclass is declared with the Python 3 `metaclass=` keyword.
  # The previous `__metaclass__ = ABCMeta` class attribute is silently
  # ignored on Python 3, so @abstractmethod was never actually enforced.

  @abstractmethod
  def name_scope(self):
    """Name scope.

    Must be defined by implementations.

    Returns:
      a string representing the name scope of the anchor generation operation.
    """
    pass

  @property
  def check_num_anchors(self):
    """Whether to dynamically check the number of anchors generated.

    Can be overridden by implementations that would like to disable this
    behavior.

    Returns:
      a boolean controlling whether the Generate function should dynamically
      check the number of anchors generated against the mathematically
      expected number of anchors.
    """
    return True

  @abstractmethod
  def num_anchors_per_location(self):
    """Returns the number of anchors per spatial location.

    Returns:
      a list of integers, one for each expected feature map to be passed to
      the `generate` function.
    """
    pass

  def generate(self, feature_map_shape_list, **params):
    """Generates a collection of bounding boxes to be used as anchors.

    TODO(rathodv): remove **params from argument list and make stride and
    offsets (for multiple_grid_anchor_generator) constructor arguments.

    Args:
      feature_map_shape_list: list of (height, width) pairs in the format
        [(height_0, width_0), (height_1, width_1), ...] that the generated
        anchors must align with. Pairs can be provided as 1-dimensional
        integer tensors of length 2 or simply as tuples of integers.
      **params: parameters for anchor generation op

    Returns:
      boxes_list: a list of BoxLists each holding anchor boxes corresponding to
        the input feature map shapes.

    Raises:
      ValueError: if the number of feature map shapes does not match the length
        of NumAnchorsPerLocation.
    """
    if self.check_num_anchors and (
        len(feature_map_shape_list) != len(self.num_anchors_per_location())):
      raise ValueError('Number of feature maps is expected to equal the length '
                       'of `num_anchors_per_location`.')
    with tf.name_scope(self.name_scope()):
      anchors_list = self._generate(feature_map_shape_list, **params)
      if self.check_num_anchors:
        # Tie the identity ops to the count assertion so the check runs
        # whenever the anchors are consumed (TF1 graph-mode idiom).
        with tf.control_dependencies([
            self._assert_correct_number_of_anchors(
                anchors_list, feature_map_shape_list)]):
          for item in anchors_list:
            item.set(tf.identity(item.get()))
      return anchors_list

  @abstractmethod
  def _generate(self, feature_map_shape_list, **params):
    """To be overridden by implementations.

    Args:
      feature_map_shape_list: list of (height, width) pairs in the format
        [(height_0, width_0), (height_1, width_1), ...] that the generated
        anchors must align with.
      **params: parameters for anchor generation op

    Returns:
      boxes_list: a list of BoxList, each holding a collection of N anchor
        boxes.
    """
    pass

  def _assert_correct_number_of_anchors(self, anchors_list,
                                        feature_map_shape_list):
    """Assert that correct number of anchors was generated.

    Args:
      anchors_list: A list of box_list.BoxList object holding anchors generated.
      feature_map_shape_list: list of (height, width) pairs in the format
        [(height_0, width_0), (height_1, width_1), ...] that the generated
        anchors must align with.

    Returns:
      Op that raises InvalidArgumentError if the number of anchors does not
      match the number of expected anchors.
    """
    expected_num_anchors = 0
    actual_num_anchors = 0
    for num_anchors_per_location, feature_map_shape, anchors in zip(
        self.num_anchors_per_location(), feature_map_shape_list, anchors_list):
      # anchors-per-location times the number of grid cells in this map
      expected_num_anchors += (num_anchors_per_location
                               * feature_map_shape[0]
                               * feature_map_shape[1])
      actual_num_anchors += anchors.num_boxes()
    return tf.assert_equal(expected_num_anchors, actual_num_anchors)
| {
"content_hash": "f1e7cf81a41fe231ebcab291ac80c009",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 80,
"avg_line_length": 37.148148148148145,
"alnum_prop": 0.6805583250249252,
"repo_name": "cshallue/models",
"id": "f2797ef77d3e83597e18db10e5ba87f24364d8aa",
"size": "5705",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "research/object_detection/core/anchor_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "2829707"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "13149300"
},
{
"name": "Shell",
"bytes": "146035"
}
],
"symlink_target": ""
} |
import argparse
import os
import signal
import subprocess
import sys
import time
from typing import Optional

from django.core.management.base import BaseCommand, CommandError, CommandParser
class PidExists(Exception):
    """Raised when a pidfile already exists, i.e. the service is running."""
class MissingPid(Exception):
    """Raised when no pidfile exists, i.e. the service does not appear to run."""
class Command(BaseCommand):
    """
    Daemon controller for the :class:`carrot.management.commands.carrot` service.

    Supported modes: ``start``, ``stop``, ``restart`` and ``status``. The
    consumer service is launched as a detached subprocess and tracked through
    a pidfile.
    """
    # Path to the pidfile; set in handle() from the --pidfile option.
    pid_file: Optional[str] = None
    # Parsed command-line options, kept so restart can re-use them.
    options: dict = {}

    def delete_pid(self) -> None:
        """
        Deletes the pid file, if it exists
        """
        if self.pid_file and os.path.exists(self.pid_file):
            os.remove(self.pid_file)

    def stop(self, hard_stop: bool = False) -> None:
        """
        Attempts to stop the process. Performs the following actions:

        1. Raises :class:`MissingPid` if the pidfile does not exist
        2. Runs :func:`os.kill` on a loop until an :class:`OSError` is raised
        3. Deletes the pidfile once the process is no longer running

        If *hard_stop* is used, the process will not wait for the consumers to
        finish running their current tasks

        :param bool hard_stop: if True, sends a sigkill instead of a sigterm
        :raises MissingPid: when no pidfile exists
        """
        # BUGFIX: this used to be `assert self.pid, MissingPid(...)`, which
        # raises AssertionError (not MissingPid) and disappears entirely under
        # `python -O`. handle() catches MissingPid on restart, so the actual
        # exception type matters.
        if not self.pid:
            raise MissingPid('PIDFILE does not exist. The process may not be running')
        _signal = signal.SIGKILL if hard_stop else signal.SIGTERM
        while True:
            try:
                # re-send the signal until the process disappears, at which
                # point os.kill raises OSError (ProcessLookupError)
                os.kill(self.pid, _signal)
                time.sleep(0.1)
            except OSError:
                break
        self.stdout.write(self.style.SUCCESS('Process has been stopped'))
        self.delete_pid()

    def add_arguments(self, parser: CommandParser) -> None:
        """
        This Command inherits the same arguments as
        :class:`carrot.management.commands.carrot.Command`, with the addition
        of one positional argument: **mode**
        """
        parser.add_argument('mode')
        parser.add_argument("-l", "--logfile", type=str, help='The path to the log file',
                            default='/var/log/carrot.log')
        parser.add_argument("-p", "--pidfile", type=str, help='The path to the pid file',
                            default='/var/run/carrot.pid')
        parser.add_argument('--no-scheduler', dest='run_scheduler', action='store_false', default=False,
                            help='Do not start scheduled tasks (only runs consumer sets)')
        parser.add_argument('--hard', dest='force', action='store_true', default=False,
                            help='Force stop the consumer (can only be used with stop|restart modes). USE WITH CAUTION')
        parser.set_defaults(run_scheduler=True)
        parser.set_defaults(testmode=False)
        parser.add_argument('--consumer-class', type=str, help='The consumer class to use',
                            default='carrot.objects.Consumer')
        parser.add_argument('--loglevel', type=str, default='DEBUG', help='The logging level. Must be one of DEBUG, '
                                                                          'INFO, WARNING, ERROR, CRITICAL')
        parser.add_argument('--testmode', dest='testmode', action='store_true', default=False,
                            help='Run in test mode. Prevents the command from running as a service. Should only be '
                                 'used when running Carrot\'s tests')

    @property
    def pid(self) -> Optional[int]:
        """
        Opens and reads the file stored at `self.pid_file`, and returns the
        content as an integer. If the pidfile doesn't exist or cannot be read,
        None is returned.
        """
        if self.pid_file:
            try:
                with open(self.pid_file, 'r') as pf:
                    return int(pf.read().strip())
            except IOError:
                pass
        return None

    def write_pid(self, pid: int) -> None:
        """
        Writes the pid to the pidfile
        """
        if self.pid_file:
            with open(self.pid_file, 'w') as f:
                f.write(str(pid) + '\n')

    def start(self, **kwargs) -> None:
        """
        Starts the carrot service as a subprocess and records the pid.

        :raises PidExists: when a pidfile already exists (service running)
        """
        if self.pid:
            raise PidExists('Process already running!')
        if kwargs:
            self.options = kwargs
        # Build the command line for the carrot consumer subprocess
        options: list = [sys.executable, sys.argv[0], 'carrot', '--verbosity', str(kwargs.get('verbosity', 2)),
                         '--logfile', self.options['logfile'], '--loglevel', self.options['loglevel']]
        if not self.options['run_scheduler']:
            options.append('--no-scheduler')
        if self.options['consumer_class'] != 'carrot.objects.Consumer':
            options.append('--consumer-class')
            options.append(self.options['consumer_class'])
        # detach: discard the child's output and only keep its pid
        proc = subprocess.Popen(options, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        self.write_pid(proc.pid)

    def handle(self, *args, **options) -> None:
        """
        The main handler. Dispatches to start/stop/restart/status based on the
        supplied mode.

        :param options: handled by *argparse*
        :raises CommandError: on an invalid mode or option combination
        """
        mode = options.pop('mode')
        hard_stop = options.pop('force', False)
        # BUGFIX: previously raised argparse.ArgumentError with a string as its
        # first argument — ArgumentError expects an argparse action object, so
        # constructing it that way blows up with AttributeError. CommandError
        # is the Django convention for invalid command usage.
        if hard_stop and mode not in ['stop', 'restart']:
            raise CommandError('The --hard option is only valid for stop|restart modes')
        self.pid_file = options.pop('pidfile')
        if mode not in ['start', 'stop', 'restart', 'status']:
            raise CommandError('mode must be start, stop, restart or status')
        if mode == 'start':
            self.stdout.write('Attempting to start the process')
            self.start(**options)
            self.stdout.write(self.style.SUCCESS('Process started successfully with pid: %s' % self.pid))
        elif mode == 'stop':
            self.stdout.write('Attempting to stop the process. Please wait...')
            self.stop(hard_stop)
        elif mode == 'restart':
            try:
                self.stdout.write('Attempting to stop the process. Please wait...')
                self.stop(hard_stop)
            except MissingPid:
                self.stdout.write(self.style.WARNING('Unable to stop the process because it isn\'t running'))
            self.stdout.write('Attempting to start the process')
            self.start(**options)
            self.stdout.write(self.style.SUCCESS('Process restarted successfully'))
        elif mode == 'status':
            if self.pid:
                self.stdout.write(self.style.SUCCESS('Service is running. PID: %i' % self.pid))
            else:
                self.stdout.write(self.style.ERROR('Service is NOT running'))
        # NOTE: exits unconditionally after every mode (original behaviour)
        sys.exit()
| {
"content_hash": "178ed9e8b73298f40f289a174923e203",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 120,
"avg_line_length": 38.1731843575419,
"alnum_prop": 0.5837845748573102,
"repo_name": "chris104957/django-carrot",
"id": "8f91c96c2731638b4d43ff9ed6b4e4a95e3ce30e",
"size": "6833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "carrot/management/commands/carrot_daemon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "312"
},
{
"name": "HTML",
"bytes": "682"
},
{
"name": "Makefile",
"bytes": "7665"
},
{
"name": "Python",
"bytes": "107193"
},
{
"name": "Vue",
"bytes": "34985"
}
],
"symlink_target": ""
} |
import Tkinter as tk
#Learning how to use classes and Tkinter
class Application(tk.Frame):
    """A minimal Tk frame holding a Quit button and a Hello World button."""
    def __init__(self, master=None):
        # Tkinter.Frame is an old-style class in Python 2, so call the
        # base initializer directly rather than via super().
        tk.Frame.__init__(self, master)
        self.grid()
        self.createWidgets()
    def createWidgets(self):
        # Both buttons terminate the event loop via quit.
        for attr, label in (('quitButton', 'Quit'), ('helloWorld', 'Hello World!')):
            button = tk.Button(self, text=label, command=self.quit)
            button.grid()
            setattr(self, attr, button)
# Build the UI, set the toplevel window title, and enter the Tk event loop.
app = Application()
app.master.title('Hello World!')
app.mainloop()
| {
"content_hash": "0f8ad93d34692da2ea01538c88c6178f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 27.055555555555557,
"alnum_prop": 0.7166324435318275,
"repo_name": "Ca2Patton/PythonStuff",
"id": "23b78e92c24f3ade388280477adde94ce37a0c38",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tkGui.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30997"
}
],
"symlink_target": ""
} |
"""This module contains a wrapper class of the HFTS Grasp Sampler."""
from hfts_grasp_planner.core import HFTSSampler, HFTSNode
from sampler import SamplingResult
import logging
import numpy
class HFTSNodeDataExtractor:
    """Adapter that pulls hand configurations out of HFTS hierarchy nodes."""
    def extractData(self, hierarchyInfo):
        """Return the hand configuration stored in the given hierarchy node."""
        hand_config = hierarchyInfo.get_hand_config()
        return hand_config
    def getCopyFunction(self):
        """Return the callable used to copy extracted data (numpy.copy)."""
        return numpy.copy
class GraspGoalSampler:
    """ Wrapper class for the HFTS Grasp Planner/Sampler that allows a full black box usage."""
    # TODO data_path is specific to reading objects from a filesystem. Better probably to pass ObjectIO
    def __init__(self, object_io_interface, hand_path, hand_cache_file, planning_scene_interface,
                 visualize=False, open_hand_offset=0.1):
        """ Creates a new wrapper.
            @param object_io_interface IOObject Object that handles IO requests
            @param hand_path Path to where the hand data is stored.
            @param hand_cache_file Path to the hand cache file (forwarded to load_hand).
            @param planning_scene_interface OpenRAVE environment with some additional information
                containing the robot and its surroundings.
            @param visualize If true, the internal OpenRAVE environment is set to be visualized
                (only works if there is no other OpenRAVE viewer in this process)
            @param open_hand_offset Value to open the hand by. A grasp is in contact with the target object,
                hence a grasping configuration is always in collision. To enable motion planning to such a
                configuration we open the hand by some constant offset.
        """
        self.grasp_planner = HFTSSampler(object_io_interface=object_io_interface,
                                         vis=visualize, scene_interface=planning_scene_interface)
        self.grasp_planner.set_max_iter(100)
        self.open_hand_offset = open_hand_offset
        self.root_node = self.grasp_planner.get_root_node()
        self.load_hand(hand_path, hand_cache_file)
    def sample(self, depth_limit, post_opt=True):
        """ Samples a grasp from the root level. """
        return self.sample_warm_start(self.root_node, depth_limit, post_opt=post_opt)
    def sample_warm_start(self, hierarchy_node, depth_limit, label_cache=None, post_opt=False):
        """ Samples a grasp from the given node on. """
        logging.debug('[GoalSamplerWrapper] Sampling a grasp from hierarchy depth ' +
                      str(hierarchy_node.get_depth()))
        sampled_node = self.grasp_planner.sample_grasp(node=hierarchy_node, depth_limit=depth_limit,
                                                       post_opt=post_opt,
                                                       label_cache=label_cache,
                                                       open_hand_offset=self.open_hand_offset)
        # Combined configuration: arm configuration followed by the pre-grasp
        # hand configuration; None when the sampler produced no arm config.
        config = sampled_node.get_arm_configuration()
        if config is not None:
            config = numpy.concatenate((config, sampled_node.get_pre_grasp_config()))
        return SamplingResult(configuration=config, hierarchy_info=sampled_node, data_extractor=HFTSNodeDataExtractor())
    def is_goal(self, sampling_result):
        """ Returns whether the given node is a goal or not. """
        # NOTE(review): SamplingResult is constructed above with the keyword
        # `hierarchy_info`; confirm its attribute really is `hierarchyInfo`.
        return sampling_result.hierarchyInfo.is_goal()
    def load_hand(self, hand_path, hand_cache_file):
        """ Reset the hand being used. @see __init__ for parameter description. """
        self.grasp_planner.load_hand(hand_file=hand_path, hand_cache_file=hand_cache_file)
    def set_object(self, obj_id, model_id=None):
        """ Set the object.
            @param obj_id String identifying the object.
            @param model_id (optional) Name of the model data. If None, it is assumed to be identical to obj_id
        """
        self.grasp_planner.load_object(obj_id=obj_id, model_id=model_id)
        # changing the object invalidates the previous hierarchy root
        self.root_node = self.grasp_planner.get_root_node()
    def set_max_iter(self, iterations):
        # Forward the iteration budget to the underlying sampler.
        self.grasp_planner.set_max_iter(iterations)
    def get_max_depth(self):
        # Maximum depth of the grasp hierarchy, as reported by the sampler.
        return self.grasp_planner.get_maximum_depth()
    def get_root(self):
        # Root node of the current grasp hierarchy.
        return self.root_node
    def set_parameters(self, **kwargs):
        # Pass arbitrary sampler parameters through to the HFTS sampler.
        self.grasp_planner.set_parameters(**kwargs)
| {
"content_hash": "033f6d0badf63f5ed8ea71e1f61f79c1",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 120,
"avg_line_length": 50.095238095238095,
"alnum_prop": 0.6447243346007605,
"repo_name": "kth-ros-pkg/hfts_grasp_planner",
"id": "a016f09c14b66454711eaec6ab1a14e84393f96d",
"size": "4231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hfts_grasp_planner/grasp_goal_sampler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "6444"
},
{
"name": "Python",
"bytes": "295731"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import zmq
# One shared ZeroMQ context for the process.
context = zmq.Context()
# REP socket: strictly alternating recv/send, one lookup request at a time.
server_socket = context.socket(zmq.REP)
# Well-known port where components ask for their in/out endpoints.
server_socket.bind('tcp://*:6000')
class NewAddr(object):
    """Callable allocator that hands out sequential addresses (port numbers).

    The seed passed to the constructor is never returned itself; successive
    calls yield seed + 1, seed + 2, ...
    """

    def __init__(self, first_addr):
        # Holds the most recently issued address (initially the seed).
        self.last_addr = first_addr

    def __call__(self):
        next_addr = self.last_addr + 1
        self.last_addr = next_addr
        return next_addr
# Port allocator seeded at the server's own port; the first issued port is 6001.
new_address = NewAddr(6000)
# Lazily assigns a fresh port the first time a component name is looked up.
inbound_addresses = defaultdict(new_address)
# Fixed message pipeline: each component sends its output to the next one's
# inbound endpoint (Proxy -> Logic -> Model -> View -> Proxy).
outbound_targets = {
    'com.jessebmiller.Proxy': 'com.jessebmiller.Logic',
    'com.jessebmiller.Logic': 'com.jessebmiller.Model',
    'com.jessebmiller.Model': 'com.jessebmiller.View',
    'com.jessebmiller.View': 'com.jessebmiller.Proxy',
}
def inbound(socket_name):
    """Return the tcp endpoint the named component receives input on.

    The port is allocated on first lookup and is stable afterwards.
    """
    port = inbound_addresses[socket_name]
    return "tcp://localhost:{}".format(port)
def outbound(socket_name):
    """Return the tcp endpoint the named component should publish output on.

    This is the inbound port of the next component in the pipeline.
    """
    target = outbound_targets[socket_name]
    port = inbound_addresses[target]
    return "tcp://*:{}".format(port)
# Serve forever: each request is a component name; reply with that component's
# inbound address and the address it should send its output to, ';'-separated.
# NOTE: Python 2 syntax (print statements) throughout this file.
while True:
    request = server_socket.recv()
    print 'bartok got', request
    in_addr = inbound(request)
    print ' > in', in_addr
    out_addr = outbound(request)
    print ' > out', out_addr
    server_socket.send(b';'.join([in_addr, out_addr]))
| {
"content_hash": "39377582ead834dfa1e3ec043b5c962d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 57,
"avg_line_length": 25.795454545454547,
"alnum_prop": 0.6590308370044052,
"repo_name": "jessebmiller/JBMPub",
"id": "42fe9261ccc2845b941feff3dc6ed5d234eaa64e",
"size": "1135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bartok/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8533"
},
{
"name": "Python",
"bytes": "11843"
},
{
"name": "Shell",
"bytes": "301"
}
],
"symlink_target": ""
} |
"""
Provides functions to handle source code highlighting.
"""
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer, get_lexer_by_name, guess_lexer
from pygments.styles import get_style_by_name

from pydozeoff.conf import settings
def get_style_defs(name="default"):
    """Returns the CSS code used to highlight source code.

    If the given style name isn't found, the default style will be used.
    """
    try:
        return HtmlFormatter(style=name).get_style_defs()
    except Exception:
        # Unknown style name: fall back to Pygments' default style.
        # (Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.)
        return HtmlFormatter().get_style_defs()
def code(language, source_code):
    """Renders the given source code snippet as highlighted HTML.

    The formatter options come from the SYNTAX_HIGHLIGHT_OPTIONS setting.
    """
    lexer = _get_lexer(language, source_code)
    formatter = HtmlFormatter(**settings["SYNTAX_HIGHLIGHT_OPTIONS"])
    return highlight(source_code, lexer, formatter)
def _get_lexer(language, source_code):
    """Returns the appropriate lexer to parse the given source code snippet.

    Resolution order: lexer registered under `language`, then a lexer guessed
    from the snippet contents, then the Python lexer as a last resort.
    """
    try:
        return get_lexer_by_name(language)
    except Exception:
        pass
    try:
        # BUG FIX: guess_lexer was never imported, so this call raised
        # NameError and the bare `except:` silently forced the PythonLexer
        # fallback for every unknown language. It is now imported from
        # pygments.lexers, making the guessing path actually reachable.
        return guess_lexer(source_code)
    except Exception:
        return PythonLexer()
| {
"content_hash": "31e6b0296f2df2ded0ba91a324f986ed",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 28.775,
"alnum_prop": 0.6889661164205039,
"repo_name": "danielfm/pydozeoff",
"id": "931a43774d017077841b55da40e7dc1e7912cc31",
"size": "1174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pydozeoff/template/highlight.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29242"
}
],
"symlink_target": ""
} |
"""This module contains a cache that supports string and blob values. It supports
both because blobs are unable to encode unicode characters properly.
"""
__author__ = 'Aaron Steele'
# Standard Python imports
import logging
import json
from google.appengine.api import memcache
# Google App Engine imports
from google.appengine.ext.ndb import model
class CacheItem(model.Model):
    """An item in the cache.

    Supports both blob and string cached values, since blobs are unable to
    encode unicode characters properly.
    """

    # Single-letter datastore names keep stored entities compact.
    blob = model.BlobProperty('b')
    string = model.StringProperty('s', indexed=False)
    created = model.DateTimeProperty('c', auto_now_add=True)

    @classmethod
    def create(cls, key, value, dumps=False, value_type='string'):
        """Build (but do not save) a CacheItem entity.

        Arguments:
            key - The cache key; normalized to stripped lowercase.
            value - The value to cache.
            dumps - If True and value_type is 'string', JSON-encode the value.
            value_type - 'string' or 'blob'.

        Returns the unsaved entity, or None for an unknown value_type.
        """
        item_id = key.strip().lower()
        entity = None
        if value_type == 'string':
            entity = cls(id=item_id, string=json.dumps(value) if dumps else value)
        elif value_type == 'blob':
            entity = cls(id=item_id, blob=value)
        return entity

    @classmethod
    def get(cls, key, loads=False, value_type='string'):
        """Return the cached value for key, or None if absent.

        If loads is True, attempt to JSON-decode string values and fall back
        to the raw string when decoding fails.
        """
        value = None
        item = model.Key(cls.__name__, key.strip().lower()).get()
        if item:
            if value_type == 'string':
                # Read the public property directly instead of going through
                # the private item._to_dict()['string'] round-trip.
                data = item.string
                if loads:
                    try:
                        value = json.loads(data)
                    except (TypeError, ValueError):
                        # Not valid JSON (or not a string): return as-is.
                        # (Narrowed from a bare `except:`.)
                        value = data
                else:
                    value = data
            elif value_type == 'blob':
                value = item.blob
        return value

    @classmethod
    def add(cls, key, value, dumps=False, value_type='string'):
        """Create and persist a CacheItem; returns the datastore key."""
        return cls.create(key, value, dumps, value_type).put()
def create_entry(key, value, dumps=False, value_type='string'):
    """Build (without saving) a CacheItem entity for the given key and value."""
    return CacheItem.create(key, value, dumps=dumps, value_type=value_type)
def get(key, loads=False, value_type='string'):
    """Look up a cached value by its key.

    Arguments:
    key - The cache item key.
    loads - When true, JSON-decode the cached value (default false).
    value_type - Kind of cached value: 'string' or 'blob' (default 'string').
    """
    return CacheItem.get(key, loads=loads, value_type=value_type)
def add(key, value, dumps=False, value_type='string'):
    """Store a value in the cache under the given key.

    Arguments:
    key - The cache item key.
    value - The value to cache.
    dumps - When true, JSON-encode the value before caching (default false).
    value_type - Kind of cached value: 'string' or 'blob' (default 'string').
    """
    CacheItem.add(key, value, dumps=dumps, value_type=value_type)
| {
"content_hash": "bafd8d4e17888ee361dccaf68bfd8091",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 82,
"avg_line_length": 33.71604938271605,
"alnum_prop": 0.5924569754668619,
"repo_name": "earthenv/mapotron",
"id": "b13655acf53ff65173b4d298d5bcedd901e84b13",
"size": "2731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3325"
},
{
"name": "HTML",
"bytes": "13123"
},
{
"name": "JavaScript",
"bytes": "18026"
},
{
"name": "Python",
"bytes": "19670"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_admin_state_pac import TapiCommonAdminStatePac # noqa: F401,E501
from tapi_server.models.tapi_common_administrative_state import TapiCommonAdministrativeState # noqa: F401,E501
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: F401,E501
from tapi_server.models.tapi_common_forwarding_direction import TapiCommonForwardingDirection # noqa: F401,E501
from tapi_server.models.tapi_common_global_class import TapiCommonGlobalClass # noqa: F401,E501
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_lifecycle_state import TapiCommonLifecycleState # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_common_operational_state import TapiCommonOperationalState # noqa: F401,E501
from tapi_server.models.tapi_common_time_range import TapiCommonTimeRange # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connection_ref import TapiConnectivityConnectionRef # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connectivity_constraint import TapiConnectivityConnectivityConstraint # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connectivity_service_ref import TapiConnectivityConnectivityServiceRef # noqa: F401,E501
from tapi_server.models.tapi_connectivity_connectivityservice_end_point import TapiConnectivityConnectivityserviceEndPoint # noqa: F401,E501
from tapi_server.models.tapi_connectivity_coordinate_type import TapiConnectivityCoordinateType # noqa: F401,E501
from tapi_server.models.tapi_connectivity_resilience_constraint import TapiConnectivityResilienceConstraint # noqa: F401,E501
from tapi_server.models.tapi_connectivity_reversion_mode import TapiConnectivityReversionMode # noqa: F401,E501
from tapi_server.models.tapi_connectivity_service_type import TapiConnectivityServiceType # noqa: F401,E501
from tapi_server.models.tapi_path_computation_diversity_policy import TapiPathComputationDiversityPolicy # noqa: F401,E501
from tapi_server.models.tapi_path_computation_path_ref import TapiPathComputationPathRef # noqa: F401,E501
from tapi_server.models.tapi_path_computation_route_objective_function import TapiPathComputationRouteObjectiveFunction # noqa: F401,E501
from tapi_server.models.tapi_path_computation_routing_constraint import TapiPathComputationRoutingConstraint # noqa: F401,E501
from tapi_server.models.tapi_path_computation_topology_constraint import TapiPathComputationTopologyConstraint # noqa: F401,E501
from tapi_server.models.tapi_topology_cost_characteristic import TapiTopologyCostCharacteristic # noqa: F401,E501
from tapi_server.models.tapi_topology_latency_characteristic import TapiTopologyLatencyCharacteristic # noqa: F401,E501
from tapi_server.models.tapi_topology_link_ref import TapiTopologyLinkRef # noqa: F401,E501
from tapi_server.models.tapi_topology_node_ref import TapiTopologyNodeRef # noqa: F401,E501
from tapi_server.models.tapi_topology_resilience_type import TapiTopologyResilienceType # noqa: F401,E501
from tapi_server.models.tapi_topology_risk_characteristic import TapiTopologyRiskCharacteristic # noqa: F401,E501
from tapi_server.models.tapi_topology_topology_ref import TapiTopologyTopologyRef # noqa: F401,E501
from tapi_server import util
class TapiConnectivityCreateconnectivityserviceOutputService(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, operational_state=None, lifecycle_state=None, administrative_state=None, name=None, uuid=None, service_layer=None, schedule=None, connectivity_direction=None, requested_capacity=None, diversity_exclusion=None, service_level=None, service_type=None, coroute_inclusion=None, is_lock_out=False, max_switch_times=None, restoration_coordinate_type=None, is_coordinated_switching_both_ends=False, hold_off_time=None, is_frozen=False, wait_to_revert_time=15, resilience_type=None, preferred_restoration_layer=None, restore_priority=None, reversion_mode=None, is_exclusive=True, diversity_policy=None, route_objective_function=None, cost_characteristic=None, latency_characteristic=None, risk_diversity_characteristic=None, route_direction=None, include_node=None, exclude_link=None, avoid_topology=None, exclude_path=None, include_link=None, preferred_transport_layer=None, exclude_node=None, include_topology=None, include_path=None, end_point=None, connection=None): # noqa: E501
"""TapiConnectivityCreateconnectivityserviceOutputService - a model defined in OpenAPI
:param operational_state: The operational_state of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type operational_state: TapiCommonOperationalState
:param lifecycle_state: The lifecycle_state of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type lifecycle_state: TapiCommonLifecycleState
:param administrative_state: The administrative_state of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type administrative_state: TapiCommonAdministrativeState
:param name: The name of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param uuid: The uuid of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type uuid: str
:param service_layer: The service_layer of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type service_layer: TapiCommonLayerProtocolName
:param schedule: The schedule of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type schedule: TapiCommonTimeRange
:param connectivity_direction: The connectivity_direction of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type connectivity_direction: TapiCommonForwardingDirection
:param requested_capacity: The requested_capacity of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type requested_capacity: TapiCommonCapacity
:param diversity_exclusion: The diversity_exclusion of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type diversity_exclusion: List[TapiConnectivityConnectivityServiceRef]
:param service_level: The service_level of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type service_level: str
:param service_type: The service_type of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type service_type: TapiConnectivityServiceType
:param coroute_inclusion: The coroute_inclusion of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type coroute_inclusion: TapiConnectivityConnectivityServiceRef
:param is_lock_out: The is_lock_out of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type is_lock_out: bool
:param max_switch_times: The max_switch_times of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type max_switch_times: int
:param restoration_coordinate_type: The restoration_coordinate_type of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type restoration_coordinate_type: TapiConnectivityCoordinateType
:param is_coordinated_switching_both_ends: The is_coordinated_switching_both_ends of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type is_coordinated_switching_both_ends: bool
:param hold_off_time: The hold_off_time of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type hold_off_time: int
:param is_frozen: The is_frozen of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type is_frozen: bool
:param wait_to_revert_time: The wait_to_revert_time of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type wait_to_revert_time: int
:param resilience_type: The resilience_type of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type resilience_type: TapiTopologyResilienceType
:param preferred_restoration_layer: The preferred_restoration_layer of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type preferred_restoration_layer: List[TapiCommonLayerProtocolName]
:param restore_priority: The restore_priority of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type restore_priority: int
:param reversion_mode: The reversion_mode of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type reversion_mode: TapiConnectivityReversionMode
:param is_exclusive: The is_exclusive of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type is_exclusive: bool
:param diversity_policy: The diversity_policy of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type diversity_policy: TapiPathComputationDiversityPolicy
:param route_objective_function: The route_objective_function of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type route_objective_function: TapiPathComputationRouteObjectiveFunction
:param cost_characteristic: The cost_characteristic of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type cost_characteristic: List[TapiTopologyCostCharacteristic]
:param latency_characteristic: The latency_characteristic of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type latency_characteristic: List[TapiTopologyLatencyCharacteristic]
:param risk_diversity_characteristic: The risk_diversity_characteristic of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type risk_diversity_characteristic: List[TapiTopologyRiskCharacteristic]
:param route_direction: The route_direction of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type route_direction: TapiCommonForwardingDirection
:param include_node: The include_node of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type include_node: List[TapiTopologyNodeRef]
:param exclude_link: The exclude_link of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type exclude_link: List[TapiTopologyLinkRef]
:param avoid_topology: The avoid_topology of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type avoid_topology: List[TapiTopologyTopologyRef]
:param exclude_path: The exclude_path of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type exclude_path: List[TapiPathComputationPathRef]
:param include_link: The include_link of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type include_link: List[TapiTopologyLinkRef]
:param preferred_transport_layer: The preferred_transport_layer of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type preferred_transport_layer: List[TapiCommonLayerProtocolName]
:param exclude_node: The exclude_node of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type exclude_node: List[TapiTopologyNodeRef]
:param include_topology: The include_topology of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type include_topology: List[TapiTopologyTopologyRef]
:param include_path: The include_path of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type include_path: List[TapiPathComputationPathRef]
:param end_point: The end_point of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type end_point: List[TapiConnectivityConnectivityserviceEndPoint]
:param connection: The connection of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:type connection: List[TapiConnectivityConnectionRef]
"""
self.openapi_types = {
'operational_state': TapiCommonOperationalState,
'lifecycle_state': TapiCommonLifecycleState,
'administrative_state': TapiCommonAdministrativeState,
'name': List[TapiCommonNameAndValue],
'uuid': str,
'service_layer': TapiCommonLayerProtocolName,
'schedule': TapiCommonTimeRange,
'connectivity_direction': TapiCommonForwardingDirection,
'requested_capacity': TapiCommonCapacity,
'diversity_exclusion': List[TapiConnectivityConnectivityServiceRef],
'service_level': str,
'service_type': TapiConnectivityServiceType,
'coroute_inclusion': TapiConnectivityConnectivityServiceRef,
'is_lock_out': bool,
'max_switch_times': int,
'restoration_coordinate_type': TapiConnectivityCoordinateType,
'is_coordinated_switching_both_ends': bool,
'hold_off_time': int,
'is_frozen': bool,
'wait_to_revert_time': int,
'resilience_type': TapiTopologyResilienceType,
'preferred_restoration_layer': List[TapiCommonLayerProtocolName],
'restore_priority': int,
'reversion_mode': TapiConnectivityReversionMode,
'is_exclusive': bool,
'diversity_policy': TapiPathComputationDiversityPolicy,
'route_objective_function': TapiPathComputationRouteObjectiveFunction,
'cost_characteristic': List[TapiTopologyCostCharacteristic],
'latency_characteristic': List[TapiTopologyLatencyCharacteristic],
'risk_diversity_characteristic': List[TapiTopologyRiskCharacteristic],
'route_direction': TapiCommonForwardingDirection,
'include_node': List[TapiTopologyNodeRef],
'exclude_link': List[TapiTopologyLinkRef],
'avoid_topology': List[TapiTopologyTopologyRef],
'exclude_path': List[TapiPathComputationPathRef],
'include_link': List[TapiTopologyLinkRef],
'preferred_transport_layer': List[TapiCommonLayerProtocolName],
'exclude_node': List[TapiTopologyNodeRef],
'include_topology': List[TapiTopologyTopologyRef],
'include_path': List[TapiPathComputationPathRef],
'end_point': List[TapiConnectivityConnectivityserviceEndPoint],
'connection': List[TapiConnectivityConnectionRef]
}
self.attribute_map = {
'operational_state': 'operational-state',
'lifecycle_state': 'lifecycle-state',
'administrative_state': 'administrative-state',
'name': 'name',
'uuid': 'uuid',
'service_layer': 'service-layer',
'schedule': 'schedule',
'connectivity_direction': 'connectivity-direction',
'requested_capacity': 'requested-capacity',
'diversity_exclusion': 'diversity-exclusion',
'service_level': 'service-level',
'service_type': 'service-type',
'coroute_inclusion': 'coroute-inclusion',
'is_lock_out': 'is-lock-out',
'max_switch_times': 'max-switch-times',
'restoration_coordinate_type': 'restoration-coordinate-type',
'is_coordinated_switching_both_ends': 'is-coordinated-switching-both-ends',
'hold_off_time': 'hold-off-time',
'is_frozen': 'is-frozen',
'wait_to_revert_time': 'wait-to-revert-time',
'resilience_type': 'resilience-type',
'preferred_restoration_layer': 'preferred-restoration-layer',
'restore_priority': 'restore-priority',
'reversion_mode': 'reversion-mode',
'is_exclusive': 'is-exclusive',
'diversity_policy': 'diversity-policy',
'route_objective_function': 'route-objective-function',
'cost_characteristic': 'cost-characteristic',
'latency_characteristic': 'latency-characteristic',
'risk_diversity_characteristic': 'risk-diversity-characteristic',
'route_direction': 'route-direction',
'include_node': 'include-node',
'exclude_link': 'exclude-link',
'avoid_topology': 'avoid-topology',
'exclude_path': 'exclude-path',
'include_link': 'include-link',
'preferred_transport_layer': 'preferred-transport-layer',
'exclude_node': 'exclude-node',
'include_topology': 'include-topology',
'include_path': 'include-path',
'end_point': 'end-point',
'connection': 'connection'
}
self._operational_state = operational_state
self._lifecycle_state = lifecycle_state
self._administrative_state = administrative_state
self._name = name
self._uuid = uuid
self._service_layer = service_layer
self._schedule = schedule
self._connectivity_direction = connectivity_direction
self._requested_capacity = requested_capacity
self._diversity_exclusion = diversity_exclusion
self._service_level = service_level
self._service_type = service_type
self._coroute_inclusion = coroute_inclusion
self._is_lock_out = is_lock_out
self._max_switch_times = max_switch_times
self._restoration_coordinate_type = restoration_coordinate_type
self._is_coordinated_switching_both_ends = is_coordinated_switching_both_ends
self._hold_off_time = hold_off_time
self._is_frozen = is_frozen
self._wait_to_revert_time = wait_to_revert_time
self._resilience_type = resilience_type
self._preferred_restoration_layer = preferred_restoration_layer
self._restore_priority = restore_priority
self._reversion_mode = reversion_mode
self._is_exclusive = is_exclusive
self._diversity_policy = diversity_policy
self._route_objective_function = route_objective_function
self._cost_characteristic = cost_characteristic
self._latency_characteristic = latency_characteristic
self._risk_diversity_characteristic = risk_diversity_characteristic
self._route_direction = route_direction
self._include_node = include_node
self._exclude_link = exclude_link
self._avoid_topology = avoid_topology
self._exclude_path = exclude_path
self._include_link = include_link
self._preferred_transport_layer = preferred_transport_layer
self._exclude_node = exclude_node
self._include_topology = include_topology
self._include_path = include_path
self._end_point = end_point
self._connection = connection
@classmethod
def from_dict(cls, dikt) -> 'TapiConnectivityCreateconnectivityserviceOutputService':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.connectivity.createconnectivityservice.output.Service of this TapiConnectivityCreateconnectivityserviceOutputService. # noqa: E501
:rtype: TapiConnectivityCreateconnectivityserviceOutputService
"""
return util.deserialize_model(dikt, cls)
@property
def operational_state(self):
"""Gets the operational_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:return: The operational_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:rtype: TapiCommonOperationalState
"""
return self._operational_state
@operational_state.setter
def operational_state(self, operational_state):
"""Sets the operational_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:param operational_state: The operational_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:type operational_state: TapiCommonOperationalState
"""
self._operational_state = operational_state
@property
def lifecycle_state(self):
"""Gets the lifecycle_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:return: The lifecycle_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:rtype: TapiCommonLifecycleState
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""Sets the lifecycle_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:param lifecycle_state: The lifecycle_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:type lifecycle_state: TapiCommonLifecycleState
"""
self._lifecycle_state = lifecycle_state
@property
def administrative_state(self):
"""Gets the administrative_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:return: The administrative_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:rtype: TapiCommonAdministrativeState
"""
return self._administrative_state
@administrative_state.setter
def administrative_state(self, administrative_state):
"""Sets the administrative_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:param administrative_state: The administrative_state of this TapiConnectivityCreateconnectivityserviceOutputService.
:type administrative_state: TapiCommonAdministrativeState
"""
self._administrative_state = administrative_state
@property
def name(self):
"""Gets the name of this TapiConnectivityCreateconnectivityserviceOutputService.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:return: The name of this TapiConnectivityCreateconnectivityserviceOutputService.
:rtype: List[TapiCommonNameAndValue]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapiConnectivityCreateconnectivityserviceOutputService.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:param name: The name of this TapiConnectivityCreateconnectivityserviceOutputService.
:type name: List[TapiCommonNameAndValue]
"""
self._name = name
@property
def uuid(self):
"""Gets the uuid of this TapiConnectivityCreateconnectivityserviceOutputService.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:return: The uuid of this TapiConnectivityCreateconnectivityserviceOutputService.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this TapiConnectivityCreateconnectivityserviceOutputService.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:param uuid: The uuid of this TapiConnectivityCreateconnectivityserviceOutputService.
:type uuid: str
"""
self._uuid = uuid
@property
def service_layer(self):
"""Gets the service_layer of this TapiConnectivityCreateconnectivityserviceOutputService.
:return: The service_layer of this TapiConnectivityCreateconnectivityserviceOutputService.
:rtype: TapiCommonLayerProtocolName
"""
return self._service_layer
@service_layer.setter
def service_layer(self, service_layer):
"""Sets the service_layer of this TapiConnectivityCreateconnectivityserviceOutputService.
:param service_layer: The service_layer of this TapiConnectivityCreateconnectivityserviceOutputService.
:type service_layer: TapiCommonLayerProtocolName
"""
self._service_layer = service_layer
@property
def schedule(self):
"""Gets the schedule of this TapiConnectivityCreateconnectivityserviceOutputService.
:return: The schedule of this TapiConnectivityCreateconnectivityserviceOutputService.
:rtype: TapiCommonTimeRange
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this TapiConnectivityCreateconnectivityserviceOutputService.
:param schedule: The schedule of this TapiConnectivityCreateconnectivityserviceOutputService.
:type schedule: TapiCommonTimeRange
"""
self._schedule = schedule
@property
def connectivity_direction(self):
    """The connectivity direction (TapiCommonForwardingDirection) of this service."""
    return self._connectivity_direction

@connectivity_direction.setter
def connectivity_direction(self, connectivity_direction):
    """Set the connectivity direction (TapiCommonForwardingDirection) of this service."""
    self._connectivity_direction = connectivity_direction
@property
def requested_capacity(self):
    """The requested capacity (TapiCommonCapacity) of this service."""
    return self._requested_capacity

@requested_capacity.setter
def requested_capacity(self, requested_capacity):
    """Set the requested capacity (TapiCommonCapacity) of this service."""
    self._requested_capacity = requested_capacity
@property
def diversity_exclusion(self):
    """Connectivity services to be diverse from (List[TapiConnectivityConnectivityServiceRef])."""
    return self._diversity_exclusion

@diversity_exclusion.setter
def diversity_exclusion(self, diversity_exclusion):
    """Set the diversity-exclusion list (List[TapiConnectivityConnectivityServiceRef])."""
    self._diversity_exclusion = diversity_exclusion
@property
def service_level(self):
    """An abstract, mutually agreed value typically representing metrics
    such as class of service, priority, resiliency, availability (str)."""
    return self._service_level

@service_level.setter
def service_level(self, service_level):
    """Set the service level (str) — an abstract, mutually agreed value
    typically representing class of service, priority, resiliency, availability."""
    self._service_level = service_level
@property
def service_type(self):
    """The service type (TapiConnectivityServiceType) of this service."""
    return self._service_type

@service_type.setter
def service_type(self, service_type):
    """Set the service type (TapiConnectivityServiceType) of this service."""
    self._service_type = service_type
@property
def coroute_inclusion(self):
    """Connectivity service to be co-routed with (TapiConnectivityConnectivityServiceRef)."""
    return self._coroute_inclusion

@coroute_inclusion.setter
def coroute_inclusion(self, coroute_inclusion):
    """Set the co-route inclusion (TapiConnectivityConnectivityServiceRef)."""
    self._coroute_inclusion = coroute_inclusion
@property
def is_lock_out(self):
    """Whether the resource is temporarily unavailable to its protection
    scheme(s); overrides all other protection control states, including
    forced (bool). Only relevant when part of a protection scheme."""
    return self._is_lock_out

@is_lock_out.setter
def is_lock_out(self, is_lock_out):
    """Set the lock-out flag (bool); a locked-out resource cannot be used
    by its protection scheme(s) under any circumstances."""
    self._is_lock_out = is_lock_out
@property
def max_switch_times(self):
    """Upper bound on the number of protection switches (int); the counter
    resets when the working fault clears and traffic reverts to the
    original working path."""
    return self._max_switch_times

@max_switch_times.setter
def max_switch_times(self, max_switch_times):
    """Set the maximum number of protection switches (int)."""
    self._max_switch_times = max_switch_times
@property
def restoration_coordinate_type(self):
    """The restoration coordinate type (TapiConnectivityCoordinateType)."""
    return self._restoration_coordinate_type

@restoration_coordinate_type.setter
def restoration_coordinate_type(self, restoration_coordinate_type):
    """Set the restoration coordinate type (TapiConnectivityCoordinateType)."""
    self._restoration_coordinate_type = restoration_coordinate_type
@property
def is_coordinated_switching_both_ends(self):
    """Whether switching of each flow across the FC is coordinated at both
    ingress and egress ends (bool)."""
    return self._is_coordinated_switching_both_ends

@is_coordinated_switching_both_ends.setter
def is_coordinated_switching_both_ends(self, is_coordinated_switching_both_ends):
    """Set whether switching is coordinated at both ends (bool)."""
    self._is_coordinated_switching_both_ends = is_coordinated_switching_both_ends
@property
def hold_off_time(self):
    """Milliseconds between declaration of signal degrade/fail and the
    initialization of the protection switching algorithm (int)."""
    return self._hold_off_time

@hold_off_time.setter
def hold_off_time(self, hold_off_time):
    """Set the hold-off time in milliseconds (int)."""
    self._hold_off_time = hold_off_time
@property
def is_frozen(self):
    """Whether the protection state is frozen (bool): no switch actions are
    taken, near-end external commands are rejected, fault-condition changes
    and received APS messages are ignored, and all administrative protection
    controls are rejected until the freeze is cleared."""
    return self._is_frozen

@is_frozen.setter
def is_frozen(self, is_frozen):
    """Set the frozen flag (bool) that temporarily prevents any switch action."""
    self._is_frozen = is_frozen
@property
def wait_to_revert_time(self):
    """Minutes to wait, in a revertive protection system, after a fault
    clears on the preferred resource before reverting to it (int)."""
    return self._wait_to_revert_time

@wait_to_revert_time.setter
def wait_to_revert_time(self, wait_to_revert_time):
    """Set the wait-to-revert time in minutes (int)."""
    self._wait_to_revert_time = wait_to_revert_time
@property
def resilience_type(self):
    """The resilience type (TapiTopologyResilienceType) of this service."""
    return self._resilience_type

@resilience_type.setter
def resilience_type(self, resilience_type):
    """Set the resilience type (TapiTopologyResilienceType) of this service."""
    self._resilience_type = resilience_type
@property
def preferred_restoration_layer(self):
    """Layer(s) this resilience parameter package is configured for
    (List[TapiCommonLayerProtocolName])."""
    return self._preferred_restoration_layer

@preferred_restoration_layer.setter
def preferred_restoration_layer(self, preferred_restoration_layer):
    """Set the preferred restoration layer(s) (List[TapiCommonLayerProtocolName])."""
    self._preferred_restoration_layer = preferred_restoration_layer
@property
def restore_priority(self):
    """The restore priority (int) of this service."""
    return self._restore_priority

@restore_priority.setter
def restore_priority(self, restore_priority):
    """Set the restore priority (int) of this service."""
    self._restore_priority = restore_priority
@property
def reversion_mode(self):
    """The reversion mode (TapiConnectivityReversionMode) of this service."""
    return self._reversion_mode

@reversion_mode.setter
def reversion_mode(self, reversion_mode):
    """Set the reversion mode (TapiConnectivityReversionMode) of this service."""
    self._reversion_mode = reversion_mode
@property
def is_exclusive(self):
    """Whether the resources are to be exclusive to this service (bool)."""
    return self._is_exclusive

@is_exclusive.setter
def is_exclusive(self, is_exclusive):
    """Set whether the resources are exclusive to this service (bool)."""
    self._is_exclusive = is_exclusive
@property
def diversity_policy(self):
    """The diversity policy (TapiPathComputationDiversityPolicy) of this service."""
    return self._diversity_policy

@diversity_policy.setter
def diversity_policy(self, diversity_policy):
    """Set the diversity policy (TapiPathComputationDiversityPolicy) of this service."""
    self._diversity_policy = diversity_policy
@property
def route_objective_function(self):
    """The route objective function (TapiPathComputationRouteObjectiveFunction)."""
    return self._route_objective_function

@route_objective_function.setter
def route_objective_function(self, route_objective_function):
    """Set the route objective function (TapiPathComputationRouteObjectiveFunction)."""
    self._route_objective_function = route_objective_function
@property
def cost_characteristic(self):
    """Costs, each relating to some aspect of the TopologicalEntity
    (List[TapiTopologyCostCharacteristic])."""
    return self._cost_characteristic

@cost_characteristic.setter
def cost_characteristic(self, cost_characteristic):
    """Set the cost characteristics (List[TapiTopologyCostCharacteristic])."""
    self._cost_characteristic = cost_characteristic
@property
def latency_characteristic(self):
    """Queuing-process latency effects — significant mainly for packet-based
    systems (List[TapiTopologyLatencyCharacteristic])."""
    return self._latency_characteristic

@latency_characteristic.setter
def latency_characteristic(self, latency_characteristic):
    """Set the latency characteristics (List[TapiTopologyLatencyCharacteristic])."""
    self._latency_characteristic = latency_characteristic
@property
def risk_diversity_characteristic(self):
    """Risk-diversity characteristics (List[TapiTopologyRiskCharacteristic])."""
    return self._risk_diversity_characteristic

@risk_diversity_characteristic.setter
def risk_diversity_characteristic(self, risk_diversity_characteristic):
    """Set the risk-diversity characteristics (List[TapiTopologyRiskCharacteristic])."""
    self._risk_diversity_characteristic = risk_diversity_characteristic
@property
def route_direction(self):
    """The route direction (TapiCommonForwardingDirection) of this service."""
    return self._route_direction

@route_direction.setter
def route_direction(self, route_direction):
    """Set the route direction (TapiCommonForwardingDirection) of this service."""
    self._route_direction = route_direction
@property
def include_node(self):
    """Nodes to include — a loose constraint: unordered and possibly partial
    (List[TapiTopologyNodeRef])."""
    return self._include_node

@include_node.setter
def include_node(self, include_node):
    """Set the include-node loose constraint (List[TapiTopologyNodeRef])."""
    self._include_node = include_node
@property
def exclude_link(self):
    """Links to exclude from the route (List[TapiTopologyLinkRef])."""
    return self._exclude_link

@exclude_link.setter
def exclude_link(self, exclude_link):
    """Set the exclude-link constraint (List[TapiTopologyLinkRef])."""
    self._exclude_link = exclude_link
@property
def avoid_topology(self):
    """Topologies to avoid (List[TapiTopologyTopologyRef])."""
    return self._avoid_topology

@avoid_topology.setter
def avoid_topology(self, avoid_topology):
    """Set the avoid-topology constraint (List[TapiTopologyTopologyRef])."""
    self._avoid_topology = avoid_topology
@property
def exclude_path(self):
    """Paths to exclude (List[TapiPathComputationPathRef])."""
    return self._exclude_path

@exclude_path.setter
def exclude_path(self, exclude_path):
    """Set the exclude-path constraint (List[TapiPathComputationPathRef])."""
    self._exclude_path = exclude_path
@property
def include_link(self):
    """Links to include — a loose constraint: unordered and possibly partial
    (List[TapiTopologyLinkRef])."""
    return self._include_link

@include_link.setter
def include_link(self, include_link):
    """Set the include-link loose constraint (List[TapiTopologyLinkRef])."""
    self._include_link = include_link
@property
def preferred_transport_layer(self):
    """Soft client constraint: layer(s) of transport connection preferred to
    carry the service — may be the service layer or one of its supported
    server layers (List[TapiCommonLayerProtocolName])."""
    return self._preferred_transport_layer

@preferred_transport_layer.setter
def preferred_transport_layer(self, preferred_transport_layer):
    """Set the preferred transport layer(s) (List[TapiCommonLayerProtocolName])."""
    self._preferred_transport_layer = preferred_transport_layer
@property
def exclude_node(self):
    """Nodes to exclude from the route (List[TapiTopologyNodeRef])."""
    return self._exclude_node

@exclude_node.setter
def exclude_node(self, exclude_node):
    """Set the exclude-node constraint (List[TapiTopologyNodeRef])."""
    self._exclude_node = exclude_node
@property
def include_topology(self):
    """Topologies to include (List[TapiTopologyTopologyRef])."""
    return self._include_topology

@include_topology.setter
def include_topology(self, include_topology):
    """Set the include-topology constraint (List[TapiTopologyTopologyRef])."""
    self._include_topology = include_topology
@property
def include_path(self):
    """Paths to include (List[TapiPathComputationPathRef])."""
    return self._include_path

@include_path.setter
def include_path(self, include_path):
    """Set the include-path constraint (List[TapiPathComputationPathRef])."""
    self._include_path = include_path
@property
def end_point(self):
    """Endpoints of this service (List[TapiConnectivityConnectivityserviceEndPoint])."""
    return self._end_point

@end_point.setter
def end_point(self, end_point):
    """Set the endpoints (List[TapiConnectivityConnectivityserviceEndPoint])."""
    self._end_point = end_point
@property
def connection(self):
    """Connections realizing this service (List[TapiConnectivityConnectionRef])."""
    return self._connection

@connection.setter
def connection(self, connection):
    """Set the connections (List[TapiConnectivityConnectionRef])."""
    self._connection = connection
| {
"content_hash": "b90cd817b362763515bb904ef543be45",
"timestamp": "",
"source": "github",
"line_count": 1212,
"max_line_length": 998,
"avg_line_length": 49.0486798679868,
"alnum_prop": 0.7306003667132067,
"repo_name": "karthik-sethuraman/Snowmass-ONFOpenTransport",
"id": "a1d095315147d5277e0504d28952f6df402f03c6",
"size": "59468",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "RI/flask_server/tapi_server/models/tapi_connectivity_createconnectivityservice_output_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "653"
},
{
"name": "D",
"bytes": "2405"
},
{
"name": "HTML",
"bytes": "137234"
},
{
"name": "Python",
"bytes": "937060"
},
{
"name": "Shell",
"bytes": "4361"
}
],
"symlink_target": ""
} |
"""
Kernel installation task
"""
from cStringIO import StringIO
import logging
import os
import re
import shlex
import urllib2
import urlparse
from teuthology import misc as teuthology
from ..orchestra import run
from ..config import config as teuth_config
from ..exceptions import (UnsupportedPackageTypeError,
ConfigError,
VersionNotFoundError)
from ..packaging import (
install_package,
get_koji_build_info,
get_kojiroot_base_url,
get_koji_package_name,
get_koji_task_rpm_info,
get_koji_task_result,
)
log = logging.getLogger(__name__)
def normalize_config(ctx, config):
    """
    Return a config whose keys are all real roles.

    Generic roles (client, mon, osd, etc.) are expanded into the actual
    roles present in the cluster (client.0, client.1, ...).  A version
    given for a specific role (e.g. ``osd.3``) always wins over a generic
    one.  If ``config`` is None, or only specifies version/kdb selectors
    (tag, branch, sha1, ...), that single config is applied to every host.

    :param ctx: Context with the cluster's remote/role mapping
    :param config: task configuration (may be None)
    :returns: dict mapping a concrete role on each host to its kernel config
    """
    version_keys = ('tag', 'branch', 'sha1', 'kdb',
                    'deb', 'rpm', 'koji', 'koji_task', 'flavor')
    # A config made up solely of version selectors (or no config at all)
    # applies uniformly to the first role of every remote.
    if config is None or all(key in version_keys for key in config.keys()):
        uniform = config if config is not None else {'branch': 'master'}
        normalized = {}
        for _, roles_for_host in ctx.cluster.remotes.iteritems():
            normalized[roles_for_host[0]] = uniform
        return normalized

    normalized = {}
    for role, role_config in config.iteritems():
        if role_config is None:
            role_config = {'branch': 'master'}
        if '.' in role:
            # Already a concrete role like 'osd.3'.
            normalized[role] = role_config
            continue
        # Generic role: expand to every matching concrete role, but let a
        # specific entry in the original config override the generic one.
        for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
            name = '{type}.{id}'.format(type=role, id=id_)
            if name not in config:
                normalized[name] = role_config
    return normalized
def validate_config(ctx, config):
    """
    Assert that all roles co-located on one host request the same kernel,
    and drop the per-role entries from ``config`` as they are checked.

    :param ctx: Context with the cluster's remote/role mapping
    :param config: role -> kernel-config mapping (mutated: checked roles
                   are removed)
    :raises AssertionError: if two roles on one host request different kernels
    """
    for _, roles_for_host in ctx.cluster.remotes.iteritems():
        host_kernel = None
        for role in roles_for_host:
            role_kernel = config.get(role, host_kernel)
            if host_kernel is None:
                host_kernel = role_kernel
            elif role_kernel is not None:
                assert host_kernel == role_kernel, \
                    "everything on the same host must use the same kernel"
            # Remove the role so later code sees only unvalidated entries.
            config.pop(role, None)
def _vsplitter(version):
"""Kernels from Calxeda are named ...ceph-<sha1>...highbank.
Kernels that we generate are named ...-g<sha1>.
This routine finds the text in front of the sha1 that is used by
need_to_install() to extract information from the kernel name.
:param version: Name of the kernel
"""
if version.endswith('highbank'):
return 'ceph-'
return '-g'
def need_to_install(ctx, role, version):
    """
    Check to see if we need to install a kernel.  Get the version of the
    currently running kernel, and compare it against the value passed in.

    :param ctx: Context
    :param role: Role
    :param version: value to compare against (used in checking), can be either
                    a utsrelease string (e.g. '3.13.0-rc3-ceph-00049-ge2817b3')
                    or a sha1.
    :returns: True if an install is needed, False if the running kernel
              already matches *version*
    """
    ret = True
    log.info('Checking kernel version of {role}, want {ver}...'.format(
             role=role, ver=version))
    uname_fp = StringIO()
    ctx.cluster.only(role).run(
        args=[
            'uname',
            '-r',
            ],
        stdout=uname_fp,
        )
    cur_version = uname_fp.getvalue().rstrip('\n')
    log.debug('current kernel version is {ver}'.format(ver=cur_version))
    if '.' in version:
        # version is utsrelease, yay
        if cur_version == version:
            log.debug('utsrelease strings match, do not need to install')
            ret = False
    else:
        # version is sha1, need to try to extract sha1 from cur_version
        splt = _vsplitter(cur_version)
        if splt in cur_version:
            _, cur_sha1 = cur_version.rsplit(splt, 1)
            # trim any trailing suffix after the sha1 (e.g. '-suffix')
            dloc = cur_sha1.find('-')
            if dloc > 0:
                cur_sha1 = cur_sha1[0:dloc]
            log.debug('extracting sha1, {ver} -> {sha1}'.format(
                ver=cur_version, sha1=cur_sha1))
            # FIXME: The above will match things like ...-generic on Ubuntu
            # distro kernels resulting in 'eneric' cur_sha1.
            # compare only the common prefix: either side may be abbreviated
            m = min(len(cur_sha1), len(version))
            assert m >= 6, "cur_sha1 and/or version is too short, m = %d" % m
            if cur_sha1[0:m] == version[0:m]:
                log.debug('extracted sha1 matches, do not need to install')
                ret = False
        else:
            log.debug('failed to parse current kernel version')
    uname_fp.close()
    return ret
def install_firmware(ctx, config):
    """
    Go to the github to get the latest firmware.

    Skipped entirely if any role uses a distro kernel; rpm remotes get
    the distro's linux-firmware package instead of the git checkout.

    :param ctx: Context
    :param config: Configuration, mapping role -> kernel spec
    """
    linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
    uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
    fw_dir = '/lib/firmware/updates'
    for role in config.iterkeys():
        if isinstance(config[role], str) and config[role].find('distro') >= 0:
            log.info('Skipping firmware on distro kernel');
            return
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            # rpm systems take the distro firmware package instead of git
            role_remote.run(args=[
                'sudo', 'yum', 'upgrade', '-y', 'linux-firmware',
            ])
            continue
        log.info('Installing linux-firmware on {role}...'.format(role=role))
        role_remote.run(
            args=[
                # kludge around mysterious 0-byte .git/HEAD files
                'cd', fw_dir,
                run.Raw('&&'),
                'test', '-d', '.git',
                run.Raw('&&'),
                'test', '!', '-s', '.git/HEAD',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '.git',
                run.Raw(';'),
                # init
                'sudo', 'install', '-d', '-m0755', fw_dir,
                run.Raw('&&'),
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'init',
                ],
            )
        # point origin at the configured firmware repo (idempotent)
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'config',
                '--get', 'remote.origin.url', run.Raw('>/dev/null'),
                run.Raw('||'),
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir,
                'remote', 'add', 'origin', uri,
                ],
            )
        role_remote.run(
            args=[
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'fetch', 'origin',
                run.Raw('&&'),
                'sudo', 'git', 'reset', '--hard', 'origin/master'
                ],
            )
def gitbuilder_pkg_name(remote):
    """
    Return the canonical gitbuilder kernel package file name for the
    remote's package type.

    :param remote: remote whose ``os.package_type`` selects the name
    :raises UnsupportedPackageTypeError: if the package type is neither
        'rpm' nor 'deb'
    """
    names = {
        'rpm': 'kernel.x86_64.rpm',
        'deb': 'linux-image.deb',
    }
    ptype = remote.os.package_type
    if ptype not in names:
        raise UnsupportedPackageTypeError(remote)
    return names[ptype]
def remote_pkg_path(remote):
    """
    Return the path on *remote* where the kernel package is staged.

    This is where kernel packages are copied over (in case of local
    packages) or downloaded to (in case of gitbuilder packages) and
    then installed from.
    """
    filename = gitbuilder_pkg_name(remote)
    return os.path.join('/tmp', filename)
def download_kernel(ctx, config):
    """
    Supply each remote with a kernel package:
      - local kernels are copied over
      - gitbuilder and koji kernels are downloaded
      - nothing is done for distro kernels

    Transfers run in parallel (wait=False); this function blocks until
    all of them have completed.

    :param ctx: Context
    :param config: Configuration, mapping role -> kernel source (path,
                   sha1, koji build info dict, or 'distro')
    """
    procs = {}
    for role, src in config.iteritems():
        needs_download = False
        if src == 'distro':
            # don't need to download distro kernels
            log.debug("src is distro, skipping download");
            continue
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, dict):
            # we're downloading a kernel from koji, the src dict here
            # is the build_info retrieved from koji using get_koji_build_info
            if src.get("id"):
                build_id = src["id"]
                log.info("Downloading kernel with build_id {build_id} on {role}...".format(
                    build_id=build_id,
                    role=role
                ))
                needs_download = True
                baseurl = get_kojiroot_base_url(src)
                pkg_name = get_koji_package_name("kernel", src)
            elif src.get("task_id"):
                needs_download = True
                log.info("Downloading kernel with task_id {task_id} on {role}...".format(
                    task_id=src["task_id"],
                    role=role
                ))
                baseurl = src["base_url"]
                # this var is also poorly named as it's not the package name,
                # but the full name of the rpm file to download.
                pkg_name = src["rpm_name"]
        elif src.find('/') >= 0:
            # local package - src is path
            log.info('Copying kernel package {path} to {role}...'.format(
                path=src, role=role))
            f = open(src, 'r')
            # stream the local file to the remote via stdin
            proc = role_remote.run(
                args=[
                    'python', '-c',
                    'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
                    remote_pkg_path(role_remote),
                ],
                wait=False,
                stdin=f
            )
            procs[role_remote.name] = proc
        else:
            # gitbuilder package - src is sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(sha1=src,
                                                                     role=role))
            needs_download = True
            package_type = role_remote.os.package_type
            if package_type == 'rpm':
                system_type = role_remote.os.name
                system_ver = role_remote.os.version
                # gitbuilder dists use the major version only (e.g. centos7)
                if '.' in system_ver:
                    system_ver = system_ver.split('.')[0]
                ldist = '{system_type}{system_ver}'.format(
                    system_type=system_type, system_ver=system_ver)
                larch = 'x86_64'
            elif package_type == 'deb':
                ldist, larch = role_remote.os.codename, role_remote.arch
            else:
                raise UnsupportedPackageTypeError(role_remote)
            _, baseurl = teuthology.get_ceph_binary_url(
                package='kernel',
                sha1=src,
                format=package_type,
                flavor='basic',
                arch=larch,
                dist=ldist,
            )
            pkg_name = gitbuilder_pkg_name(role_remote)
            log.info("fetching, gitbuilder baseurl is %s", baseurl)
        if needs_download:
            # wget resolves pkg_name relative to baseurl via --base
            proc = role_remote.run(
                args=[
                    'rm', '-f', remote_pkg_path(role_remote),
                    run.Raw('&&'),
                    'echo',
                    pkg_name,
                    run.Raw('|'),
                    'wget',
                    '-nv',
                    '-O',
                    remote_pkg_path(role_remote),
                    '--base={url}'.format(url=baseurl),
                    '--input-file=-',
                ],
                wait=False)
            procs[role_remote.name] = proc
    for name, proc in procs.iteritems():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
def _no_grub_link(in_file, remote, kernel_ver):
    """
    Copy and link kernel related files if grub cannot be used
    (as is the case in Arm kernels)

    :param in_file: kernel file or image file to be copied.
    :param remote: remote machine
    :param kernel_ver: kernel version
    """
    boot1 = '/boot/%s' % in_file
    boot2 = '%s.old' % boot1
    # preserve any existing file as <name>.old
    remote.run(
        args=[
            'if', 'test', '-e', boot1, run.Raw(';'), 'then',
            'sudo', 'mv', boot1, boot2, run.Raw(';'), 'fi',],
        )
    # point /boot/<in_file> at the versioned file that was installed
    remote.run(
        args=['sudo', 'ln', '-s', '%s-%s' % (in_file, kernel_ver) , boot1, ],
        )
def install_and_reboot(ctx, config):
    """
    Install and reboot the kernel.  This mostly performs remote
    installation operations.  The code does check for Arm images
    and skips grub operations if the kernel is Arm.  Otherwise, it
    extracts kernel titles from submenu entries and makes the appropriate
    grub calls.  The assumptions here are somewhat simplified in that
    it expects kernel entries to be present under submenu entries.

    Reboots are issued with wait=False and collected at the end, so all
    hosts install in parallel.

    :param ctx: Context
    :param config: Configuration, mapping role -> kernel source
    """
    procs = {}
    kernel_title = ''
    for role, src in config.iteritems():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, str) and src.find('distro') >= 0:
            log.info('Installing distro kernel on {role}...'.format(role=role))
            install_kernel(role_remote)
            continue
        log.info('Installing kernel {src} on {role}...'.format(src=src,
                                                               role=role))
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            # install the staged rpm, tolerating downgrades and reinstalls
            proc = role_remote.run(
                args=[
                    'sudo',
                    'rpm',
                    '-ivh',
                    '--oldpackage',
                    '--replacefiles',
                    '--replacepkgs',
                    remote_pkg_path(role_remote),
                ])
            install_kernel(role_remote, remote_pkg_path(role_remote))
            continue
        # TODO: Refactor this into install_kernel() so that it handles all
        # cases for both rpm and deb packages.
        proc = role_remote.run(
            args=[
                # install the kernel deb
                'sudo',
                'dpkg',
                '-i',
                remote_pkg_path(role_remote),
                ],
            )
        # collect kernel image name from the .deb
        kernel_title = get_image_version(role_remote,
                                         remote_pkg_path(role_remote))
        log.info('searching for kernel {}'.format(kernel_title))
        if kernel_title.endswith("-highbank"):
            # Arm image: no grub; link files into place and reboot
            _no_grub_link('vmlinuz', role_remote, kernel_title)
            _no_grub_link('initrd.img', role_remote, kernel_title)
            proc = role_remote.run(
                args=[
                    'sudo',
                    'shutdown',
                    '-r',
                    'now',
                    ],
                wait=False,
            )
            procs[role_remote.name] = proc
            continue
        # look for menuentry for our kernel, and collect any
        # submenu entries for their titles.  Assume that if our
        # kernel entry appears later in the file than a submenu entry,
        # it's actually nested under that submenu.  If it gets more
        # complex this will totally break.
        cmdout = StringIO()
        proc = role_remote.run(
            args=[
                'egrep',
                '(submenu|menuentry.*' + kernel_title + ').*{',
                '/boot/grub/grub.cfg'
               ],
            stdout = cmdout,
            )
        submenu_title = ''
        default_title = ''
        for l in cmdout.getvalue().split('\n'):
            fields = shlex.split(l)
            if len(fields) >= 2:
                command, title = fields[:2]
                if command == 'submenu':
                    submenu_title = title + '>'
                if command == 'menuentry':
                    if title.endswith(kernel_title):
                        default_title = title
                        break
        cmdout.close()
        log.info('submenu_title:{}'.format(submenu_title))
        log.info('default_title:{}'.format(default_title))
        proc = role_remote.run(
            args=[
                # use the title(s) to construct the content of
                # the grub menu entry, so we can default to it.
                '/bin/echo',
                '-e',
                r'cat <<EOF\nset default="' + submenu_title + \
                default_title + r'"\nEOF\n',
                # make it look like an emacs backup file so
                # unfortunately timed update-grub runs don't pick it
                # up yet; use sudo tee so we are able to write to /etc
                run.Raw('|'),
                'sudo',
                'tee',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('>/dev/null'),
                run.Raw('&&'),
                'sudo',
                'chmod',
                'a+x',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('&&'),
                'sudo',
                'mv',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                '/etc/grub.d/01_ceph_kernel',
                # update grub again so it accepts our default
                run.Raw('&&'),
                'sudo',
                'update-grub',
                run.Raw('&&'),
                'rm',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                'sudo',
                'shutdown',
                '-r',
                'now',
                ],
            wait=False,
        )
        procs[role_remote.name] = proc
    for name, proc in procs.iteritems():
        log.debug('Waiting for install on %s to complete...', name)
        proc.wait()
def enable_disable_kdb(ctx, config):
    """
    Enable kdb on remote machines in use.  Disable on those that are
    not in use.

    :param ctx: Context
    :param config: Configuration, mapping role -> bool (enable kdb?)
    """
    for role, enable in config.iteritems():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        # mira hardware exposes its serial console on a different port
        if "mira" in role_remote.name:
            serialdev = "ttyS2"
        else:
            serialdev = "ttyS1"
        if enable:
            log.info('Enabling kdb on {role}...'.format(role=role))
            try:
                role_remote.run(
                    args=[
                        'echo', serialdev,
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
                        ])
            except run.CommandFailedError:
                log.warn('Kernel does not support kdb')
        else:
            log.info('Disabling kdb on {role}...'.format(role=role))
            # Add true pipe so command doesn't fail on kernel without kdb support.
            try:
                role_remote.run(
                    args=[
                        'echo', '',
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc',
                        run.Raw('|'),
                        'true',
                        ])
            except run.CommandFailedError:
                log.warn('Kernel does not support kdb')
def wait_for_reboot(ctx, need_install, timeout, distro=False):
    """
    Loop reconnecting and checking kernel versions until
    they're all correct or the timeout is exceeded.

    :param ctx: Context
    :param need_install: mapping of client -> expected kernel version
                         (utsrelease, sha1, or 'distro'); entries are
                         removed in place as clients come up correctly
    :param timeout: number of seconds before we timeout.
    :param distro: treat targets as distro kernels when True
    """
    import time
    starttime = time.time()
    while need_install:
        teuthology.reconnect(ctx, timeout)
        # .keys() snapshots the keys so entries can be deleted mid-loop
        for client in need_install.keys():
            if 'distro' in need_install[client]:
                distro = True
            log.info('Checking client {client} for new kernel version...'.format(client=client))
            try:
                if distro:
                    assert not need_to_install_distro(ctx, client), \
                        'failed to install new distro kernel version within timeout'
                else:
                    assert not need_to_install(ctx, client, need_install[client]), \
                        'failed to install new kernel version within timeout'
                # success: stop tracking this client
                del need_install[client]
            except Exception:
                log.exception("Saw exception")
                # ignore connection resets and asserts while time is left
                if time.time() - starttime > timeout:
                    raise
        time.sleep(1)
def need_to_install_distro(ctx, role):
    """
    Installing kernels on rpm won't setup grub/boot into them.  This installs
    the newest kernel package and checks its version and compares against
    current (uname -r) and returns true if newest != current.  Similar check
    for deb.

    :returns: True if the newest distro kernel still needs to be booted into
    """
    (role_remote,) = ctx.cluster.only(role).remotes.keys()
    package_type = role_remote.os.package_type
    output, err_mess = StringIO(), StringIO()
    role_remote.run(args=['uname', '-r'], stdout=output)
    current = output.getvalue().strip()
    # NOTE(review): for package types other than rpm/deb, 'newest' is never
    # bound and the membership test below would raise NameError.
    if package_type == 'rpm':
        role_remote.run(args=['sudo', 'yum', 'install', '-y', 'kernel'],
                        stdout=output)
        if 'Nothing to do' in output.getvalue():
            # already installed; probe whether the running kernel is the
            # one yum would (re)install
            err_mess.truncate(0)
            role_remote.run(args=['echo', 'no', run.Raw('|'), 'sudo', 'yum',
                            'reinstall', 'kernel', run.Raw('||'),
                            'true'], stderr=err_mess)
            if 'Skipping the running kernel' in err_mess.getvalue():
                # Current running kernel is already newest and updated
                log.info('Newest distro kernel already installed/running')
                return False
            else:
                role_remote.run(args=['sudo', 'yum', 'reinstall', '-y',
                                'kernel', run.Raw('||'), 'true'])
        # reset stringIO output.
        output.truncate(0)
        role_remote.run(args=['rpm', '-q', 'kernel', '--last'], stdout=output)
        # first non-ceph entry is the newest distro kernel
        for kernel in output.getvalue().split():
            if kernel.startswith('kernel'):
                if 'ceph' not in kernel:
                    newest = kernel.split('kernel-')[1]
                    break
    if package_type == 'deb':
        distribution = role_remote.os.name
        newest = get_latest_image_version_deb(role_remote, distribution)
    output.close()
    err_mess.close()
    if current in newest:
        return False
    log.info(
        'Not newest distro kernel. Curent: {cur} Expected: {new}'.format(
            cur=current, new=newest))
    return True
def maybe_generate_initrd_rpm(remote, path, version):
    """
    Generate initrd with mkinitrd if the hooks that should make it
    happen on its own aren't there.

    :param remote: remote to operate on
    :param path: rpm package path
    :param version: kernel version to generate initrd for
                    e.g. 3.18.0-rc6-ceph-00562-g79a9fa5
    """
    proc = remote.run(
        args=[
            'rpm',
            '--scripts',
            '-qp',
            path,
        ],
        stdout=StringIO())
    out = proc.stdout.getvalue()
    # if the package ships an install hook, it builds the initrd itself
    if 'bin/installkernel' in out or 'bin/kernel-install' in out:
        return
    log.info("No installkernel or kernel-install hook in %s, "
             "will generate initrd for %s", path, version)
    remote.run(
        args=[
            'sudo',
            'mkinitrd',
            '--allow-missing',
            '-f', # overwrite existing initrd
            '/boot/initramfs-' + version + '.img',
            version,
        ])
def install_kernel(remote, path=None):
    """
    A bit of misnomer perhaps - the actual kernel package is installed
    elsewhere, this function deals with initrd and grub.  Currently the
    following cases are handled:
      - local, gitbuilder, distro for rpm packages
      - distro for deb packages - see TODO in install_and_reboot()

    TODO: reboots should be issued from install_and_reboot()

    :param remote: remote to operate on
    :param path: package path (for local and gitbuilder cases); None
                 means "newest installed distro kernel"
    """
    package_type = remote.os.package_type
    if package_type == 'rpm':
        if path:
            version = get_image_version(remote, path)
            # This is either a gitbuilder or a local package and both of these
            # could have been built with upstream rpm targets with specs that
            # don't have a %post section at all, which means no initrd.
            maybe_generate_initrd_rpm(remote, path, version)
        else:
            version = get_latest_image_version_rpm(remote)
        update_grub_rpm(remote, version)
        remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
        return
    if package_type == 'deb':
        distribution = remote.os.name
        newversion = get_latest_image_version_deb(remote, distribution)
        if 'ubuntu' in distribution:
            # parse grub.cfg ourselves to build a 'set default' override
            grub2conf = teuthology.get_file(remote, '/boot/grub/grub.cfg', True)
            submenu = ''
            menuentry = ''
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            if submenu:
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            teuthology.delete_file(remote, '/etc/grub.d/01_ceph_kernel', sudo=True, force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel', StringIO(grubfile), '755')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
        if 'debian' in distribution:
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
def update_grub_rpm(remote, newversion):
    """
    Updates grub file to boot new kernel version on both legacy grub/grub2.

    :param remote: remote to operate on
    :param newversion: kernel version string to make the default
    """
    grub='grub2'
    # Check if grub2 is installed; fall back to legacy grub otherwise
    try:
        remote.run(args=['sudo', 'rpm', '-qi', 'grub2'])
    except Exception:
        grub = 'legacy'
    log.info('Updating Grub Version: {grub}'.format(grub=grub))
    if grub == 'legacy':
        data = ''
        # Write new legacy grub entry.
        newgrub = generate_legacy_grub_entry(remote, newversion)
        for line in newgrub:
            data += line + '\n'
        temp_file_path = remote.mktemp()
        teuthology.sudo_write_file(remote, temp_file_path, StringIO(data), '755')
        teuthology.move_file(remote, temp_file_path, '/boot/grub/grub.conf', True)
    else:
        # Update grub menu entry to new version.
        grub2_kernel_select_generic(remote, newversion, 'rpm')
def grub2_kernel_select_generic(remote, newversion, ostype):
    """
    Can be used on DEB and RPM.  Sets which entry should be booted by
    entry number.

    :param remote: remote to operate on
    :param newversion: kernel version string to select
    :param ostype: 'rpm' or 'deb' (selects the grub tool/config names)
    """
    if ostype == 'rpm':
        grubset = 'grub2-set-default'
        mkconfig = 'grub2-mkconfig'
        grubconfig = '/boot/grub2/grub.cfg'
    if ostype == 'deb':
        grubset = 'grub-set-default'
        grubconfig = '/boot/grub/grub.cfg'
        mkconfig = 'grub-mkconfig'
    remote.run(args=['sudo', mkconfig, '-o', grubconfig, ])
    grub2conf = teuthology.get_file(remote, grubconfig, True)
    entry_num = 0
    # count menuentry lines until the one matching the target version
    for line in grub2conf.split('\n'):
        if line.startswith('menuentry'):
            if newversion in line:
                break
            entry_num += 1
    remote.run(args=['sudo', grubset, str(entry_num), ])
def generate_legacy_grub_entry(remote, newversion):
    """
    This will likely need to be used for ceph kernels as well
    as legacy grub rpm distros don't have an easy way of selecting
    a kernel just via a command.  This generates an entry in legacy
    grub for a new kernel version using the existing entry as a base.

    :returns: list of lines forming the new grub.conf contents
    """
    grubconf = teuthology.get_file(remote, '/boot/grub/grub.conf', True)
    titleline = ''
    rootline = ''
    kernelline = ''
    initline = ''
    kernelversion = ''
    linenum = 0
    titlelinenum = 0
    # Grab first kernel entry (title/root/kernel/init lines)
    for line in grubconf.split('\n'):
        if re.match('^title', line):
            titleline = line
            titlelinenum = linenum
        if re.match('(^\s+)root', line):
            rootline = line
        if re.match('(^\s+)kernel', line):
            kernelline = line
            for word in line.split(' '):
                if 'vmlinuz' in word:
                    kernelversion = word.split('vmlinuz-')[-1]
        if re.match('(^\s+)initrd', line):
            initline = line
        if (kernelline != '') and (initline != ''):
            break
        else:
            linenum += 1
    # insert new entry into grubconfnew list:
    linenum = 0
    newgrubconf = []
    for line in grubconf.split('\n'):
        line = line.rstrip('\n')
        if linenum == titlelinenum:
            # emit a copy of the template entry with the new version
            # substituted in, just before the entry it was based on
            newtitle = re.sub(kernelversion, newversion, titleline)
            newroot = re.sub(kernelversion, newversion, rootline)
            newkernel = re.sub(kernelversion, newversion, kernelline)
            newinit = re.sub(kernelversion, newversion, initline)
            newgrubconf.append(newtitle)
            newgrubconf.append(newroot)
            newgrubconf.append(newkernel)
            newgrubconf.append(newinit)
            newgrubconf.append('')
            newgrubconf.append(line)
        else:
            newgrubconf.append(line)
        linenum += 1
    return newgrubconf
def get_image_version(remote, path):
    """
    Get kernel image version from (rpm or deb) package.

    :param path: (rpm or deb) package path
    :returns: version string parsed from the packaged /boot/vmlinuz-* entry
    :raises UnsupportedPackageTypeError: for non-rpm/deb remotes
    """
    if remote.os.package_type == 'rpm':
        proc = remote.run(
            args=[
                'rpm',
                '-qlp',
                path
            ],
            stdout=StringIO())
    elif remote.os.package_type == 'deb':
        proc = remote.run(
            args=[
                'dpkg-deb',
                '-c',
                path
            ],
            stdout=StringIO())
    else:
        raise UnsupportedPackageTypeError(remote)
    files = proc.stdout.getvalue()
    # NOTE(review): if the package lists no /boot/vmlinuz-* file,
    # 'version' is never bound and the lines below raise NameError.
    for file in files.split('\n'):
        if '/boot/vmlinuz-' in file:
            version = file.split('/boot/vmlinuz-')[1]
            break
    log.debug("get_image_version: %s", version)
    return version
def get_latest_image_version_rpm(remote):
    """
    Get kernel image version of the newest kernel rpm package.
    Used for distro case.
    """
    proc = remote.run(
        args=[
            'rpm',
            '-q',
            'kernel',
            '--last', # order by install time
        ], stdout=StringIO())
    out = proc.stdout.getvalue()
    # first token of the first line is the most recently installed package
    version = out.split()[0].split('kernel-')[1]
    log.debug("get_latest_image_version_rpm: %s", version)
    return version
def get_latest_image_version_deb(remote, ostype):
    """
    Get kernel image version of the newest kernel deb package.
    Used for distro case.

    Round-about way to get the newest kernel uname -r compliant version
    string from the virtual package which is the newest kernel for
    debian/ubuntu.

    :returns: version string, or '' if *ostype* matches neither distro
    """
    output = StringIO()
    newest = ''
    # Depend of virtual package has uname -r output in package name. Grab that.
    if 'debian' in ostype:
        remote.run(args=['sudo', 'apt-get', '-y', 'install',
                         'linux-image-amd64'], stdout=output)
        remote.run(args=['dpkg', '-s', 'linux-image-amd64'], stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
        output.close()
        return newest
    # Ubuntu is a depend in a depend.
    if 'ubuntu' in ostype:
        try:
            remote.run(args=['sudo', 'apt-get', '-y', 'install',
                             'linux-image-current-generic'])
            remote.run(args=['dpkg', '-s', 'linux-image-current-generic'],
                       stdout=output)
            for line in output.getvalue().split('\n'):
                if 'Depends:' in line:
                    depends = line.split('Depends: ')[1]
            remote.run(args=['sudo', 'apt-get', '-y', 'install',
                             depends])
            remote.run(args=['dpkg', '-s', depends], stdout=output)
        except run.CommandFailedError:
            # Non precise ubuntu machines (like trusty) don't have
            # linux-image-current-generic so use linux-image-generic instead.
            remote.run(args=['sudo', 'apt-get', '-y', 'install',
                             'linux-image-generic'], stdout=output)
            remote.run(args=['dpkg', '-s', 'linux-image-generic'],
                       stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
        if ',' in newest:
            newest = newest.split(',')[0]
    output.close()
    return newest
def get_sha1_from_pkg_name(path):
    """
    Get commit hash (min 7 max 40 chars) from (rpm or deb) package name.

    Sample basenames of "make deb-pkg" and "make rpm-pkg" packages:
      linux-image-3.10.0-ceph-rhdeb-00050-g687d1a5f0083_3.10.0-ceph-rhdeb-00050-g687d1a5f0083-6_amd64.deb
      kernel-3.10.0_ceph_rhrpm_00050_g687d1a5f0083-8.x86_64.rpm
    Make sure kernel was built with CONFIG_LOCALVERSION_AUTO=y.

    :param path: (rpm or deb) package path (only basename is used)
    :returns: sha1 string, or None if none is embedded in the name
    """
    basename = os.path.basename(path)
    # raw string literal: '\d' in a plain string is an invalid escape
    # sequence (DeprecationWarning on modern Pythons); behavior unchanged
    match = re.search(r'\d+[-_]g([0-9a-f]{7,40})', basename)
    sha1 = match.group(1) if match else None
    log.debug("get_sha1_from_pkg_name: %s -> %s -> %s", path, basename, sha1)
    return sha1
def remove_old_kernels(ctx):
for remote in ctx.cluster.remotes.keys():
package_type = remote.os.package_type
if package_type == 'rpm':
log.info("Removing old kernels from %s", remote)
args = ['sudo', 'package-cleanup', '-y', '--oldkernels']
remote.run(args=args)
def task(ctx, config):
    """
    Make sure the specified kernel is installed.
    This can be a branch, tag, or sha1 of ceph-client.git or a local
    kernel package.

    To install ceph-client.git branch (default: master)::

        kernel:
          branch: testing

    To install ceph-client.git tag::

        kernel:
          tag: v3.18

    To install ceph-client.git sha1::

        kernel:
          sha1: 275dd19ea4e84c34f985ba097f9cddb539f54a50

    To install from a koji build_id::

        kernel:
          koji: 416058

    To install from a koji task_id::

        kernel:
          koji_task: 9678206

    When installing from koji you also need to set the urls for koji hub
    and the koji root in your teuthology.yaml config file. These are shown
    below with their default values::

        kojihub_url: http://koji.fedoraproject.org/kojihub
        kojiroot_url: http://kojipkgs.fedoraproject.org/packages

    When installing from a koji task_id you also need to set koji_task_url,
    which is the base url used to download rpms from koji task results::

        koji_task_url: https://kojipkgs.fedoraproject.org/work/

    To install local rpm (target should be an rpm system)::

        kernel:
          rpm: /path/to/appropriately-named.rpm

    To install local deb (target should be a deb system)::

        kernel:
          deb: /path/to/appropriately-named.deb

    For rpm: or deb: to work it should be able to figure out sha1 from
    local kernel package basename, see get_sha1_from_pkg_name().  This
    means that you can't for example install a local tag - package built
    with upstream {rpm,deb}-pkg targets won't have a sha1 in its name.

    If you want to schedule a run and use a local kernel package, you
    have to copy the package over to a box teuthology workers are
    running on and specify a path to the package on that box.

    All of the above will install a specified kernel on all targets.
    You can specify different kernels for each role or for all roles of
    a certain type (more specific roles override less specific, see
    normalize_config() for details)::

        kernel:
          client:
            tag: v3.0
          osd:
            branch: btrfs_fixes
          client.1:
            branch: more_specific
          osd.3:
            branch: master

    To wait 3 minutes for hosts to reboot (default: 300)::

        kernel:
          timeout: 180

    To enable kdb::

        kernel:
          kdb: true

    :param ctx: Context
    :param config: Configuration
    """
    assert config is None or isinstance(config, dict), \
        "task kernel only supports a dictionary for configuration"
    timeout = 300
    if config is not None and 'timeout' in config:
        timeout = config.pop('timeout')
    config = normalize_config(ctx, config)
    validate_config(ctx, config)
    log.info('config %s' % config)
    need_install = {}  # sha1 to dl, or path to rpm or deb
    need_version = {}  # utsrelease or sha1
    kdb = {}
    for role, role_config in config.iteritems():
        # gather information about this remote
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        system_type, system_ver = role_remote.os.name, role_remote.os.version
        if role_config.get('rpm') or role_config.get('deb'):
            # We only care about path - deb: vs rpm: is meaningless,
            # rpm: just happens to be parsed first.  Nothing is stopping
            # 'deb: /path/to/foo.rpm' and it will work provided remote's
            # os.package_type is 'rpm' and vice versa.
            path = role_config.get('rpm')
            if not path:
                path = role_config.get('deb')
            sha1 = get_sha1_from_pkg_name(path)
            assert sha1, "failed to extract commit hash from path %s" % path
            if need_to_install(ctx, role, sha1):
                need_install[role] = path
                need_version[role] = sha1
        elif role_config.get('sha1') == 'distro':
            if need_to_install_distro(ctx, role):
                need_install[role] = 'distro'
                need_version[role] = 'distro'
        elif role_config.get("koji") or role_config.get('koji_task'):
            # installing a kernel from koji
            build_id = role_config.get("koji")
            task_id = role_config.get("koji_task")
            if role_remote.os.package_type != "rpm":
                msg = (
                    "Installing a kernel from koji is only supported "
                    "on rpm based systems. System type is {system_type}."
                )
                msg = msg.format(system_type=system_type)
                log.error(msg)
                ctx.summary["failure_reason"] = msg
                ctx.summary["status"] = "dead"
                raise ConfigError(msg)
            # FIXME: this install should probably happen somewhere else
            # but I'm not sure where, so we'll leave it here for now.
            install_package('koji', role_remote)
            if build_id:
                # get information about this build from koji
                build_info = get_koji_build_info(build_id, role_remote, ctx)
                version = "{ver}-{rel}.x86_64".format(
                    ver=build_info["version"],
                    rel=build_info["release"]
                )
            elif task_id:
                # get information about results of this task from koji
                task_result = get_koji_task_result(task_id, role_remote, ctx)
                # this is not really 'build_info', it's a dict of information
                # about the kernel rpm from the task results, but for the sake
                # of reusing the code below I'll still call it that.
                build_info = get_koji_task_rpm_info(
                    'kernel',
                    task_result['rpms']
                )
                # add task_id so we can know later that we're installing
                # from a task and not a build.
                build_info["task_id"] = task_id
                version = build_info["version"]
            if need_to_install(ctx, role, version):
                need_install[role] = build_info
                need_version[role] = version
        else:
            # branch/tag/sha1 case: resolve a gitbuilder binary url
            package_type = role_remote.os.package_type
            larch = role_remote.arch
            if package_type == 'rpm':
                if '.' in system_ver:
                    system_ver = system_ver.split('.')[0]
                ldist = '{system_type}{system_ver}'.format(system_type=system_type, system_ver=system_ver)
            if package_type == 'deb':
                system_ver = role_remote.os.codename
                ldist = '{system_ver}'.format(system_ver=system_ver)
            sha1, base_url = teuthology.get_ceph_binary_url(
                package='kernel',
                branch=role_config.get('branch'),
                tag=role_config.get('tag'),
                sha1=role_config.get('sha1'),
                flavor='basic',
                format=package_type,
                dist=ldist,
                arch=larch,
            )
            log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
            if need_to_install(ctx, role, sha1):
                # prefer the full utsrelease string when gitbuilder has it
                version = sha1
                version_url = urlparse.urljoin(base_url, 'version')
                try:
                    version_fp = urllib2.urlopen(version_url)
                    version = version_fp.read().rstrip('\n')
                    version_fp.close()
                except urllib2.HTTPError:
                    log.debug('failed to get utsrelease string using url {url}'.format(
                        url=version_url))
                if not version:
                    raise VersionNotFoundError("{url} is empty!".format(
                        url=version_url))
                need_install[role] = sha1
                need_version[role] = version
        # enable or disable kdb if specified, otherwise do not touch
        if role_config.get('kdb') is not None:
            kdb[role] = role_config.get('kdb')
    remove_old_kernels(ctx)
    if need_install:
        install_firmware(ctx, need_install)
        download_kernel(ctx, need_install)
        install_and_reboot(ctx, need_install)
        wait_for_reboot(ctx, need_version, timeout)
    enable_disable_kdb(ctx, kdb)
| {
"content_hash": "960c0cb91d3dd67a19644a41afcd0d06",
"timestamp": "",
"source": "github",
"line_count": 1213,
"max_line_length": 109,
"avg_line_length": 36.723000824402305,
"alnum_prop": 0.5289707037826916,
"repo_name": "t-miyamae/teuthology",
"id": "b00bb2d4350eca88b364f0425739e1970863d2c4",
"size": "44545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teuthology/task/kernel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "728821"
},
{
"name": "Shell",
"bytes": "9833"
}
],
"symlink_target": ""
} |
import httplib2
import json
import datetime
import hmac
from hashlib import sha1,md5
import base64
class Mailin:
""" This is the Mailin client class
"""
    def __init__(self,base_url,access_key,secret_key):
        """
        Create a client bound to *base_url*, signing every request with
        the given API access/secret key pair.
        """
        self.base_url = base_url
        self.access_key = access_key
        self.secret_key = secret_key
    def do_request(self,resource,method,indata):
        """
        Perform a signed HTTP request against the API and return the
        decoded JSON response.

        :param resource: API resource path, appended to base_url
        :param method: HTTP verb ('GET', 'POST', 'PUT' or 'DELETE')
        :param indata: request body (JSON string), or "" for no body
        :returns: parsed JSON response
        """
        url = self.base_url + "/" + resource
        # NOTE(review): SSL certificate validation is disabled here --
        # confirm this is intentional before production use.
        h = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
        # Authorization header
        content_type = "application/json"
        md5_content = ""
        if indata!="":
            md5_content = md5(indata).hexdigest()
        c_date_time = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p")
        # sign verb, body MD5, content type, date and URL with HMAC-SHA1
        # keyed on the secret key, then base64-encode the hex digest
        sign_string = method+"\n"+md5_content+"\n"+content_type+"\n"+c_date_time+"\n"+url
        hashed = hmac.new(self.secret_key,sign_string.encode('utf8'),sha1)
        signature = base64.b64encode(hashed.hexdigest())
        r,c = h.request(url,method,body=indata,headers={'X-mailin-date':c_date_time,'content-type':content_type,'Authorization':self.access_key+":"+signature})
        return json.loads(c)
def get(self,resource,indata):
return self.do_request(resource,"GET",indata)
def post(self,resource,indata):
return self.do_request(resource,"POST",indata)
def put(self,resource,indata):
return self.do_request(resource,"PUT",indata)
def delete(self,resource,indata):
return self.do_request(resource,"DELETE",indata)
def get_account(self,):
return self.get("account","")
def get_smtp_details(self,):
return self.get("account/smtpdetail","")
def create_child_account(self,email,password,company_org,first_name,last_name,credits,associate_ip):
return self.post("account",json.dumps({"child_email":email,"password":password,"company_org":company_org,"first_name":first_name,"last_name":last_name,"credits":credits,"associate_ip":associate_ip,}))
def update_child_account(self,child_authkey,company_org,first_name,last_name,password,associate_ip,disassociate_ip):
return self.put("account",json.dumps({"auth_key":child_authkey,"company_org":company_org,"first_name":first_name,"last_name":last_name,"password":password,"associate_ip":associate_ip,"disassociate_ip":disassociate_ip}))
def delete_child_account(self,child_authkey):
return self.delete("account/" + child_authkey,"")
def get_child_account(self,child_authkey):
return self.post("account/getchild",json.dumps({"auth_key":child_authkey}))
def add_remove_child_credits(self,child_authkey,add_credits,remove_credits):
return self.post("account/addrmvcredit",json.dumps({"auth_key":child_authkey,"add_credit":add_credits,"rmv_credit":remove_credits}))
def send_sms(self,to,from_name,text,web_url,tag,type):
return self.post("sms",json.dumps({"text":text,"tag":tag,"web_url":web_url,"from":from_name,"to":to,"type":type}))
def create_sms_campaign(self,camp_name,sender,content,bat_sent,listids,exclude_list,scheduled_date):
return self.post("sms",json.dumps({"name":camp_name,"sender":sender,"content":content,"bat":bat_sent,"listid":listids,"exclude_list":exclude_list,"scheduled_date":scheduled_date}))
def update_sms_campaign(self,id,camp_name,sender,content,bat_sent,listids,exclude_list,scheduled_date):
return self.put("sms/" + str(id),json.dumps({"name":camp_name,"sender":sender,"content":content,"bat":bat_sent,"listid":listids,"exclude_list":exclude_list,"scheduled_date":scheduled_date}))
def send_bat_sms(self,campid,mobilephone):
return self.get("sms/" + str(campid),json.dumps({"to":mobilephone}))
def get_campaigns(self,type,status,page,page_limit):
if type == "" and status == "" and page == "" and page_limit == "":
return self.get("campaign/","")
else:
return self.get("campaign/type/" + type + "/status/" + status + "/page/" + page + "/page_limit/" + page_limit + "/","")
def get_campaign(self,id):
return self.get("campaign/" + str(id),"")
def create_campaign(self,category,from_name,name,bat_sent,html_content,html_url,listid,scheduled_date,subject,from_email,reply_to,to_field,exclude_list,attachmentUrl,inline_image):
return self.post("campaign",json.dumps({"category":category,"from_name":from_name,"name":name,"bat_sent":bat_sent,"html_content":html_content,"html_url":html_url,"listid":listid,"scheduled_date":scheduled_date,"subject":subject,"from_email":from_email,"reply_to":reply_to,"to_field":to_field,"exclude_list":exclude_list,"attachment_url":attachmentUrl,"inline_image":inline_image}))
def delete_campaign(self,id):
return self.delete("campaign/" + str(id),"")
def update_campaign(self,id,category,from_name,name,bat_sent,html_content,html_url,listid,scheduled_date,subject,from_email,reply_to,to_field,exclude_list,attachmentUrl,inline_image):
return self.put("campaign/" + str(id),json.dumps({"category":category,"from_name":from_name,"name":name,"bat_sent":bat_sent,"html_content":html_content,"html_url":html_url,"listid":listid,"scheduled_date":scheduled_date,"subject":subject,"from_email":from_email,"reply_to":reply_to,"to_field":to_field,"exclude_list":exclude_list,"attachment_url":attachmentUrl,"inline_image":inline_image}))
def campaign_report_email(self,id,lang,email_subject,email_to,email_content_type,email_bcc,email_cc,email_body):
return self.post("campaign/" + str(id) + "/report",json.dumps({"lang":lang,"email_subject":email_subject,"email_to":email_to,"email_content_type":email_content_type,"email_bcc":email_bcc,"email_cc":email_cc,"email_body":email_body}))
def campaign_recipients_export(self,id,notify_url,type):
return self.post("campaign/" + str(id) + "/recipients",json.dumps({"notify_url":notify_url,"type":type}))
def send_bat_email(self,campid,email_to):
return self.post("campaign/" + str(campid) + "/test",json.dumps({"emails":email_to}))
def create_trigger_campaign(self,category,from_name,name,bat_sent,html_content,html_url,listid,scheduled_date,subject,from_email,reply_to,to_field,exclude_list,recurring,attachmentUrl,inline_image):
return self.post("campaign",json.dumps({"category":category,"from_name":from_name,"trigger_name":name,"bat":bat_sent,"html_content":html_content,"html_url":html_url,"listid":listid,"scheduled_date":scheduled_date,"subject":subject,"from_email":from_email,"reply_to":reply_to,"to_field":to_field,"exclude_list":exclude_list,"recurring":recurring,"attachment_url":attachmentUrl,"inline_image":inline_image}))
def update_trigger_campaign(self,id,category,from_name,name,bat_sent,html_content,html_url,listid,scheduled_date,subject,from_email,reply_to,to_field,exclude_list,recurring,attachmentUrl,inline_image):
return self.put("campaign/" + str(id),json.dumps({"category":category,"from_name":from_name,"trigger_name":name,"bat":bat_sent,"html_content":html_content,"html_url":html_url,"listid":listid,"scheduled_date":scheduled_date,"subject":subject,"from_email":from_email,"reply_to":reply_to,"to_field":to_field,"exclude_list":exclude_list,"recurring":recurring,"attachment_url":attachmentUrl,"inline_image":inline_image}))
def campaign_share_link(self,campaign_ids):
return self.post("campaign/sharelink",json.dumps({"camp_ids":campaign_ids}))
def update_campaign_status(self,id,status):
return self.put("campaign/" + str(id) + "/updatecampstatus",json.dumps({"status":status}))
def get_processes(self,):
return self.get("process","")
def get_process(self,id):
return self.get("process/" + str(id),"")
def get_lists(self,):
return self.get("list","")
def get_list(self,id):
return self.get("list/" + str(id),"")
def create_list(self,list_name,list_parent):
return self.post("list",json.dumps({"list_name":list_name,"list_parent":list_parent}))
def delete_list(self,id):
return self.delete("list/" + str(id),"")
def update_list(self,id,list_name,list_parent):
return self.put("list/" + str(id),json.dumps({"list_name":list_name,"list_parent":list_parent}))
def add_users_list(self,id,users):
return self.post("list/" + str(id) + "/users",json.dumps({"users":users}))
def delete_users_list(self,id,users):
return self.delete("list/" + str(id) + "/delusers",json.dumps({"users":users}))
def send_email(self,to,subject,from_name,html,text,cc,bcc,replyto,attachment,headers):
return self.post("email",json.dumps({"cc":cc,"text":text,"bcc":bcc,"replyto":replyto,"html":html,"to":to,"attachment":attachment,"from":from_name,"subject":subject,"headers":headers}))
def get_webhooks(self,):
return self.get("webhook","")
def get_webhook(self,id):
return self.get("webhook/" + str(id),"")
def create_webhook(self,url,description,events):
return self.post("webhook",json.dumps({"url":url,"description":description,"events":events}))
def delete_webhook(self,id):
return self.delete("webhook/" + str(id),"")
def update_webhook(self,id,url,description,events):
return self.put("webhook/" + str(id),json.dumps({"url":url,"description":description,"events":events}))
def get_statistics(self,aggregate,tag,days,end_date,start_date):
return self.post("statistics",json.dumps({"aggregate":aggregate,"tag":tag,"days":days,"end_date":end_date,"start_date":start_date}))
def get_user(self,id):
return self.get("user/" + id,"")
def create_user(self,attributes,blacklisted,email,listid):
return self.post("user",json.dumps({"attributes":attributes,"blacklisted":blacklisted,"email":email,"listid":listid}))
def delete_user(self,id):
return self.delete("user/" + id,"")
def update_user(self,id,attributes,blacklisted,listid,listid_unlink):
return self.put("user/" + id,json.dumps({"attributes":attributes,"blacklisted":blacklisted,"listid":listid,"listid_unlink":listid_unlink}))
def import_users(self,url,listids,notify_url,name,folder_id):
return self.post("user/import",json.dumps({"url":url,"listids":listids,"notify_url":notify_url,"name":name,"list_parent":folder_id}))
def export_users(self,export_attrib,filter,notify_url):
return self.post("user/export",json.dumps({"export_attrib":export_attrib,"filter":filter,"notify_url":notify_url}))
def create_update_user(self,email,attributes,blacklisted,listid,listid_unlink,blacklisted_sms):
return self.post("user/createdituser",json.dumps({"email":email,"attributes":attributes,"blacklisted":blacklisted,"listid":listid,"listid_unlink":listid_unlink,"blacklisted_sms":blacklisted_sms}))
def get_attributes(self,):
return self.get("attribute","")
def get_attribute(self,type):
return self.get("attribute/" + type,"")
def create_attribute(self,type,data):
return self.post("attribute",json.dumps({"type":type,"data":data}))
def delete_attribute(self,type,data):
return self.post("attribute/" + type,json.dumps({"data":data}))
def get_report(self,limit,start_date,end_date,offset,date,days,email):
return self.post("report",json.dumps({"limit":limit,"start_date":start_date,"end_date":end_date,"offset":offset,"date":date,"days":days,"email":email}))
def get_folders(self,):
return self.get("folder","")
def get_folder(self,id):
return self.get("folder/" + str(id),"")
def create_folder(self,name):
return self.post("folder",json.dumps({"name":name}))
def delete_folder(self,id):
return self.delete("folder/" + str(id),"")
def update_folder(self,id,name):
return self.put("folder/" + str(id),json.dumps({"name":name}))
def delete_bounces(self,start_date,end_date,email):
return self.post("bounces",json.dumps({"start_date":start_date,"end_date":end_date,"email":email}))
def send_transactional_template(self,id,to,cc,bcc,attr,attachmentUrl,attachment):
return self.put("template/" + str(id),json.dumps({"cc":cc,"to":to,"attr":attr,"bcc":bcc,"attachment_url":attachmentUrl,"attachment":attachment}))
def create_template(self,from_name,name,bat_sent,html_content,html_url,subject,from_email,reply_to,to_field,status,attach):
return self.post("template",json.dumps({"from_name":from_name,"template_name":name,"bat":bat_sent,"html_content":html_content,"html_url":html_url,"subject":subject,"from_email":from_email,"reply_to":reply_to,"to_field":to_field,"status":status,"attachment":attach}))
def update_template(self,id,from_name,name,bat_sent,html_content,html_url,subject,from_email,reply_to,to_field,status,attach):
return self.put("template/" + str(id),json.dumps({"from_name":from_name,"template_name":name,"bat":bat_sent,"html_content":html_content,"html_url":html_url,"subject":subject,"from_email":from_email,"reply_to":reply_to,"to_field":to_field,"status":status,"attachment":attach}))
def get_senders(self,option):
return self.get("advanced",json.dumps({"option":option}))
def create_sender(self,sender_name,sender_email,ip_domain):
return self.post("advanced",json.dumps({"name":sender_name,"email":sender_email,"ip_domain":ip_domain}))
def update_sender(self,id,sender_name,sender_email,ip_domain):
return self.put("advanced/" + str(id),json.dumps({"name":sender_name,"email":sender_email,"ip_domain":ip_domain}))
def delete_sender(self,id):
return self.delete("advanced/" + str(id),"") | {
"content_hash": "97cb56435592b80cd96e0bf1b3a43625",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 420,
"avg_line_length": 78.46706586826348,
"alnum_prop": 0.7154304029304029,
"repo_name": "mailin-api/mailin-api-python",
"id": "9201e70584a0fb1d73ee4b0ae6d79dc6222827e8",
"size": "13104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "V1.0/mailin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66735"
}
],
"symlink_target": ""
} |
# Copyright (c) 2015 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from twisted.internet import reactor
from twisted.web import server, resource
import json
import logging
import logging.handlers
import sys
import socket
import os
from docker import Client
from netaddr import IPAddress, AddrFormatError
import netns
from ipam import SequentialAssignment, IPAMClient
_log = logging.getLogger(__name__)
# Container environment variables the adapter inspects on the create request.
ENV_IP = "CALICO_IP"
ENV_PROFILE = "CALICO_PROFILE"
# Orchestrator identifier recorded against endpoints in the datastore.
ORCHESTRATOR_ID = "docker"
hostname = socket.gethostname()
# TCP port the adapter listens on (loopback only; see __main__ below).
LISTEN_PORT = 2378
def setup_logging(logfile):
    """Configure console (INFO) and daily-rotating file (DEBUG) logging.

    The same configuration is propagated to the netns module so that all
    modules log to the same destinations.

    :param logfile: path of the rotating log file.
    """
    _log.setLevel(logging.DEBUG)
    fmt = logging.Formatter(
        '%(asctime)s [%(levelname)s] %(filename)s.%(name)s %(lineno)d: '
        '%(message)s')
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.INFO)
    console.setFormatter(fmt)
    rotating = logging.handlers.TimedRotatingFileHandler(
        logfile, when='D', backupCount=10)
    rotating.setLevel(logging.DEBUG)
    rotating.setFormatter(fmt)
    for handler in (console, rotating):
        _log.addHandler(handler)
    # Propagate to loaded modules
    netns.setup_logging(logfile)
class AdapterResource(resource.Resource):
    """Twisted web resource implementing a Powerstrip adapter for Calico.

    Powerstrip POSTs pre-hook/post-hook envelopes for intercepted Docker
    API calls.  The pre-hook rewrites container-create requests to
    NetworkMode "none"; the post-hook plumbs Calico networking into the
    container on start and patches "docker inspect" responses with the
    Calico-assigned addresses.
    """
    isLeaf = True
    def __init__(self):
        resource.Resource.__init__(self)
        # Init a Docker client, to save having to do so every time a request
        # comes in. We need to get the correct Docker socket to use. If
        # POWERSTRIP_UNIX_SOCKET is YES, that means Powerstrip has bound to
        # the default docker.socket and we should bind to the "real" Docker
        # socket to bypass Powerstrip and avoid request loops.
        docker_host = os.environ.get('DOCKER_HOST')
        enable_unix_socket = os.environ.get('POWERSTRIP_UNIX_SOCKET', "")
        if docker_host is None:
            # Default to assuming we've got a Docker socket bind-mounted into a
            # container we're running in.
            if "YES" in enable_unix_socket:
                docker_host = "unix:///host-var-run/docker.real.sock"
            else:
                docker_host = "unix:///host-var-run/docker.sock"
        if "://" not in docker_host:
            docker_host = "tcp://" + docker_host
        self.docker = Client(base_url=docker_host,
                             version="1.16")
        # Init an etcd client.
        self.datastore = IPAMClient()
    def render_POST(self, request):
        """
        Handle a pre-hook.
        """
        # Dispatches on the envelope "Type": pre-hook or post-hook.
        _log.info("render_POST called with %s", request)
        try:
            request_content = json.loads(request.content.read())
            if request_content["Type"] == "pre-hook":
                result = self._handle_pre_hook(request, request_content)
            elif request_content["Type"] == "post-hook":
                result = self._handle_post_hook(request, request_content)
            else:
                _log.error("Unsupported hook type: %s",
                           request_content["Type"])
                raise Exception("unsupported hook type %s" %
                                (request_content["Type"],))
            _log.debug("Result: %s", result)
            return result
        except:
            _log.exception("Failed to process POST")
            raise
    def _handle_pre_hook(self, request, request_content):
        """Rewrite a create request to net=none when CALICO_IP is set.

        Always returns the (possibly modified) ClientRequest envelope:
        the finally+return deliberately swallows any exception raised
        above, because an exception here would hang the reactor.
        """
        _log.info("Handling pre-hook")
        # Exceptions hang the Reactor, so ensure we handle all exceptions.
        client_request = {}
        try:
            client_request = request_content["ClientRequest"]
            if _calico_ip_in_request(client_request):
                # Calico IP was defined in the request, so override the net
                # portion of the HostConfig
                _client_request_net_none(client_request)
        except BaseException:
            _log.exception("Unexpected error handling pre-hook")
        finally:
            return json.dumps({"PowerstripProtocolVersion": 1,
                               "ModifiedClientRequest": client_request})
    def _handle_post_hook(self, request, request_content):
        """Act after Docker has processed the request.

        On container start: install (or reinstate) Calico endpoints.
        On "docker inspect" (json): patch IP info into the response.
        As in the pre-hook, the finally+return swallows exceptions so
        the reactor keeps running.
        """
        _log.debug("Post-hook response: %s", request_content)
        # Exceptions hang the Reactor, so ensure we handle all exceptions.
        server_response = {}
        try:
            # Extract ip, profile, master, docker_options
            client_request = request_content["ClientRequest"]
            server_response = request_content["ServerResponse"]
            request_uri = client_request['Request']
            request_path = request_uri.split('/')
            # Extract the container ID or name and request type.
            # TODO better URI parsing
            (_, version, _, cid_or_name, ctype) = request_uri.split("/", 4)
            _log.info("Request parameters: version:%s; cid:%s; ctype:%s",
                      version, cid_or_name, ctype)
            # Get the actual container ID, the URL may contain the name or
            # a short ID.
            cont = self.docker.inspect_container(cid_or_name)
            _log.debug("Container info: %s", cont)
            cid = cont["Id"]
            _log.debug("Container ID: %s", cid)
            if ctype == u'start':
                # /version/containers/id/start
                _log.debug('Intercepted container start request')
                self._install_or_reinstall_endpoints(client_request, cont, cid)
            elif ctype== 'json':
                # /version/containers/*/json
                _log.debug('Intercepted container json request')
                self._update_container_info(cid, server_response)
            else:
                _log.debug('Unrecognized path: %s', request_path)
        except BaseException:
            _log.exception('Unexpected error handling post-hook.')
        finally:
            output = json.dumps({
                "PowerstripProtocolVersion": 1,
                "ModifiedServerResponse": server_response
            })
            _log.debug("Returning output:\n%s", output)
            return output
    def _install_or_reinstall_endpoints(self, client_request, cont, cid):
        """
        Install or reinstall Calico endpoints based on whether we are
        restarting a container.

        :param client_request: Powerstrip ClientRequest object as dictionary
        from JSON.
        :param cont: The Docker container dictionary.
        :param cid: The ID of the container to install an endpoint in.
        :returns: None
        """
        # Grab the running pid from Docker
        pid = cont["State"]["Pid"]
        _log.debug('Container PID: %s', pid)
        # Grab the list of endpoints, if they exist.
        eps = self.datastore.get_endpoints(hostname=hostname, workload_id=cid)
        # No existing endpoints means a fresh start; otherwise a restart.
        if len(eps) == 0:
            self._install_endpoint(client_request, cont, cid, pid)
        else:
            self._reinstall_endpoints(cid, pid, eps)
        return
    def _install_endpoint(self, client_request, cont, cid, pid):
        """
        Install a Calico endpoint (veth) in the container referenced in the
        client request object.

        :param client_request: Powerstrip ClientRequest object as dictionary
        from JSON.
        :param cont: The Docker container dictionary.
        :param cid: The ID of the container to install an endpoint in.
        :param pid: The PID of the container process.
        :returns: None
        """
        try:
            _log.debug("Installing endpoint for cid %s", cid)
            # Attempt to parse out environment variables
            env_list = cont["Config"]["Env"]
            env_list = env_list if env_list is not None else []
            env_dict = env_to_dictionary(env_list)
            ip_str = env_dict[ENV_IP]
            profile = env_dict.get(ENV_PROFILE, None)
        except KeyError as e:
            # This error is benign for missing ENV_IP, since it means not to
            # set up Calico networking for this container.
            _log.info("Key error %s, request: %s", e, client_request)
            return
        # Just auto assign ipv4 addresses for now.
        if ip_str.lower() == "auto":
            # NOTE(review): assign_ipv4() returns None when no pool has a
            # free address; that None would be passed to set_up_endpoint
            # below — confirm downstream handling.
            ip = self.assign_ipv4()
        else:
            try:
                ip = IPAddress(ip_str)
            except AddrFormatError:
                _log.warning("IP address %s could not be parsed" % ip_str)
                return
            else:
                version = "v%s" % ip.version
                _log.debug('Attempting to assign IP%s address %s', version, ip)
                pools = self.datastore.get_ip_pools(version)
                pool = None
                for candidate_pool in pools:
                    if ip in candidate_pool:
                        pool = candidate_pool
                        _log.debug('Using IP pool %s', pool)
                        break
                if not pool:
                    _log.warning("Requested IP %s isn't in any configured "
                                 "pool. Container %s", ip, cid)
                    return
                # NOTE(review): a failed assignment only warns; execution
                # continues and the endpoint is still created below.
                if not self.datastore.assign_address(pool, ip):
                    _log.warning("IP address couldn't be assigned for "
                                 "container %s, IP=%s", cid, ip)
        next_hop_ips = self.datastore.get_default_next_hops(hostname)
        endpoint = netns.set_up_endpoint(ip=ip,
                                         hostname=hostname,
                                         orchestrator_id=ORCHESTRATOR_ID,
                                         cpid=pid,
                                         next_hop_ips=next_hop_ips)
        if profile is not None:
            # Auto-create the profile if it doesn't exist yet, then attach it.
            if not self.datastore.profile_exists(profile):
                _log.info("Autocreating profile %s", profile)
                self.datastore.create_profile(profile)
            _log.info("Adding container %s to profile %s", cid, profile)
            endpoint.profile_ids = [profile]
            _log.info("Finished adding container %s to profile %s",
                      cid, profile)
        self.datastore.set_endpoint(hostname, cid, endpoint)
        _log.info("Finished network for container %s, IP=%s", cid, ip)
        return
    def _reinstall_endpoints(self, cid, pid, eps):
        """
        Reinstate existing Calico endpoints (veths) for a restarted
        container process.

        :param cid: The ID of the container to install an endpoint in.
        :param pid: The PID of the container process.
        :param eps: The container endpoints.
        :returns: None
        """
        _log.debug("Re-install endpoints for container %s", cid)
        next_hop_ips = self.datastore.get_default_next_hops(hostname)
        for old_endpoint in eps:
            new_endpoint = netns.reinstate_endpoint(pid, old_endpoint,
                                                    next_hop_ips)
            self.datastore.update_endpoint(new_endpoint)
        _log.info("Finished network for container %s", cid)
        return
    def _update_container_info(self, cid, server_response):
        """
        Update the response for a */container/*/json (docker inspect) request.
        Since we've patched the docker networking using --net=none,
        docker inspect calls will not return any IP information. This is
        required for some orchestrators (such as Kubernetes).
        Insert the IP for this container into the config dict.

        :param cid: The ID of the container to install an endpoint in.
        :param server_response: The response from the Docker API, to be
        be updated.
        """
        _log.debug('Getting container config from etcd')
        try:
            # Get a single endpoint ID from the container, and use this to
            # get the Endpoint.
            ep_id = self.datastore.get_endpoint_id_from_cont(hostname, cid)
            ep = self.datastore.get_endpoint(endpoint_id=ep_id)
        except KeyError:
            _log.info('No workload found for container %s, '
                      'returning request unmodified.', cid)
            return
        _log.debug('Pre-load body:\n%s', server_response["Body"])
        # Tweak the contents of the NetworkSettings dictionary in the request
        # body. We use an arbitrary IPv4 / IPv6 address from the endpoint
        # network sets to fill in the IP information since the dictionary only
        # allows a single value for each.
        body = json.loads(server_response["Body"])
        net_settings = body['NetworkSettings']
        # Only single-host networks (/32, /128) are copied over.
        for ipv4_net in ep.ipv4_nets:
            if ipv4_net.prefixlen == 32:
                net_settings['IPAddress'] = str(ipv4_net.ip)
                break
        for ipv6_net in ep.ipv6_nets:
            if ipv6_net.prefixlen == 128:
                net_settings['GlobalIPv6Address'] = str(ipv6_net.ip)
                break
        net_settings["MacAddress"] = str(ep.mac)
        server_response['Body'] = json.dumps(body, separators=(',', ':'))
        _log.debug('Post-load body:\n%s', server_response["Body"])
    def assign_ipv4(self):
        """
        Assign a IPv4 address from the configured pools.

        :return: An IPAddress, or None if an IP couldn't be
        assigned
        """
        ip = None
        # For each configured pool, attempt to assign an IP before giving up.
        for pool in self.datastore.get_ip_pools("v4"):
            assigner = SequentialAssignment()
            ip = assigner.allocate(pool)
            if ip is not None:
                ip = IPAddress(ip)
                break
        return ip
def _calico_ip_in_request(client_request):
    """
    Examine a ClientRequest object to determine whether the ENV_IP
    environment variable is present.  Calico networking is only set up for
    container requests that define it.

    :param client_request: Powerstrip ClientRequest dictionary.
    :return: True if ENV_IP variable is defined, False otherwise.
    """
    try:
        # Body is passed as a string, so deserialize it to JSON.
        env = json.loads(client_request["Body"])["Env"]
    except KeyError:
        _log.warning("Client request object had no 'Env' in 'Body': %s",
                     client_request)
        return False
    _log.info("Request Env: %s", env)
    # env is a list of 'VAR=value' strings.  Match the variable name exactly
    # by including the '=' in the prefix (faster than a regex and avoids the
    # dependency).
    prefix = ENV_IP + "="
    return any(entry.startswith(prefix) for entry in env)
def _client_request_net_none(client_request):
    """
    Modify the client_request in place to set the net=none Docker option.

    :param client_request: Powerstrip ClientRequest object as dictionary from
    JSON
    :return: None
    """
    try:
        # Body is passed as a string, so deserialize it to JSON.
        body = json.loads(client_request["Body"])
        host_config = body["HostConfig"]
    except KeyError as e:
        _log.warning("Error setting net=none: %s, request was %s",
                     e, client_request)
        return
    _log.debug("Original NetworkMode: %s",
               host_config.get("NetworkMode", "<unset>"))
    host_config["NetworkMode"] = "none"
    # Re-serialize the updated body.
    client_request["Body"] = json.dumps(body)
def get_adapter():
    """Build the twisted Site that serves the adapter at /calico-adapter."""
    root_resource = resource.Resource()
    root_resource.putChild("calico-adapter", AdapterResource())
    return server.Site(root_resource)
def env_to_dictionary(env_list):
    """
    Parse environment variables into a dictionary for easy access.

    Each entry is split on the first '=' only, so values may themselves
    contain '='.  Later duplicates of a variable win.

    :param env_list: list of strings in the form "var=value"
    :return: a dictionary {"var": "value"}
    """
    return dict(entry.split("=", 1) for entry in env_list)
if __name__ == "__main__":
    # Entry point: configure logging, then serve the adapter forever.
    setup_logging("/var/log/calico/powerstrip-calico.log")
    # Listen only on the loopback so we don't expose the adapter outside the
    # host.
    reactor.listenTCP(LISTEN_PORT, get_adapter(), interface="127.0.0.1")
    reactor.run()
| {
"content_hash": "ab50a487435c6d56f1315f1690f5ab8f",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 79,
"avg_line_length": 38.68493150684932,
"alnum_prop": 0.5810316336166195,
"repo_name": "frostynova/calico-docker",
"id": "3dd606dc757b0bfface29ec54c75ddbacb3cc8f1",
"size": "16944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calico_containers/adapter/powerstrip_calico.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "268760"
},
{
"name": "Shell",
"bytes": "6283"
}
],
"symlink_target": ""
} |
import pytest
import yaml
from pathlib2 import Path
from rackattack.physical.alloc.priority import Priority, OutOfResourcesError
from rackattack.physical.alloc.freepool import FreePool
from rackattack.physical.host import Host
from rackattack.common.hosts import Hosts
from rackattack.common import globallock
# Directory holding the YAML fixture files, named "<data_type>__<test_name>".
DATA_PATH = "rackattack/physical/provider_tests/data/"
def _get_data(request, data_type):
    """
    Load YAML fixture data for the currently running test.

    Files under DATA_PATH are named "<data_type>__<test_name>".  A file whose
    second component is "default" acts as a fallback, used only while no
    test-specific file has been found.  The test name is derived from the
    pytest node name with any parametrization suffix ("[...]") stripped.

    :param request: pytest fixture request (supplies the test node name).
    :param data_type: first component of the file stem to match.
    :return: parsed YAML content, or None if no file matched.
    """
    # The test name does not depend on the file being inspected, so compute
    # it once instead of on every loop iteration.
    test_name = request.node.name.split('test_')[-1]
    if '[' in test_name:
        test_name = test_name[:test_name.index('[')]
    data_dir = Path(DATA_PATH)
    result = None
    # Renamed loop variable: `file` shadows the Python 2 builtin.
    for data_file in data_dir.iterdir():
        parts = data_file.stem.split('__')
        if parts[0] == data_type:
            if parts[1] == test_name or (parts[1] == 'default' and result is None):
                result = yaml.safe_load(data_file.read_text())
    return result
@pytest.fixture
def requirements(request):
    """Requirements data loaded from the YAML fixture files."""
    return _get_data(request, "requirements")
@pytest.fixture
def allocation_info(request):
    """Allocation-info data loaded from the YAML fixture files."""
    return _get_data(request, "allocation_info")
@pytest.fixture
def freePool(request, hosts, monkeypatch):
    """A FreePool pre-populated with every host from the hosts fixture."""
    # Neutralise the global-lock assertion so the pool can be exercised
    # without acquiring the real lock.
    monkeypatch.setattr(globallock, "assertLocked", lambda: True)
    pool = FreePool(hosts)
    for member in hosts.all():
        monkeypatch.setattr(member, 'setDestroyCallback', lambda x: None,
                            raising=False)
        pool.put(member)
    return pool
@pytest.fixture
def allocations(request):
    """An initially empty list of allocations."""
    return []
@pytest.fixture
def hosts(request):
    """A Hosts collection built from the YAML host definitions."""
    definitions = _get_data(request, "hosts")
    collection = Hosts()
    for kwargs in definitions['HOSTS']:
        # The current size of the collection doubles as the new host's index.
        host = Host(len(collection.all()), **kwargs)
        host._hostImplementation = host
        collection.add(host)
    return collection
@pytest.fixture
def priority(request, requirements, allocation_info, freePool, allocations, hosts):
    """A Priority instance.

    For tests whose name contains 'negative', construction is asserted to
    raise OutOfResourcesError and the fixture yields None instead.
    """
    args = (requirements, allocation_info, freePool, allocations, hosts)
    if 'negative' in request.node.name:
        with pytest.raises(OutOfResourcesError):
            Priority(*args)
        return None
    return Priority(*args)
| {
"content_hash": "4741790e662edc670815fffa2ddd0933",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 87,
"avg_line_length": 30.84722222222222,
"alnum_prop": 0.6915803692030617,
"repo_name": "Stratoscale/rackattack-physical",
"id": "150cbab55b450a48311440e7c30d9981762c5c79",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rackattack/physical/provider_tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1851"
},
{
"name": "M4",
"bytes": "688"
},
{
"name": "Makefile",
"bytes": "8407"
},
{
"name": "Python",
"bytes": "232666"
},
{
"name": "Shell",
"bytes": "6319"
}
],
"symlink_target": ""
} |
from nose2 import session
from nose2.plugins import prof
from nose2.events import StartTestRunEvent
from nose2.tests._common import Stub, TestCase
class TestProfPlugin(TestCase):
    """Unit tests for the nose2 profiler plugin."""
    tags = ['unit']
    def setUp(self):
        # Replace the real hotshot/stats modules with stubs so no actual
        # profiling happens; originals are kept for restoration in tearDown.
        self.plugin = prof.Profiler(session=session.Session())
        self.hotshot = prof.hotshot
        self.stats = prof.stats
        prof.hotshot = Stub()
        prof.stats = Stub()
    def tearDown(self):
        # Restore the real modules.
        prof.hotshot = self.hotshot
        prof.stats = self.stats
    def test_startTestRun_sets_executeTests(self):
        # The plugin should swap the event's executeTests callable for the
        # profiler's runcall wrapper.
        stub_profiler = Stub()
        stub_profiler.runcall = object()
        prof.hotshot.Profile = lambda filename: stub_profiler
        event = StartTestRunEvent(runner=None, suite=None, result=None,
                                  startTime=None, executeTests=None)
        self.plugin.startTestRun(event)
        assert event.executeTests is stub_profiler.runcall, \
            "executeTests was not replaced"
| {
"content_hash": "1cafcafdc28796aa2d33b0b873a295d1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 32.62068965517241,
"alnum_prop": 0.6405919661733616,
"repo_name": "little-dude/nose2",
"id": "8455d56dd36317931d826f72648473c73e5d6d9a",
"size": "946",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "nose2/tests/unit/test_prof_plugin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "449819"
},
{
"name": "Shell",
"bytes": "1125"
}
],
"symlink_target": ""
} |
from os.path import join, split
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration for the numpy.random subpackage.

    Builds the mtrand C extension (Mersenne Twister RNG and distribution
    sampling) and ships its header/test data.
    """
    from numpy.distutils.misc_util import Configuration, get_mathlibs
    config = Configuration('random',parent_package,top_path)
    def generate_libraries(ext, build_dir):
        # Build-time hook: extend the extension's libraries with the math
        # libraries, plus Advapi32 (Windows crypto API) when the compile/run
        # probe below shows we are targeting _WIN32.
        config_cmd = config.get_config_cmd()
        libs = get_mathlibs()
        tc = testcode_wincrypt()
        if config_cmd.try_run(tc):
            libs.append('Advapi32')
        ext.libraries.extend(libs)
        return None
    libs = []
    # Configure mtrand
    config.add_extension('mtrand',
                         sources=[join('mtrand', x) for x in
                                  ['mtrand.c', 'randomkit.c', 'initarray.c',
                                   'distributions.c']]+[generate_libraries],
                         libraries=libs,
                         depends = [join('mtrand','*.h'),
                                    join('mtrand','*.pyx'),
                                    join('mtrand','*.pxi'),
                                  ]
                        )
    config.add_data_files(('.', join('mtrand', 'randomkit.h')))
    config.add_data_dir('tests')
    return config
def testcode_wincrypt():
    """Return a tiny C probe program whose exit status reveals whether the
    compiler targets _WIN32 (exit 0 on Windows, 1 elsewhere)."""
    probe_source = """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
    return 0;
#else
    return 1;
#endif
}
"""
    return probe_source
if __name__ == '__main__':
    # Standalone build: delegate to numpy.distutils with this configuration.
    from numpy.distutils.core import setup
    setup(configuration=configuration)
| {
"content_hash": "be6f33bc9b1a5706fecf89154e93c4d8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 30.020408163265305,
"alnum_prop": 0.5159755268524813,
"repo_name": "houseind/robothon",
"id": "6ec65ee19be47848c877bbbf50b92dcf74356371",
"size": "1471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/random/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "291201"
},
{
"name": "C++",
"bytes": "381"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "Objective-C",
"bytes": "45218"
},
{
"name": "Python",
"bytes": "2778792"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db.models.signals import post_save
from django.dispatch import receiver
from dash.orgs.models import Org
from tracpro.contacts.models import DataField
@receiver(post_save, sender=Org)
def set_visible_data_fields(sender, instance, **kwargs):
    """Hook to update the visible DataFields for an org.

    Only acts when the saved Org carries a `_visible_data_fields` queryset
    (attached by the form/view before saving).
    """
    if not hasattr(instance, '_visible_data_fields'):
        return
    keys = instance._visible_data_fields.values_list('key', flat=True)
    DataField.objects.set_active_for_org(instance, keys)
| {
"content_hash": "81f0cf0630042f9f015bffa0d89292da",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 34,
"alnum_prop": 0.7426470588235294,
"repo_name": "xkmato/tracpro",
"id": "5c17c40080aab37d0a262f0b18f7330e13021752",
"size": "544",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tracpro/orgs_ext/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27726"
},
{
"name": "CoffeeScript",
"bytes": "10296"
},
{
"name": "HTML",
"bytes": "107840"
},
{
"name": "JavaScript",
"bytes": "25237"
},
{
"name": "Makefile",
"bytes": "1962"
},
{
"name": "Python",
"bytes": "406848"
},
{
"name": "SaltStack",
"bytes": "19566"
},
{
"name": "Scheme",
"bytes": "29815"
},
{
"name": "Shell",
"bytes": "205447"
}
],
"symlink_target": ""
} |
'''
Integration Test for one mn host with vm ha, force shutdown and recovery, check host auto connected
@author: SyZhao
'''
import os
import time

import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header

import test_stub
# Module-level handles shared between test(), env_recover() and
# error_cleanup().
vm = None       # plain test VM slot; never assigned in test() in this file
ha_vm = None    # the HA-enabled VM under test
mn_host = None  # list of host(s) the management-node VM was found on
def test():
    """Cold-stop the host running the MN VM; verify failover and reconnect.

    Steps:
      1. Create an HA VM and make sure the MN VM runs on exactly one host;
         co-locate them if necessary.
      2. Force (cold) shutdown that host.
      3. Verify the consul leader moves and the MN VM comes back on a
         different, single host.
      4. After the management server restarts, verify the downed host is
         reported Connected again, then clean up.
    """
    global vm
    global ha_vm
    global mn_host
    ha_vm = test_stub.create_ha_vm()
    ha_vm.check()
    ha_vm_host = test_lib.lib_get_vm_host(ha_vm.vm)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 1:
        test_util.test_fail('MN VM is running on %d host(s)' % len(mn_host))
    if ha_vm_host.managementIp != mn_host[0].ip_:
        conditions = res_ops.gen_query_conditions('managementIp', '=', mn_host[0].ip_)
        host = res_ops.query_resource(res_ops.HOST, conditions)
        # NOTE(review): migrate() is invoked on the *host* inventory returned
        # by lib_get_vm_host, not on ha_vm — looks like it should be
        # ha_vm.migrate(host[0].uuid); confirm against the test_stub API.
        ha_vm_host.migrate(host[0].uuid)
    test_util.test_logger("force shutdown host [%s] that mn vm is running on" % (mn_host[0].ip_))
    test_stub.stop_host(mn_host[0], test_lib.all_scenario_config, 'cold')
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on another host")
    time.sleep(20)
    # The consul leader must have moved off the downed host.
    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "" or new_mn_host_ip == mn_host[0].ip_:
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host[0].ip_))
    # Poll up to 60 * 5s for the MN VM to settle on exactly one host.
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)
    # The cold-stopped host should auto-reconnect once MN is back.
    conditions = res_ops.gen_query_conditions('managementIp', '=', mn_host[0].ip_)
    host = res_ops.query_resource(res_ops.HOST, conditions)
    if host[0].status != "Connected":
        test_util.test_fail("Target host:%s is not connected as expected." %(host[0].uuid))
    ha_vm.destroy()
    test_util.test_pass('Create VM Test Success')
# Always invoked by the harness, regardless of the test outcome.
def env_recover():
    """Power the force-stopped host back on and wait for MN HA to settle."""
    target = mn_host[0]
    test_util.test_logger("recover host: %s" % (target.ip_,))
    test_stub.recover_host(target, test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
# Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort teardown of any VMs created by test().

    Fix: the original only destroyed ha_vm inside ``if vm:``, but ``vm`` is
    never assigned in test(), so the HA VM leaked on every failure.  Each VM
    is now destroyed independently, and destroy errors are still swallowed
    (this is cleanup on an already-failing run).
    """
    global vm
    global ha_vm
    if vm:
        try:
            vm.destroy()
        except Exception:
            pass
    if ha_vm:
        try:
            ha_vm.destroy()
        except Exception:
            pass
| {
"content_hash": "828a799b8add292032ffa160f74c47a4",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 150,
"avg_line_length": 40.4367816091954,
"alnum_prop": 0.6498010233086982,
"repo_name": "zstackio/zstack-woodpecker",
"id": "6641215a4658358e3107d91245b3677e67b1e8f6",
"size": "3518",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mn_ha/test_one_mn_host_with_vm_ha_force_stop_auto_connect.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
import sys
import os
import platform
import shutil
import xml.etree.ElementTree
import subprocess
# Build requirements that apply only when the ICU backend is selected.
ICU_MIN_VERSION = "59.1"
UNICODE_VERSION = "10.0"

# Library version; also baked into the generated Makefile and the soname.
SESHAT_VERSION_MAJOR = 0
SESHAT_VERSION_MINOR = 1
SESHAT_VERSION_PATCH = 0
SESHAT_VERSION = "{}.{}.{}".format(
    SESHAT_VERSION_MAJOR,
    SESHAT_VERSION_MINOR,
    SESHAT_VERSION_PATCH)

# Mutable build configuration; adjusted by CLI flags and the detect_*
# helpers, then substituted into makefile_template by the __main__ driver.
options = {
    'SESHAT_ICU_BACKEND': False,
    'SESHAT_IGNORE_ICU_VERSION': False,
    'SESHAT_INFO_FLAGS': '-DSESHAT_BUILD_DATE=\\"`date -u +%Y-%m-%dT%H:%M:%SZ`\\"',
    'CXXFLAGS': '-Wall',
}

# Template for the generated Makefile.  Recipe lines carry literal "\t"
# escapes (make requires hard tabs); {…} placeholders are filled via
# str.format by the __main__ driver.
makefile_template = '''# This file is generated by configure.py
VERSION = {seshat_version}
VERSION_MAJOR = {seshat_version_major}
VERSION_MINOR = {seshat_version_minor}
VERSION_PATCH = {seshat_version_patch}
OBJ = {m_OBJ_LIST}
CXX = {m_CXX}
CXXFLAGS = {m_CXXFLAGS}
SESHAT_INFO_FLAGS = {m_SESHAT_INFO_FLAGS}
export CXX
default: $(OBJ)
\tmkdir -p lib
\t$(CXX) -std=c++11 -shared \
-o lib/libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH) \
-Wl,-soname,libseshat.so.$(VERSION) \
$^ -Iinclude
\trm -f lib/libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR)
\trm -f lib/libseshat.so.$(VERSION_MAJOR)
\trm -f lib/libseshat.so
\tln -s libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_PATCH) \
lib/libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR)
\tln -s libseshat.so.$(VERSION_MAJOR).$(VERSION_MINOR) \
lib/libseshat.so.$(VERSION_MAJOR)
\tln -s libseshat.so.$(VERSION_MAJOR) \
lib/libseshat.so
\t$(MAKE) -C tools/
test:
\t$(MAKE) -C tests/
static: $(OBJ)
\tmkdir -p lib
\tar rcs lib/libseshat.a $^
src/info.o: src/info.cpp
\t$(CXX) -std=c++11 $(CXXFLAGS) $(SESHAT_INFO_FLAGS) -c -Iinclude -o $@ $<
src/%.o: src/%.cpp
\t$(CXX) -std=c++11 $(CXXFLAGS) -c -Iinclude -o $@ $<
install:
\tcp -P lib/libseshat.so* /usr/local/lib/
clean:
\trm -f src/*.o
\trm -f src/ucd/*.o
\trm -f src/emoji/*.o
\trm -f src/icu/*.o
\trm -rf lib
\t$(MAKE) -C tools/ -f Makefile clean
\t$(MAKE) -C tests/ -f Makefile clean
'''
# Accumulates the .o targets substituted into the Makefile's OBJ variable.
obj_list = []


def append_obj(cpp_path):
    """Queue an .o target for every .cpp source found in *cpp_path*."""
    base = os.path.normpath(cpp_path)
    for entry in os.listdir(cpp_path):
        stem, extension = os.path.splitext(entry)
        if extension == '.cpp':
            obj_list.append(base + '/' + stem + '.o')
def append_src():
    """Queue object files for the core sources under ./src."""
    append_obj('./src')
def append_ucd():
    """Queue object files for the built-in UCD and emoji implementations."""
    for source_dir in ('./src/ucd', './src/emoji'):
        append_obj(source_dir)
def append_icu():
    """Queue ICU-backend objects plus the data tables they still need."""
    append_obj('./src/icu')
    for data_obj in ('src/ucd/normalization_props.o',
                     'src/ucd/dm.o',
                     'src/emoji/data.o'):
        obj_list.append(data_obj)
# Detect platform
def detect_platform():
    """Add platform-dependent compiler flags to options['CXXFLAGS'].

    Uses platform.architecture() — which reports the *interpreter's*
    bitness, taken here as a proxy for the OS — and adds -fPIC for 64-bit
    targets, except on Cygwin where it is not wanted.
    """
    is_64bit = platform.architecture()[0] == '64bit'
    if is_64bit and sys.platform != 'cygwin':
        options['CXXFLAGS'] += ' -fPIC'
# Detect compiler
def detect_compiler():
    """Choose a C++ compiler and record it in options['CXX'].

    Preference order: the $CXX environment variable, then clang++, then
    g++ on PATH.  Exits with status 1 when none is found.

    Fix: the no-compiler error message was garbled English ("It seems any
    C++ compiler installed in this system."); reworded.
    """
    cxx = os.getenv('CXX')
    if cxx is not None:
        print('CXX environment variable is set as "{}".'.format(cxx))
    elif shutil.which('clang++') is not None:
        cxx = 'clang++'
    elif shutil.which('g++') is not None:
        cxx = 'g++'
    else:
        print('It seems no C++ compiler is installed in this system.')
        exit(1)
    options['CXX'] = cxx
# Detect ICU version
def _version_tuple(version_string):
    """Split a dotted version string into a tuple of ints for comparison."""
    return tuple(int(part) for part in version_string.split('.'))


def detect_icu():
    """Check via `icuinfo` that the installed ICU is recent enough.

    Exits with status 1 when icuinfo is missing or the installed ICU
    version is older than ICU_MIN_VERSION, unless
    options['SESHAT_IGNORE_ICU_VERSION'] is set.

    Fix: version components were compared as *strings* (lists from
    str.split), which breaks once digit counts differ — e.g.
    ['100', '0'] < ['59', '1'] lexicographically.  Components are now
    compared as integer tuples.
    """
    if shutil.which('icuinfo') is None:
        print('icuinfo: command not found.')
        exit(1)
    icu_info = {}
    icuinfo = subprocess.check_output('icuinfo').decode()
    # icuinfo prints XML plus trailing noise; keep text up to the last '>'.
    icuinfo = icuinfo[:icuinfo.rindex('>') + 1]
    # Re-escape bare ampersands so the XML parser accepts the output.
    icuinfo = icuinfo.replace('&', '&amp;')
    tree = xml.etree.ElementTree.fromstring(icuinfo)
    for e in tree:
        icu_info[e.get('name')] = e.text
    icu_version = _version_tuple(icu_info['version'])
    min_version = _version_tuple(ICU_MIN_VERSION)
    uni_version = _version_tuple(icu_info['version.unicode'])
    min_uni_version = _version_tuple(UNICODE_VERSION)
    if icu_version < min_version:
        if options['SESHAT_IGNORE_ICU_VERSION']:
            return
        print('Seshat requires ICU version {} or later, but version installed your system is {}'.format(ICU_MIN_VERSION, icu_info['version']))
        exit(1)
    if uni_version < min_uni_version:
        if options['SESHAT_IGNORE_ICU_VERSION']:
            return
        # NOTE(review): unlike the ICU-version branch above, this branch
        # only prints and does not exit(1); preserved as-is — confirm
        # whether the missing exit is intentional.
        print('Seshat requires ICU which supports Unicode version {} or later, but ICU installed your system supports until {}'.format(UNICODE_VERSION, icu_info['version.unicode']))
# Print options
def print_options():
    """Dump the resolved build configuration, one KEY=VALUE per line."""
    for key in options:
        print('{}={}'.format(key, options[key]))
def print_help():
    """Print usage information, then terminate via SystemExit."""
    usage_lines = (
        'Usage: ./configure.py [--help] <arguments>',
        'Arguments',
        ' --help print this help',
        ' --icu-backend use ICU as backend instead of seshat',
        ' implementation',
        ' --ignore-icu-version',
        ' ignore ICU version check',
    )
    for line in usage_lines:
        print(line)
    exit()
if __name__ == '__main__':
    # --help short-circuits everything else.
    if len(sys.argv) > 1:
        if sys.argv[1] == '--help':
            print_help()
    if '--icu-backend' in sys.argv:
        options['SESHAT_ICU_BACKEND'] = True
        options['CXXFLAGS'] += ' -DSESHAT_ICU_BACKEND'
    if '--ignore-icu-version' in sys.argv:
        options['SESHAT_IGNORE_ICU_VERSION'] = True
    # Collect object-file targets for the selected backend.
    append_src()
    if options['SESHAT_ICU_BACKEND']:
        detect_icu()
        append_icu()
    else:
        append_ucd()
    detect_platform()
    detect_compiler()
    print_options()
    output = makefile_template.format(
        seshat_version=SESHAT_VERSION,
        seshat_version_major=SESHAT_VERSION_MAJOR,
        seshat_version_minor=SESHAT_VERSION_MINOR,
        seshat_version_patch=SESHAT_VERSION_PATCH,
        m_OBJ_LIST=' '.join(obj_list),
        m_CXXFLAGS=options['CXXFLAGS'],
        m_CXX=options['CXX'],
        m_SESHAT_INFO_FLAGS=options['SESHAT_INFO_FLAGS'])
    # Fix: use a context manager so the Makefile handle is closed even if
    # write() raises (the original open/write/close leaked on error);
    # also replaced the `== True` comparison with plain truthiness.
    with open('Makefile', 'w') as makefile:
        makefile.write(output)
| {
"content_hash": "28c8bc62163362e1de9e1825f817fc49",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 185,
"avg_line_length": 30.028985507246375,
"alnum_prop": 0.5986164736164736,
"repo_name": "hardboiled65/Seshat",
"id": "9bb2293d5e4cdd8bc670444a4709b512a638be9f",
"size": "6296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configure.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "240"
},
{
"name": "C++",
"bytes": "4865890"
},
{
"name": "Makefile",
"bytes": "2365"
},
{
"name": "Python",
"bytes": "88505"
}
],
"symlink_target": ""
} |
import datetime
from oslo.config import cfg
from nova import compute
from nova.compute import flavors
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
from nova.openstack.common import timeutils
from nova import quota
from nova import test
import nova.tests.image.fake
# Global oslo.config handle; compute_driver must be registered here so the
# test cases below can override it via self.flags().
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
class QuotaIntegrationTestCase(test.TestCase):
    """End-to-end quota enforcement tests against the fake virt driver.

    setUp pins the quotas (2 instances, 4 cores, 1 floating IP) so each
    test can deterministically push one resource over its limit and assert
    the exact QuotaError kwargs raised by compute.API().create().
    Python 2 syntax (``except X, e`` / ``xrange``) is preserved as-is.
    """

    def setUp(self):
        super(QuotaIntegrationTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   quota_instances=2,
                   quota_cores=4,
                   quota_floating_ips=1,
                   network_manager='nova.network.manager.FlatDHCPManager')
        # Apparently needed by the RPC tests...
        self.network = self.start_service('network')
        self.user_id = 'admin'
        self.project_id = 'admin'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        nova.tests.image.fake.stub_out_image_service(self.stubs)

    def tearDown(self):
        super(QuotaIntegrationTestCase, self).tearDown()
        nova.tests.image.fake.FakeImageService_reset()

    def _create_instance(self, cores=2):
        """Create a test instance."""
        inst = {}
        inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['instance_type_id'] = '3'  # m1.large
        inst['vcpus'] = cores
        return db.instance_create(self.context, inst)

    def test_too_many_instances(self):
        # Fill the instance quota, then one further create must fail.
        instance_uuids = []
        for i in range(CONF.quota_instances):
            instance = self._create_instance()
            instance_uuids.append(instance['uuid'])
        inst_type = flavors.get_flavor_by_name('m1.small')
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        try:
            compute.API().create(self.context, min_count=1, max_count=1,
                                 instance_type=inst_type, image_href=image_uuid)
        except exception.QuotaError, e:
            expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
                               'used': 4, 'allowed': 4, 'overs': 'cores,instances'}
            self.assertEqual(e.kwargs, expected_kwargs)
        else:
            self.fail('Expected QuotaError exception')
        for instance_uuid in instance_uuids:
            db.instance_destroy(self.context, instance_uuid)

    def test_too_many_cores(self):
        # One 4-core instance exhausts the core quota; next create fails.
        instance = self._create_instance(cores=4)
        inst_type = flavors.get_flavor_by_name('m1.small')
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        try:
            compute.API().create(self.context, min_count=1, max_count=1,
                                 instance_type=inst_type, image_href=image_uuid)
        except exception.QuotaError, e:
            expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
                               'used': 4, 'allowed': 4, 'overs': 'cores'}
            self.assertEqual(e.kwargs, expected_kwargs)
        else:
            self.fail('Expected QuotaError exception')
        db.instance_destroy(self.context, instance['uuid'])

    def test_many_cores_with_unlimited_quota(self):
        # Setting cores quota to unlimited:
        self.flags(quota_cores=-1)
        instance = self._create_instance(cores=4)
        # NOTE(review): inst_type/image_uuid are prepared but no create()
        # call follows — the over-quota attempt appears to be missing here;
        # confirm against upstream before relying on this test.
        inst_type = flavors.get_flavor_by_name('m1.small')
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        db.instance_destroy(self.context, instance['uuid'])

    def test_too_many_addresses(self):
        # One pre-created floating IP fills the quota of 1.
        address = '192.168.0.100'
        db.floating_ip_create(context.get_admin_context(),
                              {'address': address,
                               'project_id': self.project_id})
        self.assertRaises(exception.QuotaError,
                          self.network.allocate_floating_ip,
                          self.context,
                          self.project_id)
        db.floating_ip_destroy(context.get_admin_context(), address)

    def test_auto_assigned(self):
        address = '192.168.0.100'
        db.floating_ip_create(context.get_admin_context(),
                              {'address': address,
                               'project_id': self.project_id})
        # auto allocated addresses should not be counted
        self.assertRaises(exception.NoMoreFloatingIps,
                          self.network.allocate_floating_ip,
                          self.context,
                          self.project_id,
                          True)
        db.floating_ip_destroy(context.get_admin_context(), address)

    def test_too_many_metadata_items(self):
        # One more metadata item than the quota allows.
        metadata = {}
        for i in range(CONF.quota_metadata_items + 1):
            metadata['key%s' % i] = 'value%s' % i
        inst_type = flavors.get_flavor_by_name('m1.small')
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        self.assertRaises(exception.QuotaError, compute.API().create,
                          self.context,
                          min_count=1,
                          max_count=1,
                          instance_type=inst_type,
                          image_href=image_uuid,
                          metadata=metadata)

    def _create_with_injected_files(self, files):
        """Attempt a single create carrying *files* as injected files."""
        api = compute.API()
        inst_type = flavors.get_flavor_by_name('m1.small')
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        api.create(self.context, min_count=1, max_count=1,
                   instance_type=inst_type, image_href=image_uuid,
                   injected_files=files)

    def test_no_injected_files(self):
        api = compute.API()
        inst_type = flavors.get_flavor_by_name('m1.small')
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        api.create(self.context,
                   instance_type=inst_type,
                   image_href=image_uuid)

    def test_max_injected_files(self):
        # Exactly at the limit: must succeed.
        files = []
        for i in xrange(CONF.quota_injected_files):
            files.append(('/my/path%d' % i, 'config = test\n'))
        self._create_with_injected_files(files)  # no QuotaError

    def test_too_many_injected_files(self):
        files = []
        for i in xrange(CONF.quota_injected_files + 1):
            files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
        self.assertRaises(exception.QuotaError,
                          self._create_with_injected_files, files)

    def test_max_injected_file_content_bytes(self):
        max = CONF.quota_injected_file_content_bytes
        content = ''.join(['a' for i in xrange(max)])
        files = [('/test/path', content)]
        self._create_with_injected_files(files)  # no QuotaError

    def test_too_many_injected_file_content_bytes(self):
        max = CONF.quota_injected_file_content_bytes
        content = ''.join(['a' for i in xrange(max + 1)])
        files = [('/test/path', content)]
        self.assertRaises(exception.QuotaError,
                          self._create_with_injected_files, files)

    def test_max_injected_file_path_bytes(self):
        max = CONF.quota_injected_file_path_bytes
        path = ''.join(['a' for i in xrange(max)])
        files = [(path, 'config = quotatest')]
        self._create_with_injected_files(files)  # no QuotaError

    def test_too_many_injected_file_path_bytes(self):
        max = CONF.quota_injected_file_path_bytes
        path = ''.join(['a' for i in xrange(max + 1)])
        files = [(path, 'config = quotatest')]
        self.assertRaises(exception.QuotaError,
                          self._create_with_injected_files, files)

    def test_reservation_expire(self):
        # Reservations past their expiry must be reclaimed by expire().
        self.useFixture(test.TimeOverride())

        def assertInstancesReserved(reserved):
            result = quota.QUOTAS.get_project_quotas(self.context,
                                                     self.context.project_id)
            self.assertEqual(result['instances']['reserved'], reserved)

        quota.QUOTAS.reserve(self.context,
                             expire=60,
                             instances=2)
        assertInstancesReserved(2)
        timeutils.advance_time_seconds(80)
        quota.QUOTAS.expire(self.context)
        assertInstancesReserved(0)
class FakeContext(object):
    """Minimal stand-in for a nova request context.

    Carries just the attributes the quota code reads; ``elevated`` returns
    an admin copy, mirroring nova.context.RequestContext.elevated().
    """

    def __init__(self, project_id, quota_class):
        self.project_id = project_id
        self.quota_class = quota_class
        self.user_id = 'fake_user'
        self.is_admin = False
        self.read_deleted = 'no'

    def elevated(self):
        admin_context = self.__class__(self.project_id, self.quota_class)
        admin_context.is_admin = True
        return admin_context
class FakeDriver(object):
    """Recording stub for a quota driver.

    Every method appends a tuple of (method name, *args) to ``self.called``
    so tests can assert the exact call sequence; canned lookup results come
    from the dicts/list handed to ``__init__``.  The exact tuple shapes are
    asserted by the test cases below — keep them stable.
    """

    def __init__(self, by_project=None, by_user=None, by_class=None,
                 reservations=None):
        self.called = []
        self.by_project = by_project or {}
        self.by_user = by_user or {}
        self.by_class = by_class or {}
        self.reservations = reservations or []

    def get_by_project_and_user(self, context, project_id, user_id, resource):
        self.called.append(('get_by_project_and_user',
                            context, project_id, user_id, resource))
        try:
            return self.by_user[user_id][resource]
        except KeyError:
            # Mirror the real driver: missing entry -> not-found exception.
            raise exception.ProjectUserQuotaNotFound(project_id=project_id,
                                                     user_id=user_id)

    def get_by_project(self, context, project_id, resource):
        self.called.append(('get_by_project', context, project_id, resource))
        try:
            return self.by_project[project_id][resource]
        except KeyError:
            raise exception.ProjectQuotaNotFound(project_id=project_id)

    def get_by_class(self, context, quota_class, resource):
        self.called.append(('get_by_class', context, quota_class, resource))
        try:
            return self.by_class[quota_class][resource]
        except KeyError:
            raise exception.QuotaClassNotFound(class_name=quota_class)

    def get_defaults(self, context, resources):
        self.called.append(('get_defaults', context, resources))
        return resources

    def get_class_quotas(self, context, resources, quota_class,
                         defaults=True):
        self.called.append(('get_class_quotas', context, resources,
                            quota_class, defaults))
        return resources

    def get_user_quotas(self, context, resources, project_id, user_id,
                        quota_class=None, defaults=True, usages=True):
        self.called.append(('get_user_quotas', context, resources,
                            project_id, user_id, quota_class, defaults,
                            usages))
        return resources

    def get_project_quotas(self, context, resources, project_id,
                           quota_class=None, defaults=True, usages=True,
                           remains=False):
        self.called.append(('get_project_quotas', context, resources,
                            project_id, quota_class, defaults, usages,
                            remains))
        return resources

    def limit_check(self, context, resources, values, project_id=None,
                    user_id=None):
        self.called.append(('limit_check', context, resources,
                            values, project_id, user_id))

    def reserve(self, context, resources, deltas, expire=None,
                project_id=None, user_id=None):
        self.called.append(('reserve', context, resources, deltas,
                            expire, project_id, user_id))
        # Canned reservation IDs supplied at construction time.
        return self.reservations

    def commit(self, context, reservations, project_id=None, user_id=None):
        self.called.append(('commit', context, reservations, project_id,
                            user_id))

    def rollback(self, context, reservations, project_id=None, user_id=None):
        self.called.append(('rollback', context, reservations, project_id,
                            user_id))

    def usage_reset(self, context, resources):
        self.called.append(('usage_reset', context, resources))

    def destroy_all_by_project_and_user(self, context, project_id, user_id):
        self.called.append(('destroy_all_by_project_and_user', context,
                            project_id, user_id))

    def destroy_all_by_project(self, context, project_id):
        self.called.append(('destroy_all_by_project', context, project_id))

    def expire(self, context):
        self.called.append(('expire', context))
class BaseResourceTestCase(test.TestCase):
    """Unit tests for quota.BaseResource flag/default/override resolution.

    Resolution order exercised below: explicit per-project override, then
    quota-class override, then the config-flag default (-1 when no flag).
    """

    def test_no_flag(self):
        resource = quota.BaseResource('test_resource')
        self.assertEqual(resource.name, 'test_resource')
        self.assertIsNone(resource.flag)
        # No backing flag -> unlimited default.
        self.assertEqual(resource.default, -1)

    def test_with_flag(self):
        # We know this flag exists, so use it...
        self.flags(quota_instances=10)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        self.assertEqual(resource.name, 'test_resource')
        self.assertEqual(resource.flag, 'quota_instances')
        self.assertEqual(resource.default, 10)

    def test_with_flag_no_quota(self):
        self.flags(quota_instances=-1)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        self.assertEqual(resource.name, 'test_resource')
        self.assertEqual(resource.flag, 'quota_instances')
        self.assertEqual(resource.default, -1)

    def test_quota_no_project_no_class(self):
        # No overrides anywhere -> flag default wins.
        self.flags(quota_instances=10)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        driver = FakeDriver()
        context = FakeContext(None, None)
        quota_value = resource.quota(driver, context)
        self.assertEqual(quota_value, 10)

    def test_quota_with_project_no_class(self):
        self.flags(quota_instances=10)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        driver = FakeDriver(by_project=dict(
            test_project=dict(test_resource=15),
        ))
        context = FakeContext('test_project', None)
        quota_value = resource.quota(driver, context)
        self.assertEqual(quota_value, 15)

    def test_quota_no_project_with_class(self):
        self.flags(quota_instances=10)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        driver = FakeDriver(by_class=dict(
            test_class=dict(test_resource=20),
        ))
        context = FakeContext(None, 'test_class')
        quota_value = resource.quota(driver, context)
        self.assertEqual(quota_value, 20)

    def test_quota_with_project_with_class(self):
        # Project override takes precedence over the class override.
        self.flags(quota_instances=10)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        driver = FakeDriver(by_project=dict(
            test_project=dict(test_resource=15),
        ),
            by_class=dict(
                test_class=dict(test_resource=20),
            ))
        context = FakeContext('test_project', 'test_class')
        quota_value = resource.quota(driver, context)
        self.assertEqual(quota_value, 15)

    def test_quota_override_project_with_class(self):
        # Explicit project_id kwarg overrides the context's project.
        self.flags(quota_instances=10)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        driver = FakeDriver(by_project=dict(
            test_project=dict(test_resource=15),
            override_project=dict(test_resource=20),
        ))
        context = FakeContext('test_project', 'test_class')
        quota_value = resource.quota(driver, context,
                                     project_id='override_project')
        self.assertEqual(quota_value, 20)

    def test_quota_with_project_override_class(self):
        # Explicit quota_class kwarg overrides the context's class.
        self.flags(quota_instances=10)
        resource = quota.BaseResource('test_resource', 'quota_instances')
        driver = FakeDriver(by_class=dict(
            test_class=dict(test_resource=15),
            override_class=dict(test_resource=20),
        ))
        context = FakeContext('test_project', 'test_class')
        quota_value = resource.quota(driver, context,
                                     quota_class='override_class')
        self.assertEqual(quota_value, 20)
class QuotaEngineTestCase(test.TestCase):
    """Unit tests for quota.QuotaEngine delegation to its driver.

    Each test wires a recording FakeDriver into the engine, invokes one
    engine method, and asserts the exact call tuple(s) the driver saw plus
    the pass-through return value.
    """

    def test_init(self):
        quota_obj = quota.QuotaEngine()
        self.assertEqual(quota_obj._resources, {})
        # Default driver is the DB-backed one.
        self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)

    def test_init_override_string(self):
        # Driver may be given as an importable dotted path...
        quota_obj = quota.QuotaEngine(
            quota_driver_class='nova.tests.test_quota.FakeDriver')
        self.assertEqual(quota_obj._resources, {})
        self.assertIsInstance(quota_obj._driver, FakeDriver)

    def test_init_override_obj(self):
        # ...or directly as an object (kept as-is, not instantiated).
        quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
        self.assertEqual(quota_obj._resources, {})
        self.assertEqual(quota_obj._driver, FakeDriver)

    def test_register_resource(self):
        quota_obj = quota.QuotaEngine()
        resource = quota.AbsoluteResource('test_resource')
        quota_obj.register_resource(resource)
        self.assertEqual(quota_obj._resources, dict(test_resource=resource))

    def test_register_resources(self):
        quota_obj = quota.QuotaEngine()
        resources = [
            quota.AbsoluteResource('test_resource1'),
            quota.AbsoluteResource('test_resource2'),
            quota.AbsoluteResource('test_resource3'),
        ]
        quota_obj.register_resources(resources)
        self.assertEqual(quota_obj._resources, dict(
            test_resource1=resources[0],
            test_resource2=resources[1],
            test_resource3=resources[2],
        ))

    def test_get_by_project_and_user(self):
        context = FakeContext('test_project', 'test_class')
        driver = FakeDriver(by_user=dict(
            fake_user=dict(test_resource=42)))
        quota_obj = quota.QuotaEngine(quota_driver_class=driver)
        result = quota_obj.get_by_project_and_user(context, 'test_project',
                                                   'fake_user', 'test_resource')
        self.assertEqual(driver.called, [
            ('get_by_project_and_user', context, 'test_project',
             'fake_user', 'test_resource'),
        ])
        self.assertEqual(result, 42)

    def test_get_by_project(self):
        context = FakeContext('test_project', 'test_class')
        driver = FakeDriver(by_project=dict(
            test_project=dict(test_resource=42)))
        quota_obj = quota.QuotaEngine(quota_driver_class=driver)
        result = quota_obj.get_by_project(context, 'test_project',
                                          'test_resource')
        self.assertEqual(driver.called, [
            ('get_by_project', context, 'test_project', 'test_resource'),
        ])
        self.assertEqual(result, 42)

    def test_get_by_class(self):
        context = FakeContext('test_project', 'test_class')
        driver = FakeDriver(by_class=dict(
            test_class=dict(test_resource=42)))
        quota_obj = quota.QuotaEngine(quota_driver_class=driver)
        result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
        self.assertEqual(driver.called, [
            ('get_by_class', context, 'test_class', 'test_resource'),
        ])
        self.assertEqual(result, 42)

    def _make_quota_obj(self, driver):
        """Build an engine on *driver* with four registered test resources."""
        quota_obj = quota.QuotaEngine(quota_driver_class=driver)
        resources = [
            quota.AbsoluteResource('test_resource4'),
            quota.AbsoluteResource('test_resource3'),
            quota.AbsoluteResource('test_resource2'),
            quota.AbsoluteResource('test_resource1'),
        ]
        quota_obj.register_resources(resources)
        return quota_obj

    def test_get_defaults(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        result = quota_obj.get_defaults(context)
        self.assertEqual(driver.called, [
            ('get_defaults', context, quota_obj._resources),
        ])
        self.assertEqual(result, quota_obj._resources)

    def test_get_class_quotas(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        result1 = quota_obj.get_class_quotas(context, 'test_class')
        result2 = quota_obj.get_class_quotas(context, 'test_class', False)
        self.assertEqual(driver.called, [
            ('get_class_quotas', context, quota_obj._resources,
             'test_class', True),
            ('get_class_quotas', context, quota_obj._resources,
             'test_class', False),
        ])
        self.assertEqual(result1, quota_obj._resources)
        self.assertEqual(result2, quota_obj._resources)

    def test_get_user_quotas(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        result1 = quota_obj.get_user_quotas(context, 'test_project',
                                            'fake_user')
        result2 = quota_obj.get_user_quotas(context, 'test_project',
                                            'fake_user',
                                            quota_class='test_class',
                                            defaults=False,
                                            usages=False)
        self.assertEqual(driver.called, [
            ('get_user_quotas', context, quota_obj._resources,
             'test_project', 'fake_user', None, True, True),
            ('get_user_quotas', context, quota_obj._resources,
             'test_project', 'fake_user', 'test_class', False, False),
        ])
        self.assertEqual(result1, quota_obj._resources)
        self.assertEqual(result2, quota_obj._resources)

    def test_get_project_quotas(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        result1 = quota_obj.get_project_quotas(context, 'test_project')
        result2 = quota_obj.get_project_quotas(context, 'test_project',
                                               quota_class='test_class',
                                               defaults=False,
                                               usages=False)
        self.assertEqual(driver.called, [
            ('get_project_quotas', context, quota_obj._resources,
             'test_project', None, True, True, False),
            ('get_project_quotas', context, quota_obj._resources,
             'test_project', 'test_class', False, False, False),
        ])
        self.assertEqual(result1, quota_obj._resources)
        self.assertEqual(result2, quota_obj._resources)

    def test_count_no_resource(self):
        # Counting an unregistered resource is an error.
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        self.assertRaises(exception.QuotaResourceUnknown,
                          quota_obj.count, context, 'test_resource5',
                          True, foo='bar')

    def test_count_wrong_resource(self):
        # Counting a registered but non-countable resource is also an error.
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        self.assertRaises(exception.QuotaResourceUnknown,
                          quota_obj.count, context, 'test_resource1',
                          True, foo='bar')

    def test_count(self):
        def fake_count(context, *args, **kwargs):
            self.assertEqual(args, (True,))
            self.assertEqual(kwargs, dict(foo='bar'))
            return 5

        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.register_resource(quota.CountableResource('test_resource5',
                                                            fake_count))
        result = quota_obj.count(context, 'test_resource5', True, foo='bar')
        self.assertEqual(result, 5)

    def test_limit_check(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
                              test_resource3=2, test_resource4=1)
        self.assertEqual(driver.called, [
            ('limit_check', context, quota_obj._resources, dict(
                test_resource1=4,
                test_resource2=3,
                test_resource3=2,
                test_resource4=1,
            ), None, None),
        ])

    def test_reserve(self):
        context = FakeContext(None, None)
        driver = FakeDriver(reservations=[
            'resv-01', 'resv-02', 'resv-03', 'resv-04',
        ])
        quota_obj = self._make_quota_obj(driver)
        result1 = quota_obj.reserve(context, test_resource1=4,
                                    test_resource2=3, test_resource3=2,
                                    test_resource4=1)
        result2 = quota_obj.reserve(context, expire=3600,
                                    test_resource1=1, test_resource2=2,
                                    test_resource3=3, test_resource4=4)
        result3 = quota_obj.reserve(context, project_id='fake_project',
                                    test_resource1=1, test_resource2=2,
                                    test_resource3=3, test_resource4=4)
        self.assertEqual(driver.called, [
            ('reserve', context, quota_obj._resources, dict(
                test_resource1=4,
                test_resource2=3,
                test_resource3=2,
                test_resource4=1,
            ), None, None, None),
            ('reserve', context, quota_obj._resources, dict(
                test_resource1=1,
                test_resource2=2,
                test_resource3=3,
                test_resource4=4,
            ), 3600, None, None),
            ('reserve', context, quota_obj._resources, dict(
                test_resource1=1,
                test_resource2=2,
                test_resource3=3,
                test_resource4=4,
            ), None, 'fake_project', None),
        ])
        self.assertEqual(result1, [
            'resv-01', 'resv-02', 'resv-03', 'resv-04',
        ])
        self.assertEqual(result2, [
            'resv-01', 'resv-02', 'resv-03', 'resv-04',
        ])
        self.assertEqual(result3, [
            'resv-01', 'resv-02', 'resv-03', 'resv-04',
        ])

    def test_commit(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
        self.assertEqual(driver.called, [
            ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None,
             None),
        ])

    def test_rollback(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
        self.assertEqual(driver.called, [
            ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None,
             None),
        ])

    def test_usage_reset(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.usage_reset(context, ['res1', 'res2', 'res3'])
        self.assertEqual(driver.called, [
            ('usage_reset', context, ['res1', 'res2', 'res3']),
        ])

    def test_destroy_all_by_project_and_user(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.destroy_all_by_project_and_user(context,
                                                  'test_project', 'fake_user')
        self.assertEqual(driver.called, [
            ('destroy_all_by_project_and_user', context, 'test_project',
             'fake_user'),
        ])

    def test_destroy_all_by_project(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.destroy_all_by_project(context, 'test_project')
        self.assertEqual(driver.called, [
            ('destroy_all_by_project', context, 'test_project'),
        ])

    def test_expire(self):
        context = FakeContext(None, None)
        driver = FakeDriver()
        quota_obj = self._make_quota_obj(driver)
        quota_obj.expire(context)
        self.assertEqual(driver.called, [
            ('expire', context),
        ])

    def test_resources(self):
        # resources property returns registered names sorted alphabetically.
        quota_obj = self._make_quota_obj(None)
        self.assertEqual(quota_obj.resources,
                         ['test_resource1', 'test_resource2',
                          'test_resource3', 'test_resource4'])
class DbQuotaDriverTestCase(test.TestCase):
    def setUp(self):
        """Pin every quota option and create a fresh DbQuotaDriver."""
        super(DbQuotaDriverTestCase, self).setUp()
        # Fix the config-driven defaults so the expected values asserted
        # in the tests below do not depend on the shipped defaults.
        self.flags(quota_instances=10,
                   quota_cores=20,
                   quota_ram=50 * 1024,
                   quota_floating_ips=10,
                   quota_fixed_ips=10,
                   quota_metadata_items=128,
                   quota_injected_files=5,
                   quota_injected_file_content_bytes=10 * 1024,
                   quota_injected_file_path_bytes=255,
                   quota_security_groups=10,
                   quota_security_group_rules=20,
                   reservation_expire=86400,
                   until_refresh=0,
                   max_age=0,
                   )
        self.driver = quota.DbQuotaDriver()
        # Records the names of stubbed DB calls, in call order.
        self.calls = []
        # Freeze time so computed reservation expirations are deterministic.
        self.useFixture(test.TimeOverride())
def test_get_defaults(self):
# Use our pre-defined resources
self._stub_quota_class_get_default()
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
floating_ips=10,
fixed_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def _stub_quota_class_get_default(self):
# Stub out quota_class_get_default
def fake_qcgd(context):
self.calls.append('quota_class_get_default')
return dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
return dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
floating_ips=10,
fixed_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class', False)
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
def _stub_get_by_project_and_user(self):
def fake_qgabpau(context, project_id, user_id):
self.calls.append('quota_get_all_by_project_and_user')
self.assertEqual(project_id, 'test_project')
self.assertEqual(user_id, 'fake_user')
return dict(
cores=10,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return {
'cores': 10,
'injected_files': 2,
'injected_file_path_bytes': 127,
}
def fake_qugabpau(context, project_id, user_id):
self.calls.append('quota_usage_get_all_by_project_and_user')
self.assertEqual(project_id, 'test_project')
self.assertEqual(user_id, 'fake_user')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpau)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project_and_user',
fake_qugabpau)
self._stub_quota_class_get_all_by_name()
def test_get_user_quotas(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self._stub_quota_class_get_all_by_name()
self._stub_quota_class_get_default()
def test_get_project_quotas(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_alt_context_no_class(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', None),
quota.QUOTAS._resources, 'test_project', 'fake_user')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
])
self.assertEqual(result, dict(
instances=dict(
limit=10,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=50 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=128,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=10 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_no_class(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_alt_context_with_class(self):
self.maxDiff = None
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user',
quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_with_class(self):
self.maxDiff = None
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project', quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
fixed_ips=dict(
limit=10,
in_use=0,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_no_defaults(self):
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user',
defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_usage_get_all_by_project_and_user',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_user_quotas_no_usages(self):
self._stub_get_by_project_and_user()
result = self.driver.get_user_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project_and_user',
'quota_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
floating_ips=dict(
limit=10,
),
fixed_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_class_get_all_by_name',
'quota_class_get_default',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
floating_ips=dict(
limit=10,
),
fixed_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
))
    def _stub_get_settable_quotas(self):
        """Stub the quota lookups that get_settable_quotas depends on."""
        def fake_get_project_quotas(context, resources, project_id,
                                    quota_class=None, defaults=True,
                                    usages=True, remains=False):
            self.calls.append('get_project_quotas')
            result = {}
            for k, v in resources.items():
                # For 'instances': pretend 5 of the default limit are
                # already handed out to users and 1 is in use; everything
                # else is untouched.
                if k == 'instances':
                    remains = v.default - 5
                    in_use = 1
                else:
                    remains = v.default
                    in_use = 0
                result[k] = {'limit': v.default, 'in_use': in_use,
                             'reserved': 0, 'remains': remains}
            return result
        def fake_get_user_quotas(context, resources, project_id, user_id,
                                 quota_class=None, defaults=True,
                                 usages=True):
            self.calls.append('get_user_quotas')
            result = {}
            for k, v in resources.items():
                if k == 'instances':
                    in_use = 1
                else:
                    in_use = 0
                result[k] = {'limit': v.default,
                             'in_use': in_use, 'reserved': 0}
            return result
        def fake_qgabpau(context, project_id, user_id):
            self.calls.append('quota_get_all_by_project_and_user')
            # The user already has an explicit instances quota of 2.
            return {'instances': 2}
        self.stubs.Set(self.driver, 'get_project_quotas',
                       fake_get_project_quotas)
        self.stubs.Set(self.driver, 'get_user_quotas',
                       fake_get_user_quotas)
        self.stubs.Set(db, 'quota_get_all_by_project_and_user',
                       fake_qgabpau)
def test_get_settable_quotas_with_user(self):
self._stub_get_settable_quotas()
result = self.driver.get_settable_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', user_id='test_user')
self.assertEqual(self.calls, [
'get_project_quotas',
'get_user_quotas',
'quota_get_all_by_project_and_user',
])
self.assertEqual(result, {
'instances': {
'minimum': 1,
'maximum': 7,
},
'cores': {
'minimum': 0,
'maximum': 20,
},
'ram': {
'minimum': 0,
'maximum': 50 * 1024,
},
'floating_ips': {
'minimum': 0,
'maximum': 10,
},
'fixed_ips': {
'minimum': 0,
'maximum': 10,
},
'metadata_items': {
'minimum': 0,
'maximum': 128,
},
'injected_files': {
'minimum': 0,
'maximum': 5,
},
'injected_file_content_bytes': {
'minimum': 0,
'maximum': 10 * 1024,
},
'injected_file_path_bytes': {
'minimum': 0,
'maximum': 255,
},
'security_groups': {
'minimum': 0,
'maximum': 10,
},
'security_group_rules': {
'minimum': 0,
'maximum': 20,
},
'key_pairs': {
'minimum': 0,
'maximum': 100,
},
})
def test_get_settable_quotas_without_user(self):
self._stub_get_settable_quotas()
result = self.driver.get_settable_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'get_project_quotas',
])
self.assertEqual(result, {
'instances': {
'minimum': 5,
'maximum': -1,
},
'cores': {
'minimum': 0,
'maximum': -1,
},
'ram': {
'minimum': 0,
'maximum': -1,
},
'floating_ips': {
'minimum': 0,
'maximum': -1,
},
'fixed_ips': {
'minimum': 0,
'maximum': -1,
},
'metadata_items': {
'minimum': 0,
'maximum': -1,
},
'injected_files': {
'minimum': 0,
'maximum': -1,
},
'injected_file_content_bytes': {
'minimum': 0,
'maximum': -1,
},
'injected_file_path_bytes': {
'minimum': 0,
'maximum': -1,
},
'security_groups': {
'minimum': 0,
'maximum': -1,
},
'security_group_rules': {
'minimum': 0,
'maximum': -1,
},
'key_pairs': {
'minimum': 0,
'maximum': -1,
},
})
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True, remains=False):
self.calls.append('get_project_quotas')
return dict((k, dict(limit=v.default))
for k, v in resources.items())
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
def test_get_quotas_has_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync_no_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['metadata_items'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_has_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['instances'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
'floating_ips', 'security_groups'],
True)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
floating_ips=10,
security_groups=10,
))
def test_get_quotas_no_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['metadata_items', 'injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes',
'security_group_rules'], False)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_group_rules=20,
))
def test_limit_check_under(self):
self._stub_get_project_quotas()
self.assertRaises(exception.InvalidQuotaValue,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=-1))
def test_limit_check_over(self):
self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=129))
def test_limit_check_unlimited(self):
self.flags(quota_metadata_items=-1)
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=32767))
def test_limit_check(self):
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=128))
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire='invalid')
self.assertEqual(self.calls, [])
def test_reserve_default_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 500, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 86400),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
    def test_usage_reset(self):
        """usage_reset updates each resource's usage to in_use=-1 under an
        elevated (admin) context, skipping resources with no usage row."""
        calls = []
        def fake_quota_usage_update(context, project_id, user_id, resource,
                                    **kwargs):
            calls.append(('quota_usage_update', context, project_id, user_id,
                          resource, kwargs))
            # Simulate a resource that has no usage record.
            if resource == 'nonexist':
                raise exception.QuotaUsageNotFound(project_id=project_id)
        self.stubs.Set(db, 'quota_usage_update', fake_quota_usage_update)
        ctx = FakeContext('test_project', 'test_class')
        resources = ['res1', 'res2', 'nonexist', 'res4']
        self.driver.usage_reset(ctx, resources)
        # Make sure we had some calls
        self.assertEqual(len(calls), len(resources))
        # Extract the elevated context that was used and do some
        # sanity checks
        elevated = calls[0][1]
        self.assertEqual(elevated.project_id, ctx.project_id)
        self.assertEqual(elevated.quota_class, ctx.quota_class)
        self.assertEqual(elevated.is_admin, True)
        # Now check that all the expected calls were made
        exemplar = [('quota_usage_update', elevated, 'test_project',
                     'fake_user', res, dict(in_use=-1)) for res in resources]
        self.assertEqual(calls, exemplar)
class FakeSession(object):
    """Minimal stand-in for a DB session usable as a context manager."""

    def begin(self):
        # Transactions are a no-op; return self so ``with s.begin():`` works.
        return self

    def add(self, instance):
        # Discard anything "added" to the session.
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Returning False means exceptions are never swallowed.
        return False
class FakeUsage(sqa_models.QuotaUsage):
    """QuotaUsage model whose save() is a no-op, so tests never hit the DB."""
    def save(self, *args, **kwargs):
        pass
class QuotaReserveSqlAlchemyTestCase(test.TestCase):
# nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
# own test case, and since it's a quota manipulator, this is the
# best place to put it...
    def setUp(self):
        """Build fake resources, quotas and deltas, and stub out every
        sqlalchemy helper that quota_reserve touches."""
        super(QuotaReserveSqlAlchemyTestCase, self).setUp()
        self.sync_called = set()
        self.quotas = dict(
            instances=5,
            cores=10,
            ram=10 * 1024,
            fixed_ips=5,
            )
        self.deltas = dict(
            instances=2,
            cores=4,
            ram=2 * 1024,
            fixed_ips=2,
            )
        # Each fake sync function records that it was called and
        # "refreshes" usage: negative in_use resyncs to 2, an existing
        # usage resyncs to one less than its current value, and a
        # missing usage resyncs to 0.
        def make_sync(res_name):
            def sync(context, project_id, user_id, session):
                self.sync_called.add(res_name)
                if res_name in self.usages:
                    if self.usages[res_name].in_use < 0:
                        return {res_name: 2}
                    else:
                        return {res_name: self.usages[res_name].in_use - 1}
                return {res_name: 0}
            return sync
        self.resources = {}
        for res_name in ('instances', 'cores', 'ram', 'fixed_ips'):
            method_name = '_sync_%s' % res_name
            sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name)
            res = quota.ReservableResource(res_name, '_sync_%s' % res_name)
            self.resources[res_name] = res
        self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
        self.usages = {}
        self.usages_created = {}
        self.reservations_created = {}
        # Baseline expected usage rows after a reservation; individual
        # tests tweak in_use / until_refresh before comparing.
        # NOTE(review): fixed_ips has user_id=None -- presumably because
        # fixed_ips usage is tracked per-project; confirm against
        # quota_reserve.
        self.usages_list = [
                dict(resource='instances',
                     project_id='test_project',
                     user_id='fake_user',
                     in_use=2,
                     reserved=2,
                     until_refresh=None),
                dict(resource='cores',
                     project_id='test_project',
                     user_id='fake_user',
                     in_use=2,
                     reserved=4,
                     until_refresh=None),
                dict(resource='ram',
                     project_id='test_project',
                     user_id='fake_user',
                     in_use=2,
                     reserved=2 * 1024,
                     until_refresh=None),
                dict(resource='fixed_ips',
                     project_id='test_project',
                     user_id=None,
                     in_use=2,
                     reserved=2,
                     until_refresh=None),
                ]
        def fake_get_session():
            return FakeSession()
        def fake_get_project_quota_usages(context, session, project_id):
            return self.usages.copy()
        def fake_get_user_quota_usages(context, session, project_id, user_id):
            return self.usages.copy()
        # Record created usages/reservations in dicts instead of the DB.
        def fake_quota_usage_create(context, project_id, user_id, resource,
                                    in_use, reserved, until_refresh,
                                    session=None, save=True):
            quota_usage_ref = self._make_quota_usage(
                project_id, user_id, resource, in_use, reserved, until_refresh,
                timeutils.utcnow(), timeutils.utcnow())
            self.usages_created[resource] = quota_usage_ref
            return quota_usage_ref
        def fake_reservation_create(context, uuid, usage_id, project_id,
                                    user_id, resource, delta, expire,
                                    session=None):
            reservation_ref = self._make_reservation(
                uuid, usage_id, project_id, user_id, resource, delta, expire,
                timeutils.utcnow(), timeutils.utcnow())
            self.reservations_created[resource] = reservation_ref
            return reservation_ref
        self.stubs.Set(sqa_api, 'get_session', fake_get_session)
        self.stubs.Set(sqa_api, '_get_project_quota_usages',
                       fake_get_project_quota_usages)
        self.stubs.Set(sqa_api, '_get_user_quota_usages',
                       fake_get_user_quota_usages)
        self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
        self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)
        # Freeze time so created_at/updated_at/expire are deterministic.
        self.useFixture(test.TimeOverride())
def _make_quota_usage(self, project_id, user_id, resource, in_use,
reserved, until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.created_at = created_at
quota_usage_ref.updated_at = updated_at
quota_usage_ref.deleted_at = None
quota_usage_ref.deleted = False
return quota_usage_ref
def init_usage(self, project_id, user_id, resource, in_use, reserved=0,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
if resource == 'fixed_ips':
user_id = None
quota_usage_ref = self._make_quota_usage(project_id, user_id, resource,
in_use, reserved,
until_refresh,
created_at, updated_at)
self.usages[resource] = quota_usage_ref
def compare_usage(self, usage_dict, expected):
for usage in expected:
resource = usage['resource']
for key, value in usage.items():
actual = getattr(usage_dict[resource], key)
self.assertEqual(actual, value,
"%s != %s on usage for resource %s" %
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, user_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.created_at = created_at
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
return reservation_ref
def compare_reservation(self, reservations, expected):
reservations = set(reservations)
for resv in expected:
resource = resv['resource']
resv_obj = self.reservations_created[resource]
self.assertIn(resv_obj.uuid, reservations)
reservations.discard(resv_obj.uuid)
for key, value in resv.items():
actual = getattr(resv_obj, key)
self.assertEqual(actual, value,
"%s != %s on reservation for resource %s" %
(actual, value, resource))
self.assertEqual(len(reservations), 0)
    def _update_reservations_list(self, usage_id_change=False,
                                  delta_change=False):
        """Return the expected reservation rows; usage_id_change points
        usage_id at freshly created usages, delta_change negates deltas."""
        # NOTE(review): the 'ram' entry carries no project_id, unlike the
        # other three -- presumably intentional; confirm against
        # quota_reserve's behavior.
        reservations_list = [
            dict(resource='instances',
                project_id='test_project',
                delta=2),
            dict(resource='cores',
                project_id='test_project',
                delta=4),
            dict(resource='ram',
                 delta=2 * 1024),
            dict(resource='fixed_ips',
                project_id='test_project',
                delta=2),
            ]
        if usage_id_change:
            reservations_list[0]["usage_id"] = self.usages_created['instances']
            reservations_list[1]["usage_id"] = self.usages_created['cores']
            reservations_list[2]["usage_id"] = self.usages_created['ram']
            reservations_list[3]["usage_id"] = self.usages_created['fixed_ips']
        else:
            reservations_list[0]["usage_id"] = self.usages['instances']
            reservations_list[1]["usage_id"] = self.usages['cores']
            reservations_list[2]["usage_id"] = self.usages['ram']
            reservations_list[3]["usage_id"] = self.usages['fixed_ips']
        if delta_change:
            reservations_list[0]["delta"] = -2
            reservations_list[1]["delta"] = -4
            reservations_list[2]["delta"] = -2 * 1024
            reservations_list[3]["delta"] = -2
        return reservations_list
def _init_usages(self, *in_use, **kwargs):
    """Seed one usage record per standard resource (positional in_use
    values in resource order) and return a fresh FakeContext."""
    for position, resource in enumerate(('instances', 'cores', 'ram',
                                         'fixed_ips')):
        self.init_usage('test_project', 'fake_user',
                        resource, in_use[position], **kwargs)
    return FakeContext('test_project', 'test_class')
def test_quota_reserve_create_usages(self):
    """With no pre-existing usage rows, quota_reserve must create them
    (at zero in_use) and sync every resource."""
    context = FakeContext('test_project', 'test_class')
    reservations = sqa_api.quota_reserve(context, self.resources,
                                         self.quotas, self.quotas,
                                         self.deltas, self.expire, 0, 0)
    self.assertEqual(self.sync_called,
                     set(['instances', 'cores', 'ram', 'fixed_ips']))
    for index in range(4):
        self.usages_list[index]["in_use"] = 0
    self.compare_usage(self.usages_created, self.usages_list)
    self.compare_reservation(reservations,
                             self._update_reservations_list(True))
def test_quota_reserve_negative_in_use(self):
    """Usage rows seeded with negative in_use get refreshed: every sync
    runs and the until_refresh counter is stored on each row."""
    context = self._init_usages(-1, -1, -1, -1, until_refresh=1)
    reservations = sqa_api.quota_reserve(context, self.resources,
                                         self.quotas, self.quotas,
                                         self.deltas, self.expire, 5, 0)
    self.assertEqual(self.sync_called,
                     set(['instances', 'cores', 'ram', 'fixed_ips']))
    for index in range(4):
        self.usages_list[index]["until_refresh"] = 5
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.compare_reservation(reservations, self._update_reservations_list())
def test_quota_reserve_until_refresh(self):
    """Rows whose until_refresh counter is due are re-synced, and the new
    counter value (5) is written back onto every row."""
    context = self._init_usages(3, 3, 3, 3, until_refresh=1)
    reservations = sqa_api.quota_reserve(context, self.resources,
                                         self.quotas, self.quotas,
                                         self.deltas, self.expire, 5, 0)
    self.assertEqual(self.sync_called,
                     set(['instances', 'cores', 'ram', 'fixed_ips']))
    for index in range(4):
        self.usages_list[index]["until_refresh"] = 5
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.compare_reservation(reservations, self._update_reservations_list())
def test_quota_reserve_max_age(self):
    """Usage rows exactly max_age seconds old are refreshed via the sync
    callbacks; no new rows are created."""
    max_age = 3600
    record_created = (timeutils.utcnow() -
                      datetime.timedelta(seconds=max_age))
    context = self._init_usages(3, 3, 3, 3, created_at=record_created,
                                updated_at=record_created)
    reservations = sqa_api.quota_reserve(context, self.resources,
                                         self.quotas, self.quotas,
                                         self.deltas, self.expire,
                                         0, max_age)
    self.assertEqual(self.sync_called,
                     set(['instances', 'cores', 'ram', 'fixed_ips']))
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.compare_reservation(reservations, self._update_reservations_list())
def test_quota_reserve_no_refresh(self):
    """Fresh usage rows are used as-is: no sync callback fires and the
    seeded in_use values are preserved."""
    context = self._init_usages(3, 3, 3, 3)
    reservations = sqa_api.quota_reserve(context, self.resources,
                                         self.quotas, self.quotas,
                                         self.deltas, self.expire, 0, 0)
    self.assertEqual(self.sync_called, set([]))
    for index in range(4):
        self.usages_list[index]["in_use"] = 3
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.compare_reservation(reservations, self._update_reservations_list())
def test_quota_reserve_unders(self):
    """Releasing more than is held (negative deltas exceeding in_use) is
    accepted: no sync runs and usage rows keep their seeded values."""
    context = self._init_usages(1, 3, 1 * 1024, 1)
    self.deltas.update(instances=-2, cores=-4, ram=-2 * 1024, fixed_ips=-2)
    reservations = sqa_api.quota_reserve(context, self.resources,
                                         self.quotas, self.quotas,
                                         self.deltas, self.expire, 0, 0)
    self.assertEqual(self.sync_called, set([]))
    for index, in_use in enumerate((1, 3, 1 * 1024, 1)):
        self.usages_list[index]["in_use"] = in_use
        self.usages_list[index]["reserved"] = 0
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.compare_reservation(
        reservations, self._update_reservations_list(False, True))
def test_quota_reserve_overs(self):
    """Exceeding every quota raises OverQuota carrying the full usage,
    headroom, overs and quotas detail; nothing is reserved or created."""
    context = self._init_usages(4, 8, 10 * 1024, 4)
    try:
        sqa_api.quota_reserve(context, self.resources, self.quotas,
                              self.quotas, self.deltas, self.expire, 0, 0)
    except exception.OverQuota as e:
        expected_kwargs = {
            'code': 500,
            'usages': {'instances': {'reserved': 0, 'in_use': 4},
                       'ram': {'reserved': 0, 'in_use': 10240},
                       'fixed_ips': {'reserved': 0, 'in_use': 4},
                       'cores': {'reserved': 0, 'in_use': 8}},
            'headroom': {'cores': 2, 'ram': 0, 'fixed_ips': 1,
                         'instances': 1},
            'overs': ['cores', 'fixed_ips', 'instances', 'ram'],
            'quotas': {'cores': 10, 'ram': 10240,
                       'fixed_ips': 5, 'instances': 5},
        }
        self.assertEqual(e.kwargs, expected_kwargs)
    else:
        self.fail('Expected OverQuota failure')
    self.assertEqual(self.sync_called, set([]))
    for index, in_use in enumerate((4, 8, 10 * 1024, 4)):
        self.usages_list[index]["in_use"] = in_use
        self.usages_list[index]["reserved"] = 0
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.assertEqual(self.reservations_created, {})
def test_quota_reserve_cores_unlimited(self):
    """With quota_cores unlimited, the pre-seeded usage state is left
    untouched.

    NOTE(review): this test never calls sqa_api.quota_reserve — it only
    asserts the state produced by _init_usages. Confirm whether the
    reserve call was dropped by mistake.
    """
    # Requesting 8 cores, quota_cores set to unlimited:
    self.flags(quota_cores=-1)
    context = self._init_usages(1, 8, 1 * 1024, 1)
    self.assertEqual(self.sync_called, set([]))
    for index, in_use in enumerate((1, 8, 1 * 1024, 1)):
        self.usages_list[index]["in_use"] = in_use
        self.usages_list[index]["reserved"] = 0
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.assertEqual(self.reservations_created, {})
def test_quota_reserve_ram_unlimited(self):
    """With quota_ram unlimited, the pre-seeded usage state is left
    untouched.

    NOTE(review): this test never calls sqa_api.quota_reserve — it only
    asserts the state produced by _init_usages. Confirm whether the
    reserve call was dropped by mistake.
    """
    # Requesting 10*1024 ram, quota_ram set to unlimited:
    self.flags(quota_ram=-1)
    context = self._init_usages(1, 1, 10 * 1024, 1)
    self.assertEqual(self.sync_called, set([]))
    for index, in_use in enumerate((1, 1, 10 * 1024, 1)):
        self.usages_list[index]["in_use"] = in_use
        self.usages_list[index]["reserved"] = 0
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.assertEqual(self.reservations_created, {})
def test_quota_reserve_reduction(self):
    """Negative deltas within the held amounts create release reservations
    without syncing or modifying the seeded usage rows."""
    context = self._init_usages(10, 20, 20 * 1024, 10)
    self.deltas.update(instances=-2, cores=-4, ram=-2 * 1024, fixed_ips=-2)
    reservations = sqa_api.quota_reserve(context, self.resources,
                                         self.quotas, self.quotas,
                                         self.deltas, self.expire, 0, 0)
    self.assertEqual(self.sync_called, set([]))
    for index, in_use in enumerate((10, 20, 20 * 1024, 10)):
        self.usages_list[index]["in_use"] = in_use
        self.usages_list[index]["reserved"] = 0
    self.compare_usage(self.usages, self.usages_list)
    self.assertEqual(self.usages_created, {})
    self.compare_reservation(
        reservations, self._update_reservations_list(False, True))
class NoopQuotaDriverTestCase(test.TestCase):
    """Exercise NoopQuotaDriver: every quota it reports is -1 (unlimited),
    regardless of the configured flag values."""

    def setUp(self):
        super(NoopQuotaDriverTestCase, self).setUp()
        self.flags(quota_instances=10,
                   quota_cores=20,
                   quota_ram=50 * 1024,
                   quota_floating_ips=10,
                   quota_metadata_items=128,
                   quota_injected_files=5,
                   quota_injected_file_content_bytes=10 * 1024,
                   quota_injected_file_path_bytes=255,
                   quota_security_groups=10,
                   quota_security_group_rules=20,
                   reservation_expire=86400,
                   until_refresh=0,
                   max_age=0,
                   )
        # Three expected shapes, depending on whether usages and/or limit
        # dicts are requested from the driver.
        resources = quota.QUOTAS._resources
        self.expected_with_usages = dict(
            (name, dict(limit=-1, in_use=-1, reserved=-1))
            for name in resources)
        self.expected_without_usages = dict(
            (name, dict(limit=-1)) for name in resources)
        self.expected_without_dict = dict((name, -1) for name in resources)
        self.driver = quota.NoopQuotaDriver()

    def test_get_defaults(self):
        # Use our pre-defined resources.
        actual = self.driver.get_defaults(None, quota.QUOTAS._resources)
        self.assertEqual(self.expected_without_dict, actual)

    def test_get_class_quotas(self):
        actual = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
                                              'test_class')
        self.assertEqual(self.expected_without_dict, actual)

    def test_get_class_quotas_no_defaults(self):
        actual = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
                                              'test_class', False)
        self.assertEqual(self.expected_without_dict, actual)

    def test_get_project_quotas(self):
        actual = self.driver.get_project_quotas(None, quota.QUOTAS._resources,
                                                'test_project')
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_user_quotas(self):
        actual = self.driver.get_user_quotas(None, quota.QUOTAS._resources,
                                             'test_project', 'fake_user')
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_project_quotas_no_defaults(self):
        actual = self.driver.get_project_quotas(None, quota.QUOTAS._resources,
                                                'test_project',
                                                defaults=False)
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_user_quotas_no_defaults(self):
        actual = self.driver.get_user_quotas(None, quota.QUOTAS._resources,
                                             'test_project', 'fake_user',
                                             defaults=False)
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_project_quotas_no_usages(self):
        actual = self.driver.get_project_quotas(None, quota.QUOTAS._resources,
                                                'test_project', usages=False)
        self.assertEqual(self.expected_without_usages, actual)

    def test_get_user_quotas_no_usages(self):
        actual = self.driver.get_user_quotas(None, quota.QUOTAS._resources,
                                             'test_project', 'fake_user',
                                             usages=False)
        self.assertEqual(self.expected_without_usages, actual)
| {
"content_hash": "b1ddd200d5bf41111df5f7ff00316da2",
"timestamp": "",
"source": "github",
"line_count": 2471,
"max_line_length": 79,
"avg_line_length": 39.054633751517606,
"alnum_prop": 0.48432189339301995,
"repo_name": "imsplitbit/nova",
"id": "498d1d1fa3ddd48cd9e2bb2a730df7d8f56c7ef8",
"size": "97281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/test_quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13518591"
},
{
"name": "Shell",
"bytes": "16950"
}
],
"symlink_target": ""
} |
import optparse
from datetime import datetime, timedelta
from base import NoseDBReporterBase
__author__ = "Ali-Akber Saifee"
__email__ = "ali@indydevs.org"
__copyright__ = "Copyright 2014, Ali-Akber Saifee"
class NoseMySQLReporter(NoseDBReporterBase):
    """
    MySQL Connector. Reports the results of each test run into the tables
    ``testcase``, ``testsuite``, ``testcaseexecution`` and
    ``testsuiteexecution``.
    """
    name = "nosedbreport"

    run_insert_query = """
    insert into testcaseexecution (testcase, startTime, timeTaken, status, traceback, suiteexecution)
    values ('%(testcase)s', '%(startTime)s', '%(timeTaken)s', '%(status)s', '%(traceback)s', %(suiteexecution)d);
    """
    case_start_query = """
    insert into testcase values('%(id)s', '%(name)s', '%(description)s', '%(suite)s', '%(lastStarted)s', 0)
    on duplicate key update lastStarted='%(lastStarted)s', description='%(description)s';
    """
    suite_start_query = """
    insert into testsuite (name, lastStarted) values('%(suite)s', '%(lastStarted)s')
    on duplicate key update lastStarted='%(lastStarted)s';
    """
    suite_complete_query = """
    insert into testsuite (name, lastCompleted) values('%(suite)s', '%(lastCompleted)s')
    on duplicate key update lastCompleted='%(lastCompleted)s';
    """
    suiteexecution_complete_query = """
    insert into testsuiteexecution (suite, startTime, endTime)
    values ('%(suite)s', '%(startTime)s', '%(lastCompleted)s');
    """
    # BUG FIX: the original statement had no WHERE clause, so completing one
    # test case stamped lastCompleted onto *every* row of the testcase table.
    case_complete_query = """
    update testcase set lastCompleted = '%(lastCompleted)s' where id = '%(id)s';
    """

    def __init__(self):
        NoseDBReporterBase.__init__(self)

    def __execute_query(self, query, args):
        """
        helper method to execute a MySQL query and commit the result.

        :param query: the %-style query template to execute
        :param args: dict of values substituted into the template
        :return: the cursor's result count, or 0 if the query failed
        """
        import MySQLdb
        # sanitize quotes so substituted values cannot break the statement.
        # NOTE: mutates *args* in place — report() relies on the sanitized
        # dict when it later formats its own SELECT.
        for key, value in args.items():
            if isinstance(value, str):
                args[key] = value.replace("'", "\\'")
        ret = 0
        try:
            cursor = self.connection.cursor()
            ret = cursor.execute(query % args)
            self.connection.commit()
        except MySQLdb.ProgrammingError as e:
            self.logger.error("failed to execute query with error: %s"
                              % str(e.args[1]))
        except Exception as e:
            self.logger.error("unknown error executing query: %s" % str(e))
        return ret

    def configure(self, options, conf):
        """
        sets up the MySQL database connection based on the options
        provided on the command line.
        """
        try:
            # Imported inside the try so a missing driver disables the
            # plugin (the original imported it first, making the
            # ImportError handler unreachable).
            import MySQLdb
            self.connection = MySQLdb.connect(
                options.dbreport_host,
                options.dbreport_username,
                options.dbreport_password,
                options.dbreport_db,
                connect_timeout=5
            )
        except ImportError:
            self.enabled = False
            self.logger.error("The MySQLdb module is required for "
                              "nosedbreporter to work with mysql")
        except MySQLdb.OperationalError as e:
            self.enabled = False
            self.logger.error(e.args[1])

    def report(self, stream):
        """
        After successful completion of a nose run, perform the final
        reporting of the test results to the MySQL database.
        """
        if not self.connection:
            return
        results = self.test_case_results
        suiteexecids = {}
        for suite in self.test_suites:
            suite_update = {
                "suite": suite,
                "startTime": self.start_time,
                "lastCompleted": self.test_suites[suite]["lastCompleted"],
            }
            self.__execute_query(self.suite_complete_query, suite_update)
            self.__execute_query(self.suiteexecution_complete_query,
                                 suite_update)
            # fetch the auto-increment id of the suite execution row that
            # was just inserted, so test case runs can reference it.
            self.connection.query("""
                select id from testsuiteexecution where suite='%(suite)s' and
                startTime='%(startTime)s' and
                endTime='%(lastCompleted)s'
                """ % suite_update)
            rows = self.connection.store_result()
            suiteexecids[suite] = rows.fetch_row()[0][0]
        for case in results:
            case_result = results[case]
            completed = (
                datetime.strptime(case_result["lastStarted"], self.time_fmt)
                + timedelta(seconds=case_result["timeTaken"])
            ).strftime(self.time_fmt)
            case_update = {
                "id": case,
                "name": case_result["name"],
                "description": case_result["description"],
                "suite": case_result["suite"],
                "lastStarted": case_result["lastStarted"],
                "lastCompleted": completed,
            }
            run_update = {
                "testcase": case,
                "suite": case_result["suite"],
                "suiteexecution": suiteexecids[case_result["suite"]],
                "startTime": case_result["lastStarted"],
                "timeTaken": case_result["timeTaken"],
                "status": case_result["status"],
                "traceback": case_result["traceback"],
            }
            self.__execute_query(self.case_complete_query, case_update)
            self.__execute_query(self.run_insert_query, run_update)

    def startTest(self, test):
        """
        record initiation of a test case. Update the last start time
        of the test suite & test case.
        """
        if self.connection:
            description = self.get_full_doc(test)
            test_id = test.id()
            file_path, suite, case = test.address()
            # one timestamp for both rows so suite and case agree exactly.
            now = NoseDBReporterBase.time_now()
            case_update = {"id": test_id,
                           "name": case,
                           "description": description,
                           "suite": suite,
                           "lastStarted": now}
            suite_update = {"suite": suite,
                            "lastStarted": now}
            self.__execute_query(self.suite_start_query, suite_update)
            self.__execute_query(self.case_start_query, case_update)
        super(NoseMySQLReporter, self).startTest(test)

    def construct_schema(self):
        """
        called when the `--dbreport_create_schema` command option
        is passed to the plugin to create the mysql table schema.

        :return: True if the schema exists or was created, False when no
            database connection is configured
        """
        testcase_schema = """
        CREATE TABLE `testcase` (
        `id` varchar(255) NOT NULL,
        `name` varchar(255) NOT NULL,
        `description` varchar(255) NOT NULL,
        `suite` varchar(255) NOT NULL,
        `lastStarted` datetime DEFAULT NULL,
        `lastCompleted` datetime DEFAULT NULL,
        PRIMARY KEY (`id`),
        KEY `idx_name` (`name`),
        KEY `idx_suite` (`suite`),
        CONSTRAINT `fk_suite_name` FOREIGN KEY (`suite`) REFERENCES `testsuite` (`name`)
        ) ENGINE=InnoDB DEFAULT CHARSET=latin1
        """
        testsuite_schema = """
        CREATE TABLE `testsuite` (
        `id` int(11) NOT NULL AUTO_INCREMENT,
        `name` varchar(255) NOT NULL,
        `lastStarted` datetime DEFAULT NULL,
        `lastCompleted` datetime DEFAULT NULL,
        PRIMARY KEY (`id`),
        UNIQUE KEY `idx_name` (`name`) USING BTREE
        ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=latin1
        """
        testcaseexecution_schema = """
        CREATE TABLE `testcaseexecution` (
        `id` int(11) NOT NULL AUTO_INCREMENT,
        `testcase` varchar(255) NOT NULL,
        `suiteexecution` int(11) NOT NULL,
        `startTime` datetime NOT NULL,
        `timeTaken` float NOT NULL,
        `status` enum('success','fail','error','skipped','') NOT NULL,
        `traceback` text NOT NULL,
        PRIMARY KEY (`id`),
        KEY `idx_status` (`status`),
        KEY `idx_testcase` (`testcase`) USING BTREE,
        KEY `idx_suiteexecution` (`suiteexecution`),
        CONSTRAINT `fk_testcase_id` FOREIGN KEY (`testcase`) REFERENCES `testcase` (`id`),
        CONSTRAINT `fk_suiteexec_id` FOREIGN KEY (`suiteexecution`) REFERENCES `testsuiteexecution` (`id`)
        ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=latin1
        """
        testsuiteexecution_schema = """
        CREATE TABLE `testsuiteexecution` (
        `id` int(11) NOT NULL AUTO_INCREMENT,
        `suite` varchar(255) NOT NULL,
        `startTime` datetime NOT NULL,
        `endTime` datetime NOT NULL,
        PRIMARY KEY (`id`),
        KEY `idx_start` (`startTime`),
        KEY `idx_end` (`endTime`),
        CONSTRAINT `fk_testsuite_name` FOREIGN KEY (`suite`) REFERENCES `testsuite` (`name`)
        ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=latin1
        """
        if not self.connection:
            self.logger.error(
                "Unable to setup scheme due to mysql configuration error")
            return False
        cursor = self.connection.cursor()
        # only create the tables when all four are not already present;
        # creation order satisfies the foreign-key references.
        if not cursor.execute("show tables like 'test%%'") == 4:
            cursor.execute(testsuite_schema)
            cursor.execute(testcase_schema)
            cursor.execute(testsuiteexecution_schema)
            cursor.execute(testcaseexecution_schema)
        return True
| {
"content_hash": "6e5a373ae500d74fed68cae37d06683e",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 113,
"avg_line_length": 42.54771784232365,
"alnum_prop": 0.5191144919055978,
"repo_name": "alisaifee/nosedbreport",
"id": "0ce357956bdfa235b539a9b15ef1ffcd0559fc34",
"size": "10254",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nosedbreport/mysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27877"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'fbcode_builder steps to build rsocket'
import specs.rsocket as rsocket
def fbcode_builder_spec(builder):
    """Build spec for this project: everything is delegated to the rsocket
    spec, so the only content is the dependency list."""
    return dict(depends_on=[rsocket])
# fbcode_builder entry point: names the GitHub project being built and the
# function that produces its build spec.
config = {
'github_project': 'rsocket/rsocket-cpp',
'fbcode_builder_spec': fbcode_builder_spec,
}
| {
"content_hash": "4171cce32b44a1d5e43da69ec58499ed",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 47,
"avg_line_length": 20.95,
"alnum_prop": 0.6968973747016707,
"repo_name": "ReactiveSocket/reactivesocket-cpp",
"id": "85018bf0521977dca38056defad8e68e66d0c58b",
"size": "1039",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/fbcode_builder_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "798423"
},
{
"name": "CMake",
"bytes": "23342"
},
{
"name": "Python",
"bytes": "4565"
},
{
"name": "Shell",
"bytes": "3614"
}
],
"symlink_target": ""
} |
"""This example demonstrates how to handle policy violation errors.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: AdGroupAdService.mutate
"""
__author__ = 'Joseph DiLallo'
import re
import suds
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
    """Validate a text ad, attach exemption requests for exemptable policy
    violations, drop non-exemptable operations, then create the survivors.

    :param client: an initialized adwords.AdWordsClient
    :param ad_group_id: id of the ad group that receives the ad
    """
    ad_group_ad_service = client.GetService('AdGroupAdService', 'v201502')

    # Create text ad.
    text_ad_operation = {
        'operator': 'ADD',
        'operand': {
            'adGroupId': ad_group_id,
            'ad': {
                # The 'xsi_type' field allows you to specify the xsi:type of
                # the object being created. It's only necessary when you must
                # provide an explicit type that the client library can't
                # infer.
                'xsi_type': 'TextAd',
                'headline': 'Mars Cruise!!!',
                'description1': 'Visit the Red Planet in style.',
                'description2': 'Low-gravity fun for everyone!',
                'finalUrls': ['http://www.example.com'],
                'displayUrl': 'www.example.com',
            }
        }
    }

    operations = [text_ad_operation]

    # Validate the ad.
    try:
        # Enable "validate only" to check for errors.
        client.validate_only = True
        ad_group_ad_service.mutate(operations)
        print('Validation successful, no errors returned.')
    except suds.WebFault as e:
        for error in e.fault.detail.ApiExceptionFault.errors:
            if error['ApiError.Type'] != 'PolicyViolationError':
                # Non-policy error returned, re-throw exception.
                raise e
            operation_index = re.findall(r'operations\[(.*)\]\.',
                                         error['fieldPath'])
            if not operation_index:
                continue
            operation = operations[int(operation_index[0])]
            # BUG FIX: isExemptable arrives as a string; truth-testing it
            # directly (as the original did in the print below) treats
            # 'false' as exemptable. Normalize once, use consistently.
            is_exemptable = str(error['isExemptable']).lower() == 'true'
            print('Ad with headline \'%s\' violated %s policy \'%s\'.' %
                  (operation['operand']['ad']['headline'],
                   'exemptable' if is_exemptable else 'non-exemptable',
                   error['externalPolicyName']))
            if is_exemptable:
                # Add exemption request to the operation.
                print('Adding exemption request for policy name \'%s\' on '
                      'text \'%s\'.' %
                      (error['key']['policyName'],
                       error['key']['violatingText']))
                operation.setdefault('exemptionRequests', []).append(
                    {'key': error['key']})
            else:
                # BUG FIX: lists have no .delete(); the original
                # operations.delete(operation) raised AttributeError.
                print('Removing the operation from the request.')
                operations.remove(operation)

    # Add these ads. Disable "validate only" so the ads will get created.
    client.validate_only = False
    if operations:
        response = ad_group_ad_service.mutate(operations)
        if response and response['value']:
            ads = response['value']
            print('Added %s ad(s) to ad group %s.' % (len(ads), ad_group_id))
            for ad in ads:
                print(' Ad id is %s, type is %s and status is \'%s\'.' %
                      (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
        else:
            print('No ads were added.')
if __name__ == '__main__':
    # Initialize the client from ~/googleads.yaml and run the example.
    main(adwords.AdWordsClient.LoadFromStorage(), AD_GROUP_ID)
| {
"content_hash": "2ca85349a3bfb3d5254b3dd1451a27e1",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 79,
"avg_line_length": 35.554455445544555,
"alnum_prop": 0.5953773322194374,
"repo_name": "coxmediagroup/googleads-python-lib",
"id": "2a5a390d3a49d4ca29c10f28302b109cd62b77d0",
"size": "4209",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adwords/v201502/error_handling/handle_policy_violation_error.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535137"
}
],
"symlink_target": ""
} |
from tempest.api.compute import base
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class MultipleCreateNegativeTestJSON(base.BaseV2ComputeTest):
    """Negative tests for the min_count/max_count multiple-create options."""

    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('daf29d8d-e928-4a01-9a8c-b129603f3fc0')
    def test_min_count_less_than_one(self):
        # A min_count below one must be rejected with BadRequest.
        self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                          min_count=0)

    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('999aa722-d624-4423-b813-0d1ac9884d7a')
    def test_min_count_non_integer(self):
        # A fractional min_count must be rejected with BadRequest.
        self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                          min_count=2.5)

    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
    def test_max_count_less_than_one(self):
        # A max_count below one must be rejected with BadRequest.
        self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                          max_count=0)

    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('9c5698d1-d7af-4c80-b971-9d403135eea2')
    def test_max_count_non_integer(self):
        # A fractional max_count must be rejected with BadRequest.
        self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                          max_count=2.5)

    @decorators.attr(type=['negative'])
    @decorators.idempotent_id('476da616-f1ef-4271-a9b1-b9fc87727cdf')
    def test_max_count_less_than_min_count(self):
        # max_count smaller than min_count must be rejected with BadRequest.
        self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                          min_count=3, max_count=2)
| {
"content_hash": "ebe8c655643b79bc2e14d2c5f07d9723",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 44.104166666666664,
"alnum_prop": 0.6589513462446859,
"repo_name": "cisco-openstack/tempest",
"id": "6bdf83bb69f95a8e5a5bb7cf15b56e820dd19765",
"size": "2741",
"binary": false,
"copies": "1",
"ref": "refs/heads/proposed",
"path": "tempest/api/compute/servers/test_multiple_create_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4431271"
},
{
"name": "Shell",
"bytes": "7435"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.