| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
__author__ = 'kotaimen'
__date__ = '4/4/15'
import unittest
import time
import io
import re
from stonemason.util.timer import Timer, human_duration
class TestTimer(unittest.TestCase):
def test_human_duration(self):
self.assertEqual(human_duration(0.00123), '1.23ms')
self.assertEqual(human_duration(1.23), '1.2300s')
self.assertEqual(human_duration(123), '2.05m')
self.assertEqual(human_duration(12345), '3h25.75m')
def test_timer_message(self):
timer = Timer(message='Time: %(time)s')
timer.tic()
time.sleep(1)
timer.tac()
message = timer.get_message()
self.assertIsNotNone(re.match(r'Time: [01]\.\d+s', message))
self.assertLess(abs(timer.get_time() - 1), 0.5)
def test_timer_with(self):
class Writer(object):
def __init__(self):
self.message = ''
def __call__(self, message):
self.message += message
writer = Writer()
with Timer(writer=writer):
time.sleep(1)
self.assertIsNotNone(
re.match(r'Time taken: [01]\.\d+s', writer.message))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1eb2d0907fb365439e31ea3cdc572a3b",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 68,
"avg_line_length": 25.208333333333332,
"alnum_prop": 0.5768595041322314,
"repo_name": "Kotaimen/stonemason",
"id": "9de4485e83c07f955becbe2e8698c9dc95e157b4",
"size": "1237",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/util/test_timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "188442"
},
{
"name": "Python",
"bytes": "448406"
}
],
"symlink_target": ""
} |
import os, sys
import datetime
import iris
import iris.unit as unit
import iris.analysis.cartography
import numpy as np
from iris.coord_categorisation import add_categorised_coord
diag = 'avg.5216'
cube_name_explicit='stratiform_rainfall_rate'
cube_name_param='convective_rainfall_rate'
pp_file_path='/projects/cascade/pwille/moose_retrievals/'
experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzns', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ]
#experiment_ids = [ 'dklwu', 'dklzq', 'dklyu', 'dkmbq', 'dkbhu', 'djznu', 'dkhgu', 'djzns' ]
#experiment_ids = ['djznu', 'dkhgu' ] # High Res
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkjxq']
#experiment_ids = ['djznw', 'djzny', 'djznq', 'dkmbq', 'dklzq', 'dkjxq' ] # Params
# Load global LAM
dtmindt = datetime.datetime(2011,8,19,0,0,0)
dtmaxdt = datetime.datetime(2011,9,7,23,0,0)
dtmin = unit.date2num(dtmindt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
dtmax = unit.date2num(dtmaxdt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
time_constraint = iris.Constraint(time= lambda t: dtmin <= t.point <= dtmax)
# Min and max heights and lats/lons from smallest model domain (dkbhu) - see spreadsheet
height_min=600.
height_max=5000.
# Min and max lats lons from smallest model domain (dkbhu) - see spreadsheet
latmin=8
latmax=21
lonmin=72
lonmax=77
lat_constraint=iris.Constraint(grid_latitude= lambda la: latmin <= la.point <= latmax)
lon_constraint=iris.Constraint(grid_longitude= lambda lo: lonmin <= lo.point <= lonmax)
fg = '%sdjzn/djznw/%s.pp' % (pp_file_path, diag)
glob_load = iris.load_cube(fg, ('%s' % cube_name_param) & time_constraint)
## Get time points from global LAM to use as time constraint when loading other runs
time_list = glob_load.coord('time').points
glob_tc = iris.Constraint(time=time_list)
del glob_load
def unrotate_pole_update_cube(cube):
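    """Rebuild the cube's grid_latitude/grid_longitude as plain DimCoords in
    degrees, unrotating the pole when the cube's coordinate system is a
    RotatedGeogCS."""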
lat = cube.coord('grid_latitude').points
lon = cube.coord('grid_longitude').points
cs = cube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' %s - %s - Unrotate pole %s' % (diag, experiment_id, cs)
lons, lats = np.meshgrid(lon, lat)
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
for i, coord in enumerate (cube.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_cube = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_cube = i
csur=cs.ellipsoid
cube.remove_coord('grid_latitude')
cube.remove_coord('grid_longitude')
cube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_cube)
cube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_cube)
return cube
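# For each model run: build a total precipitation rate cube (convective +
# stratiform where both exist), compute the mean diurnal cycle by hour of
# day, then save domain-wide, sea-only and land-only means as numpy arrays.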
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
fu = '%s%s/%s/%s.pp' % (pp_file_path, expmin1, experiment_id, diag)
flsm = '%s%s/%s/30.pp' % (pp_file_path, expmin1, experiment_id)
print experiment_id
sys.stdout.flush()
try:
#cube_names = ['%s' % cube_name_param, '%s' % cube_name_explicit]
        cubeconv = iris.load_cube(fu, ('%s' % cube_name_param) & glob_tc)
        cubeconv = unrotate_pole_update_cube(cubeconv)
        cubestrat = iris.load_cube(fu, ('%s' % cube_name_explicit) & glob_tc)
        cubestrat = unrotate_pole_update_cube(cubestrat)
print cubestrat
cube=cubeconv.extract(lat_constraint & lon_constraint) + cubestrat.extract(lat_constraint & lon_constraint)
cube.rename('total_precipitation_rate')
except iris.exceptions.ConstraintMismatchError:
cube = iris.load_cube(fu, ('%s' % cube_name_explicit) & glob_tc)
cube= unrotate_pole_update_cube(cube)
cube = cube.extract(lat_constraint & lon_constraint)
# Mean at each grid point by hour of day and save
add_categorised_coord(cube, 'hour', 'time',lambda coord, x: coord.units.num2date(x).hour)
diurnal_mean_cube = cube.aggregated_by('hour', iris.analysis.MEAN)
del cube
#try:
# iris.save(diurnal_mean_cube, '%s%s/%s/%s_rainfall_hourly_mean.pp' % (pp_file_path, expmin1, experiment_id, diag))
#except Exception, e:
# print e
# pass
# Load land/sea mask
lsm = iris.load_cube(flsm, ('land_binary_mask' ) )
lsm = unrotate_pole_update_cube(lsm)
lsm=lsm.extract(lat_constraint & lon_constraint)
print lsm
    # Load orography; the file path is an assumption mirroring the land/sea
    # mask convention above (surface altitude is stash 33 in these retrievals).
    floro = '%s%s/%s/33.pp' % (pp_file_path, expmin1, experiment_id)
    oro = iris.load_cube(floro, ('surface_altitude'))
    oro = unrotate_pole_update_cube(oro)
    oro = oro.extract(lat_constraint & lon_constraint)
    heights = oro.slices(['grid_latitude', 'grid_longitude']).next().data
sys.stdout.flush()
# For Sea and Land, mask area and calculate mean of each hour for sea/land and SAVE as numpy array
tdmc= diurnal_mean_cube.collapsed(['grid_latitude', 'grid_longitude'], iris.analysis.MEAN)
total_diurnal_mean_cube=[tdmc.data.data, diurnal_mean_cube.coord('hour').points+0.5]
print total_diurnal_mean_cube
np.save('%s%s/%s/%s_total_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), total_diurnal_mean_cube)
for s in ([0,1]):
#nancube = np.where(lsm.data==s, diurnal_mean_cube.data, np.NaN)
nancube = np.where((lsm.data==s) & (heights>=height_min) & (heights<=height_max), diurnal_mean_cube.data, np.NaN)
maskedcube = np.ma.masked_array(nancube,np.isnan(nancube))
total_rainfall = np.mean(maskedcube.reshape(maskedcube.shape[0], (maskedcube.shape[1]*maskedcube.shape[2])), axis=1)
trnp =[total_rainfall.data, diurnal_mean_cube.coord('hour').points+0.5]
if s == 0:
# Areas of ocean
print total_rainfall
np.save('%s%s/%s/%s_sea_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), trnp)
#np.save('%s%s/%s/%s_sea_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), maskedcube)
if s == 1:
# Areas of land
np.save('%s%s/%s/%s_land_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), trnp)
#np.save('%s%s/%s/%s_land_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), maskedcube)
del lsm
| {
"content_hash": "6ed5587cb03d8e57c7e14ce2cb98c704",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 197,
"avg_line_length": 43.079754601226995,
"alnum_prop": 0.655511250356024,
"repo_name": "peterwilletts24/Monsoon-Python-Scripts",
"id": "bea9f1c5cc8d07c99ab5b03e1a768d7aaf5f5134",
"size": "7022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rain/land_sea_diurnal/rain_mask_save_lat_lon_western_ghats_over_600m.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "576592"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import HttpResponseError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SparkBatchOperations:
"""SparkBatchOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance
    that instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
workspace_name: str,
spark_pool_name: str,
from_parameter: Optional[int] = None,
size: Optional[int] = None,
detailed: Optional[bool] = None,
**kwargs
) -> "models.ExtendedLivyListBatchResponse":
"""List all spark batch jobs which are running under a particular spark pool.
:param workspace_name: The name of the workspace to execute operations on.
:type workspace_name: str
:param spark_pool_name: Name of the spark pool. "ondemand" targets the ondemand pool.
:type spark_pool_name: str
:param from_parameter: Optional param specifying which index the list should begin from.
:type from_parameter: int
:param size: Optional param specifying the size of the returned list.
By default it is 20 and that is the maximum.
:type size: int
:param detailed: Optional query param specifying whether detailed response is returned beyond
plain livy.
:type detailed: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExtendedLivyListBatchResponse or the result of cls(response)
:rtype: ~azure.synapse.models.ExtendedLivyListBatchResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls: ClsType["models.ExtendedLivyListBatchResponse"] = kwargs.pop('cls', None)
error_map = kwargs.pop('error_map', {})
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', skip_quote=True),
'SynapseDnsSuffix': self._serialize.url("self._config.synapse_dns_suffix", self._config.synapse_dns_suffix, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("spark_pool_name", spark_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters: Dict[str, Any] = {}
if from_parameter is not None:
query_parameters['from'] = self._serialize.query("from_parameter", from_parameter, 'int')
if size is not None:
query_parameters['size'] = self._serialize.query("size", size, 'int')
if detailed is not None:
query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')
# Construct headers
header_parameters: Dict[str, Any] = {}
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('ExtendedLivyListBatchResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/livyApi/versions/{livyApiVersion}/sparkPools/{sparkPoolName}/batches'}
async def create(
self,
workspace_name: str,
spark_pool_name: str,
livy_request: "models.ExtendedLivyBatchRequest",
detailed: Optional[bool] = None,
**kwargs
) -> "models.ExtendedLivyBatchResponse":
"""Create new spark batch job.
:param workspace_name: The name of the workspace to execute operations on.
:type workspace_name: str
:param spark_pool_name: Name of the spark pool. "ondemand" targets the ondemand pool.
:type spark_pool_name: str
:param livy_request: Livy compatible batch job request payload.
:type livy_request: ~azure.synapse.models.ExtendedLivyBatchRequest
:param detailed: Optional query param specifying whether detailed response is returned beyond
plain livy.
:type detailed: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExtendedLivyBatchResponse or the result of cls(response)
:rtype: ~azure.synapse.models.ExtendedLivyBatchResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls: ClsType["models.ExtendedLivyBatchResponse"] = kwargs.pop('cls', None)
error_map = kwargs.pop('error_map', {})
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', skip_quote=True),
'SynapseDnsSuffix': self._serialize.url("self._config.synapse_dns_suffix", self._config.synapse_dns_suffix, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("spark_pool_name", spark_pool_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters: Dict[str, Any] = {}
if detailed is not None:
query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')
# Construct headers
header_parameters: Dict[str, Any] = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json'
# Construct body
body_content = self._serialize.body(livy_request, 'ExtendedLivyBatchRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('ExtendedLivyBatchResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/livyApi/versions/{livyApiVersion}/sparkPools/{sparkPoolName}/batches'}
async def get(
self,
workspace_name: str,
spark_pool_name: str,
batch_id: int,
detailed: Optional[bool] = None,
**kwargs
) -> "models.ExtendedLivyBatchResponse":
"""Gets a single spark batch job.
:param workspace_name: The name of the workspace to execute operations on.
:type workspace_name: str
:param spark_pool_name: Name of the spark pool. "ondemand" targets the ondemand pool.
:type spark_pool_name: str
:param batch_id: Identifier for the batch job.
:type batch_id: int
:param detailed: Optional query param specifying whether detailed response is returned beyond
plain livy.
:type detailed: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExtendedLivyBatchResponse or the result of cls(response)
:rtype: ~azure.synapse.models.ExtendedLivyBatchResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls: ClsType["models.ExtendedLivyBatchResponse"] = kwargs.pop('cls', None)
error_map = kwargs.pop('error_map', {})
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', skip_quote=True),
'SynapseDnsSuffix': self._serialize.url("self._config.synapse_dns_suffix", self._config.synapse_dns_suffix, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("spark_pool_name", spark_pool_name, 'str'),
'batchId': self._serialize.url("batch_id", batch_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters: Dict[str, Any] = {}
if detailed is not None:
query_parameters['detailed'] = self._serialize.query("detailed", detailed, 'bool')
# Construct headers
header_parameters: Dict[str, Any] = {}
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = self._deserialize('ExtendedLivyBatchResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/livyApi/versions/{livyApiVersion}/sparkPools/{sparkPoolName}/batches/{batchId}'}
async def delete(
self,
workspace_name: str,
spark_pool_name: str,
batch_id: int,
**kwargs
) -> None:
"""Cancels a running spark batch job.
:param workspace_name: The name of the workspace to execute operations on.
:type workspace_name: str
:param spark_pool_name: Name of the spark pool. "ondemand" targets the ondemand pool.
:type spark_pool_name: str
:param batch_id: Identifier for the batch job.
:type batch_id: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls: ClsType[None] = kwargs.pop('cls', None)
error_map = kwargs.pop('error_map', {})
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', skip_quote=True),
'SynapseDnsSuffix': self._serialize.url("self._config.synapse_dns_suffix", self._config.synapse_dns_suffix, 'str', skip_quote=True),
'livyApiVersion': self._serialize.url("self._config.livy_api_version", self._config.livy_api_version, 'str', skip_quote=True),
'sparkPoolName': self._serialize.url("spark_pool_name", spark_pool_name, 'str'),
'batchId': self._serialize.url("batch_id", batch_id, 'int'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters: Dict[str, Any] = {}
# Construct headers
header_parameters: Dict[str, Any] = {}
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/livyApi/versions/{livyApiVersion}/sparkPools/{sparkPoolName}/batches/{batchId}'}
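# Usage sketch (illustrative, not generated code): these operations are meant
# to be reached through a client object that attaches this group as an
# attribute, so a call might look like
#
#   batches = await client.spark_batch.list("myworkspace", "mypool", size=10)
#
# where the `spark_batch` attribute name is an assumption based on the class
# docstring above.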
| {
"content_hash": "a47a83a4df50a14ac65344222f015fb5",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 144,
"avg_line_length": 46.37152777777778,
"alnum_prop": 0.6551104455260203,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f2e3e19342d8e18416e55cb8a12b33813e2e7414",
"size": "13822",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-synapse/azure/synapse/aio/operations_async/_spark_batch_operations_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Complex data model example modeling stackoverflow-like data.
It is used to showcase several key features of elasticsearch-dsl:
* Object and Nested fields: see User and Comment classes and fields they
are used in
* method add_comment is used to add comments
* Parent/Child relationship
* See the Join field on Post creating the relationship between Question
and Answer
* Meta.matches allows the hits from same index to be wrapped in proper
classes
* to see how child objects are created see Question.add_answer
* Question.search_answers shows how to query for children of a
particular parent
"""
from datetime import datetime
from elasticsearch_dsl import (
Boolean,
Date,
Document,
InnerDoc,
Join,
Keyword,
Long,
Nested,
Object,
Text,
connections,
)
class User(InnerDoc):
"""
Class used to represent a denormalized user stored on other objects.
"""
id = Long(required=True)
signed_up = Date()
username = Text(fields={"keyword": Keyword()}, required=True)
email = Text(fields={"keyword": Keyword()})
location = Text(fields={"keyword": Keyword()})
class Comment(InnerDoc):
"""
Class wrapper for nested comment objects.
"""
author = Object(User, required=True)
created = Date(required=True)
content = Text(required=True)
class Post(Document):
"""
Base class for Question and Answer containing the common fields.
"""
author = Object(User, required=True)
created = Date(required=True)
body = Text(required=True)
comments = Nested(Comment)
question_answer = Join(relations={"question": "answer"})
@classmethod
def _matches(cls, hit):
# Post is an abstract class, make sure it never gets used for
# deserialization
return False
class Index:
name = "test-qa-site"
settings = {
"number_of_shards": 1,
"number_of_replicas": 0,
}
def add_comment(self, user, content, created=None, commit=True):
c = Comment(author=user, content=content, created=created or datetime.now())
self.comments.append(c)
if commit:
self.save()
return c
def save(self, **kwargs):
# if there is no date, use now
if self.created is None:
self.created = datetime.now()
return super().save(**kwargs)
class Question(Post):
# use multi True so that .tags will return empty list if not present
tags = Keyword(multi=True)
title = Text(fields={"keyword": Keyword()})
@classmethod
def _matches(cls, hit):
"""Use Question class for parent documents"""
return hit["_source"]["question_answer"] == "question"
@classmethod
def search(cls, **kwargs):
return cls._index.search(**kwargs).filter("term", question_answer="question")
def add_answer(self, user, body, created=None, accepted=False, commit=True):
answer = Answer(
            # required to make sure the answer is stored in the same shard
_routing=self.meta.id,
# since we don't have explicit index, ensure same index as self
_index=self.meta.index,
# set up the parent/child mapping
question_answer={"name": "answer", "parent": self.meta.id},
# pass in the field values
author=user,
created=created,
body=body,
            is_accepted=accepted,
)
if commit:
answer.save()
return answer
def search_answers(self):
# search only our index
s = Answer.search()
# filter for answers belonging to us
s = s.filter("parent_id", type="answer", id=self.meta.id)
# add routing to only go to specific shard
s = s.params(routing=self.meta.id)
return s
def get_answers(self):
"""
Get answers either from inner_hits already present or by searching
elasticsearch.
"""
if "inner_hits" in self.meta and "answer" in self.meta.inner_hits:
return self.meta.inner_hits.answer.hits
return list(self.search_answers())
def save(self, **kwargs):
self.question_answer = "question"
return super().save(**kwargs)
class Answer(Post):
is_accepted = Boolean()
@classmethod
def _matches(cls, hit):
"""Use Answer class for child documents with child name 'answer'"""
return (
isinstance(hit["_source"]["question_answer"], dict)
and hit["_source"]["question_answer"].get("name") == "answer"
)
@classmethod
def search(cls, **kwargs):
return cls._index.search(**kwargs).exclude("term", question_answer="question")
@property
def question(self):
# cache question in self.meta
        # any attributes set on self would be interpreted as fields
if "question" not in self.meta:
self.meta.question = Question.get(
id=self.question_answer.parent, index=self.meta.index
)
return self.meta.question
def save(self, **kwargs):
# set routing to parents id automatically
self.meta.routing = self.question_answer.parent
return super().save(**kwargs)
def setup():
"""Create an IndexTemplate and save it into elasticsearch."""
index_template = Post._index.as_template("base")
index_template.save()
if __name__ == "__main__":
# initiate the default connection to elasticsearch
connections.create_connection()
# create index
setup()
# user objects to use
nick = User(
id=47,
signed_up=datetime(2017, 4, 3),
username="fxdgear",
email="nick.lang@elastic.co",
location="Colorado",
)
honza = User(
id=42,
signed_up=datetime(2013, 4, 3),
username="honzakral",
email="honza@elastic.co",
location="Prague",
)
# create a question object
question = Question(
_id=1,
author=nick,
tags=["elasticsearch", "python"],
title="How do I use elasticsearch from Python?",
body="""
I want to use elasticsearch, how do I do it from Python?
""",
)
question.save()
answer = question.add_answer(honza, "Just use `elasticsearch-py`!")
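    # Follow-up sketch: list the answers via the parent_id query built in
    # Question.search_answers (assumes the index has refreshed since
    # answer.save() ran above).
    for a in question.search_answers():
        print(a.author.username, "answered:", a.body)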
| {
"content_hash": "8e665b09210d7f7caa79cae083c1ce81",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 86,
"avg_line_length": 28.18421052631579,
"alnum_prop": 0.6034858387799564,
"repo_name": "elastic/elasticsearch-dsl-py",
"id": "df832d650dd3dc5cbf08b8011cc79d52ac6ce9f1",
"size": "7214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/parent_child.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "423092"
}
],
"symlink_target": ""
} |
import os
from .base import * # noqa
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'docs',
'USER': 'postgres', # Not used with sqlite3.
'PASSWORD': '',
'HOST': '10.177.73.97',
'PORT': '',
}
}
DEBUG = False
TEMPLATE_DEBUG = False
CELERY_ALWAYS_EAGER = False
MEDIA_URL = 'https://media.readthedocs.org/'
STATIC_URL = 'https://media.readthedocs.org/static/'
ADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/'
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://odin:8983/solr',
}
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'localhost:6379',
'PREFIX': 'docs',
'OPTIONS': {
'DB': 1,
'PARSER_CLASS': 'redis.connection.HiredisParser'
},
},
}
# Elasticsearch settings.
ES_HOSTS = ['backup:9200', 'db:9200']
ES_DEFAULT_NUM_REPLICAS = 1
ES_DEFAULT_NUM_SHARDS = 5
SLUMBER_API_HOST = 'https://readthedocs.org'
WEBSOCKET_HOST = 'websocket.readthedocs.org:8088'
PRODUCTION_DOMAIN = 'readthedocs.org'
USE_SUBDOMAIN = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Lock builds for 5 minutes
REPO_LOCK_SECONDS = 300
# Don't re-confirm existing accounts
ACCOUNT_EMAIL_VERIFICATION = 'none'
FILE_SYNCER = 'privacy.backends.syncers.DoubleRemotePuller'
# set GitHub scope
SOCIALACCOUNT_PROVIDERS = {
'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}
}
# allauth settings
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
try:
from local_settings import * # noqa
except ImportError:
pass
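# Example override (illustrative values): a local_settings.py on the import
# path can redefine any of the plain settings above, e.g.
#
#   DEBUG = True
#   SLUMBER_API_HOST = 'http://localhost:8000'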
| {
"content_hash": "b1ba84ad7a1d3225953f0104f0663c79",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 85,
"avg_line_length": 23.756410256410255,
"alnum_prop": 0.6341068537506745,
"repo_name": "espdev/readthedocs.org",
"id": "236a02044afff8edf35e635ed2bdae4edc2101db",
"size": "1853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/settings/postgres.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "65340"
},
{
"name": "HTML",
"bytes": "216474"
},
{
"name": "JavaScript",
"bytes": "1437755"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Nginx",
"bytes": "891"
},
{
"name": "Perl",
"bytes": "6814"
},
{
"name": "Python",
"bytes": "1452456"
},
{
"name": "Shell",
"bytes": "1501"
}
],
"symlink_target": ""
} |
import functools
import importlib
import inspect
import threading
from json.decoder import JSONDecoder
from json.encoder import JSONEncoder
from pants.build_graph.address import Address
from pants.engine.internals.objects import Resolvable, Serializable
from pants.engine.internals.parser import ParseError, Parser
from pants.util.memo import memoized, memoized_property
from pants.util.strutil import ensure_text
@memoized
def _import(typename):
modulename, _, symbolname = typename.rpartition(".")
if not modulename:
raise ParseError(f"Expected a fully qualified type name, given {typename}")
try:
mod = importlib.import_module(modulename)
try:
return getattr(mod, symbolname)
except AttributeError:
raise ParseError(
"The symbol {} was not found in module {} when attempting to convert "
"type name {}".format(symbolname, modulename, typename)
)
except ImportError as e:
raise ParseError(
"Failed to import type name {} from module {}: {}".format(typename, modulename, e)
)
class JsonParser(Parser):
def __init__(self, symbol_table):
super().__init__()
self.symbol_table = symbol_table
def _as_type(self, type_or_name):
return _import(type_or_name) if isinstance(type_or_name, str) else type_or_name
@staticmethod
def _object_decoder(obj, symbol_table):
# A magic field will indicate type and this can be used to wrap the object in a type.
type_alias = obj.get("type_alias", None)
if not type_alias:
return obj
else:
symbol = symbol_table(type_alias)
return symbol(**obj)
@memoized_property
def _decoder(self):
symbol_table = self.symbol_table.table
decoder = functools.partial(
self._object_decoder,
symbol_table=symbol_table.__getitem__ if symbol_table else self._as_type,
)
return JSONDecoder(object_hook=decoder, strict=True)
def parse(self, filepath, filecontent, _extra_symbols):
"""Parse the given json encoded string into a list of top-level objects found.
The parser accepts both blank lines and comment lines (those beginning with optional whitespace
followed by the '#' character) as well as more than one top-level JSON object.
        The parser also supports a simple protocol for serialized types that have an `_asdict` method.
This includes `namedtuple` subtypes as well as any custom class with an `_asdict` method defined;
see :class:`pants.engine.serializable.Serializable`.
"""
json = ensure_text(filecontent)
decoder = self._decoder
# Strip comment lines and blank lines, which we allow, but preserve enough information about the
# stripping to constitute a reasonable error message that can be used to find the portion of the
# JSON document containing the error.
def non_comment_line(l):
stripped = l.lstrip()
return stripped if (stripped and not stripped.startswith("#")) else None
offset = 0
objects = []
while True:
lines = json[offset:].splitlines()
if not lines:
break
# Strip whitespace and comment lines preceding the next JSON object.
while True:
line = non_comment_line(lines[0])
if not line:
comment_line = lines.pop(0)
offset += len(comment_line) + 1
elif line.startswith("{") or line.startswith("["):
# Account for leading space in this line that starts off the JSON object.
offset += len(lines[0]) - len(line)
break
else:
raise ParseError(f"Unexpected json line:\n{lines[0]}")
lines = json[offset:].splitlines()
if not lines:
break
            # Prepare the JSON blob for parsing - strip blank and comment lines, recording
            # enough information to reconstitute original offsets after the parse.
comment_lines = []
non_comment_lines = []
for line_number, line in enumerate(lines):
if non_comment_line(line):
non_comment_lines.append(line)
else:
comment_lines.append((line_number, line))
data = "\n".join(non_comment_lines)
try:
obj, idx = decoder.raw_decode(data)
objects.append(obj)
if idx >= len(data):
break
offset += idx
# Add back in any parsed blank or comment line offsets.
parsed_line_count = len(data[:idx].splitlines())
for line_number, line in comment_lines:
if line_number >= parsed_line_count:
break
offset += len(line) + 1
parsed_line_count += 1
except ValueError as e:
json_lines = data.splitlines()
col_width = len(str(len(json_lines)))
col_padding = " " * col_width
def format_line(line):
return f"{col_padding} {line}"
header_lines = [format_line(line) for line in json[:offset].splitlines()]
formatted_json_lines = [
(
"{line_number:{col_width}}: {line}".format(
col_width=col_width, line_number=line_number, line=line
)
)
for line_number, line in enumerate(json_lines, start=1)
]
for line_number, line in comment_lines:
formatted_json_lines.insert(line_number, format_line(line))
raise ParseError(
"{error}\nIn document at {filepath}:\n{json_data}".format(
error=e,
filepath=filepath,
json_data="\n".join(header_lines + formatted_json_lines),
)
)
return objects
def _object_encoder(obj, inline):
if isinstance(obj, Resolvable):
return obj.resolve() if inline else obj.address
if isinstance(obj, Address):
return obj.reference()
if not Serializable.is_serializable(obj):
raise ParseError(
"Can only encode Serializable objects in JSON, given {!r} of type {}".format(
obj, type(obj).__name__
)
)
encoded = obj._asdict()
if "type_alias" not in encoded:
encoded = encoded.copy()
encoded["type_alias"] = f"{inspect.getmodule(obj).__name__}.{type(obj).__name__}"
return {k: v for k, v in encoded.items() if v}
def encode_json(obj, inline=False, **kwargs):
"""Encode the given object as json.
Supports objects that follow the `_asdict` protocol. See `parse_json` for more information.
:param obj: A serializable object.
:param bool inline: `True` to inline all resolvable objects as nested JSON objects, `False` to
serialize those objects' addresses instead; `False` by default.
:param **kwargs: Any kwargs accepted by :class:`json.JSONEncoder` besides `encoding` and
`default`.
:returns: A UTF-8 json encoded blob representing the object.
:rtype: string
:raises: :class:`ParseError` if there were any problems encoding the given `obj` in json.
"""
encoder = JSONEncoder(default=functools.partial(_object_encoder, inline=inline), **kwargs)
return encoder.encode(obj)
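# Illustrative round trip (hypothetical type): namedtuples satisfy the
# `_asdict` protocol, so something like
#
#   Target = collections.namedtuple('Target', ['name'])
#   encode_json(Target(name='a'))
#
# yields a JSON object carrying "name" plus the injected "type_alias" entry.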
class PythonAssignmentsParser(Parser):
"""A parser that parses the given python code into a list of top-level objects found.
Only Serializable objects assigned to top-level variables will be collected and returned. These
objects will be addressable via their top-level variable names in the parsed namespace.
"""
def __init__(self, symbol_table):
super().__init__()
self.symbol_table = symbol_table
@memoized_property
def _globals(self):
def aliased(type_alias, object_type, **kwargs):
return object_type(type_alias=type_alias, **kwargs)
parse_globals = {}
for alias, symbol in self.symbol_table.table.items():
parse_globals[alias] = functools.partial(aliased, alias, symbol)
return parse_globals
def parse(self, filepath, filecontent, _extra_symbols):
parse_globals = self._globals
python = filecontent
symbols = {}
exec(python, parse_globals, symbols)
objects = []
for name, obj in symbols.items():
if isinstance(obj, type):
# Allow type imports
continue
if not Serializable.is_serializable(obj):
raise ParseError(f"Found a non-serializable top-level object: {obj}")
attributes = obj._asdict()
if "name" in attributes:
attributes = attributes.copy()
redundant_name = attributes.pop("name", None)
if redundant_name and redundant_name != name:
raise ParseError(
"The object named {!r} is assigned to a mismatching name {!r}".format(
redundant_name, name
)
)
obj_type = type(obj)
named_obj = obj_type(name=name, **attributes)
objects.append(named_obj)
return objects
class PythonCallbacksParser(Parser):
"""A parser that parses the given python code into a list of top-level objects found.
Only Serializable objects with `name`s will be collected and returned. These objects will be
addressable via their name in the parsed namespace.
"""
def __init__(self, symbol_table):
super().__init__()
self.symbol_table = symbol_table
self._lock = threading.Lock()
@memoized_property
def _globals(self):
objects = []
def registered(type_name, object_type, name=None, **kwargs):
if name:
obj = object_type(name=name, type_alias=type_name, **kwargs)
if Serializable.is_serializable(obj):
objects.append(obj)
return obj
else:
return object_type(type_alias=type_name, **kwargs)
parse_globals = {}
for alias, symbol in self.symbol_table.table.items():
parse_globals[alias] = functools.partial(registered, alias, symbol)
return objects, parse_globals
def parse(self, filepath, filecontent, _extra_symbols):
objects, parse_globals = self._globals
python = filecontent
with self._lock:
del objects[:]
exec(python, parse_globals, {})
return list(objects)
| {
"content_hash": "54266ab7a4b755a41a8aa77204113fa7",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 108,
"avg_line_length": 37.826530612244895,
"alnum_prop": 0.5793543746066001,
"repo_name": "tdyas/pants",
"id": "987a8390b3155f96d8844de8a113ed0f1c779450",
"size": "11253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/engine/internals/examples/parsers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
from . import register
from spirit.models.comment_like import CommentLike
from spirit.forms.comment_like import LikeForm
@register.inclusion_tag('spirit/comment_like/_form.html')
def render_like_form(comment, like, next=None):
form = LikeForm()
return {'form': form, 'comment_id': comment.pk, 'like': like, 'next': next}
@register.simple_tag()
def populate_likes(comments, user, to_attr='like'):
# TODO: use Prefetch on django 1.7, move this to CustomQuerySet.as_manager
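    # Rough sketch of that TODO (the reverse relation name is an assumption):
    #   comments = comments.prefetch_related(Prefetch(
    #       'comment_likes', queryset=CommentLike.objects.filter(user=user),
    #       to_attr=to_attr))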
likes = CommentLike.objects.filter(comment__in=comments, user=user)
likes_dict = {l.comment_id: l for l in likes}
for c in comments:
setattr(c, to_attr, likes_dict.get(c.pk))
return "" | {
"content_hash": "82d175a0c7ebbcf12727841717b55664",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 33.19047619047619,
"alnum_prop": 0.703012912482066,
"repo_name": "bjorncooley/rainforest_makers",
"id": "ddfb8c900d468f60e219270355d09626ef830f84",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spirit/templatetags/tags/comment_like.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "94665"
},
{
"name": "CoffeeScript",
"bytes": "54667"
},
{
"name": "JavaScript",
"bytes": "97637"
},
{
"name": "Python",
"bytes": "395030"
}
],
"symlink_target": ""
} |
from mamba import describe
with describe('Fixture#without_inner_contexts'):
def first_example():
pass
def second_example():
pass
def third_example():
pass
| {
"content_hash": "109635eb74507e0fb2030ce60d2664d3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 17.636363636363637,
"alnum_prop": 0.6185567010309279,
"repo_name": "jaimegildesagredo/mamba",
"id": "1aaec85460b2feefeb9d1ec64e425c8ce414e434",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spec/fixtures/without_inner_contexts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51394"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import relatively if available to work before installing catkin or overlaying installed version
if os.path.exists(os.path.join('/opt/ros/groovy/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/groovy/share/catkin/cmake', '..', 'python'))
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/vsunder/ButlerBot/rosbuild_ws/src/intelligence/build/devel/env.sh')
output_filename = '/home/vsunder/ButlerBot/rosbuild_ws/src/intelligence/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| {
"content_hash": "bb800169ea6c41700b2d4930fb22eed1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 111,
"avg_line_length": 44.8,
"alnum_prop": 0.7600446428571429,
"repo_name": "Boberito25/ButlerBot",
"id": "aebb6ba847e9ded20db02bf82e45c4e0e016a699",
"size": "896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rosbuild_ws/src/intelligence/build/catkin_generated/generate_cached_setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "16142"
},
{
"name": "C",
"bytes": "2213677"
},
{
"name": "C++",
"bytes": "3480597"
},
{
"name": "CSS",
"bytes": "5038"
},
{
"name": "Common Lisp",
"bytes": "78045"
},
{
"name": "FORTRAN",
"bytes": "1454013"
},
{
"name": "Java",
"bytes": "4099"
},
{
"name": "JavaScript",
"bytes": "7839"
},
{
"name": "Matlab",
"bytes": "8737"
},
{
"name": "Objective-C",
"bytes": "6505"
},
{
"name": "Python",
"bytes": "251594"
},
{
"name": "Shell",
"bytes": "58375"
},
{
"name": "TeX",
"bytes": "21642"
}
],
"symlink_target": ""
} |
__author__ = 'jonathan'
import test.nova._fixtures as models
from lib.rome.core.orm.query import Query
from lib.rome.core.orm.query import or_
from threading import Thread
import time

current_milli_time = lambda: int(round(time.time() * 1000))
instance_uuid = ""
class SelectorThread(Thread):
def __init__(self, instance_uuid):
Thread.__init__(self)
self.instance_uuid = instance_uuid
def run(self):
        query = Query(models.InstanceInfoCache).filter_by(instance_uuid=self.instance_uuid)
query.first()
if __name__ == '__main__':
import logging
logging.getLogger().setLevel(logging.DEBUG)
n = 100000
one_info_cache = Query(models.InstanceInfoCache).first()
for i in range(0, n):
thread_1 = SelectorThread(one_info_cache.instance_uuid)
thread_1.start()
# thread_1.join()
time.sleep(10)
| {
"content_hash": "20efc2096fdc14ec2772dc5a40967488",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 86,
"avg_line_length": 23.05263157894737,
"alnum_prop": 0.6575342465753424,
"repo_name": "BeyondTheClouds/rome",
"id": "2a7758f7f4a4686360662291954cc7a4f760b71e",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/nova/benchmarks/bench_secondary_index_query.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629714"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Campaign',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.SlugField(help_text='The key will be used to reference this campaign from your scripts.', unique=True, verbose_name='key')),
('name', models.CharField(max_length=255, verbose_name='name')),
('subject', models.CharField(help_text='May contain template variables.', max_length=255, verbose_name='e-mail subject')),
('prefix_subject', models.BooleanField(default=True, help_text='Wheter to prefix the subject with "None" or not.', verbose_name='prefix subject')),
('is_enabled', models.BooleanField(default=True, help_text="E-mails won't be sent for campaigns that are not enabled. Even if a script requests for sending. This is a way to turn off some campaigns temporarily without changing the source code.", verbose_name='enabled')),
('template_file', models.FileField(upload_to='mailing/templates', verbose_name='template file')),
],
options={
'verbose_name_plural': 'e-mail campaigns',
'verbose_name': 'e-mail campaign',
'ordering': ['name'],
},
),
migrations.CreateModel(
name='CampaignMailHeader',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.SlugField(max_length=70, verbose_name='name')),
('value', models.TextField(validators=[django.core.validators.MaxLengthValidator(998)], verbose_name='value')),
('campaign', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='extra_headers', to='mailing.Campaign')),
],
options={
'verbose_name_plural': 'additional headers',
'verbose_name': 'additional header',
},
),
migrations.CreateModel(
name='Mail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Sent'), (3, 'Canceled'), (4, 'Failure')], default=1, verbose_name='status')),
('scheduled_on', models.DateTimeField(default=django.utils.timezone.now, verbose_name='scheduled on')),
('sent_on', models.DateTimeField(blank=True, editable=False, null=True, verbose_name='sent on')),
('subject', models.CharField(max_length=255, verbose_name='subject')),
('html_body', models.TextField(verbose_name='html body')),
('text_body', models.TextField(blank=True, help_text='Leave blank to generate from html body.', verbose_name='text body')),
('failure_reason', models.TextField(blank=True, editable=False, verbose_name='failure reason')),
('campaign', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='mailing.Campaign', verbose_name='campaign')),
],
options={
'verbose_name_plural': 'e-mails',
'verbose_name': 'e-mail',
'ordering': ['-scheduled_on'],
},
),
migrations.CreateModel(
name='MailHeader',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.SlugField(max_length=70, verbose_name='name')),
('value', models.TextField(validators=[django.core.validators.MaxLengthValidator(998)], verbose_name='value')),
('mail', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='headers', to='mailing.Mail')),
],
options={
'verbose_name_plural': 'headers',
'verbose_name': 'header',
},
),
]
| {
"content_hash": "9e90ade7beec001fd8305723ae254c70",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 287,
"avg_line_length": 56.34177215189873,
"alnum_prop": 0.5924511345764997,
"repo_name": "Aladom/django-mailing",
"id": "d62660b5fa32162f6f7511805ddf638f592c6833",
"size": "4523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailing/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1603"
},
{
"name": "Python",
"bytes": "81798"
}
],
"symlink_target": ""
} |
"""Questions about the core Python language.
https://docs.python.org/3.6/reference/index.html
"""
__all__ = ["compound_statements"]
| {
"content_hash": "508cccb09ea1fbf68a47f026ebb033f8",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 26.6,
"alnum_prop": 0.706766917293233,
"repo_name": "samuelfekete/Pythonometer",
"id": "527475d5f67f8ba65adcc3fdecb43f827a065dbf",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonometer/questions/language/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22516"
}
],
"symlink_target": ""
} |
import socket
import ssl
import json
class ConnectionManager:
def __init__(self):
self.verify_mode = ssl.CERT_REQUIRED
self.hostname = '127.0.0.1'
self.port = 2099
def getWrappedSocket(self, sock, hostname):
try:
# Get Context
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.verify_mode = self.verify_mode
wrappedSocket = context.wrap_socket(sock, server_hostname = hostname)
return wrappedSocket
        except (ssl.SSLError, socket.error) as e:
            print json.dumps({"status" : "error", "ConnectionManager.getWrappedSocket" : str(e)})
            exit(0)
def getSocket(self):
try:
# Create the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Create the SSL wrapper for the socket object
wrappedSocket = self.getWrappedSocket(sock, self.hostname)
wrappedSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
wrappedSocket.bind((self.hostname, self.port))
return wrappedSocket
        except (ssl.SSLError, socket.error) as e:
            print json.dumps({"status" : "error", "ConnectionManager.getSocket" : str(e)})
            exit(0)
| {
"content_hash": "9f69247a635d22ec8af9fe1610b157c8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 97,
"avg_line_length": 35.19444444444444,
"alnum_prop": 0.6132596685082873,
"repo_name": "lyubomir1993/AlohaServer",
"id": "b0253931e1620d88a52a97c38d171821618bb507",
"size": "1286",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ConnectionManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40"
},
{
"name": "HTML",
"bytes": "5144"
},
{
"name": "JavaScript",
"bytes": "2637"
},
{
"name": "Python",
"bytes": "71954"
}
],
"symlink_target": ""
} |
from iso_parser import ENInternationalStandardParser
from month_name_little_endian import ENMonthNameLittleEndianParser
from month_name_middle_endian import ENMonthNameMiddleEndianParser
from slash_format import ENSlashDateFormatParser
from time_expression import ENTimeExpressionParser
| {
"content_hash": "73fac9b98540ab3a842d0148b7f15273",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 66,
"avg_line_length": 57.2,
"alnum_prop": 0.9020979020979021,
"repo_name": "wanasit/chrono-python",
"id": "f3f7e35f9f76e648d5a1cec3564cc36429364fd3",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chrono/parsers/en/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52458"
}
],
"symlink_target": ""
} |
import grpc
import qrl.generated.qrl_pb2 as qrl__pb2
class PublicAPIStub(object):
"""//////////////////////////
//////////////////////////
//////////////////////////
//// API ///////
//////////////////////////
//////////////////////////
//////////////////////////
This service describes the Public API used by clients (wallet/cli/etc)
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetNodeState = channel.unary_unary(
'/qrl.PublicAPI/GetNodeState',
request_serializer=qrl__pb2.GetNodeStateReq.SerializeToString,
response_deserializer=qrl__pb2.GetNodeStateResp.FromString,
)
self.GetKnownPeers = channel.unary_unary(
'/qrl.PublicAPI/GetKnownPeers',
request_serializer=qrl__pb2.GetKnownPeersReq.SerializeToString,
response_deserializer=qrl__pb2.GetKnownPeersResp.FromString,
)
self.GetPeersStat = channel.unary_unary(
'/qrl.PublicAPI/GetPeersStat',
request_serializer=qrl__pb2.GetPeersStatReq.SerializeToString,
response_deserializer=qrl__pb2.GetPeersStatResp.FromString,
)
self.GetStats = channel.unary_unary(
'/qrl.PublicAPI/GetStats',
request_serializer=qrl__pb2.GetStatsReq.SerializeToString,
response_deserializer=qrl__pb2.GetStatsResp.FromString,
)
self.GetAddressState = channel.unary_unary(
'/qrl.PublicAPI/GetAddressState',
request_serializer=qrl__pb2.GetAddressStateReq.SerializeToString,
response_deserializer=qrl__pb2.GetAddressStateResp.FromString,
)
self.GetObject = channel.unary_unary(
'/qrl.PublicAPI/GetObject',
request_serializer=qrl__pb2.GetObjectReq.SerializeToString,
response_deserializer=qrl__pb2.GetObjectResp.FromString,
)
self.GetLatestData = channel.unary_unary(
'/qrl.PublicAPI/GetLatestData',
request_serializer=qrl__pb2.GetLatestDataReq.SerializeToString,
response_deserializer=qrl__pb2.GetLatestDataResp.FromString,
)
self.PushTransaction = channel.unary_unary(
'/qrl.PublicAPI/PushTransaction',
request_serializer=qrl__pb2.PushTransactionReq.SerializeToString,
response_deserializer=qrl__pb2.PushTransactionResp.FromString,
)
self.TransferCoins = channel.unary_unary(
'/qrl.PublicAPI/TransferCoins',
request_serializer=qrl__pb2.TransferCoinsReq.SerializeToString,
response_deserializer=qrl__pb2.TransferCoinsResp.FromString,
)
self.ParseAddress = channel.unary_unary(
'/qrl.PublicAPI/ParseAddress',
request_serializer=qrl__pb2.ParseAddressReq.SerializeToString,
response_deserializer=qrl__pb2.ParseAddressResp.FromString,
)
self.GetAddressFromPK = channel.unary_unary(
'/qrl.PublicAPI/GetAddressFromPK',
request_serializer=qrl__pb2.GetAddressFromPKReq.SerializeToString,
response_deserializer=qrl__pb2.GetAddressFromPKResp.FromString,
)
self.GetMessageTxn = channel.unary_unary(
'/qrl.PublicAPI/GetMessageTxn',
request_serializer=qrl__pb2.MessageTxnReq.SerializeToString,
response_deserializer=qrl__pb2.TransferCoinsResp.FromString,
)
self.GetTokenTxn = channel.unary_unary(
'/qrl.PublicAPI/GetTokenTxn',
request_serializer=qrl__pb2.TokenTxnReq.SerializeToString,
response_deserializer=qrl__pb2.TransferCoinsResp.FromString,
)
self.GetTransferTokenTxn = channel.unary_unary(
'/qrl.PublicAPI/GetTransferTokenTxn',
request_serializer=qrl__pb2.TransferTokenTxnReq.SerializeToString,
response_deserializer=qrl__pb2.TransferCoinsResp.FromString,
)
self.GetSlaveTxn = channel.unary_unary(
'/qrl.PublicAPI/GetSlaveTxn',
request_serializer=qrl__pb2.SlaveTxnReq.SerializeToString,
response_deserializer=qrl__pb2.TransferCoinsResp.FromString,
)
self.GetTransactionsByAddress = channel.unary_unary(
'/qrl.PublicAPI/GetTransactionsByAddress',
request_serializer=qrl__pb2.GetTransactionsByAddressReq.SerializeToString,
response_deserializer=qrl__pb2.GetTransactionsByAddressResp.FromString,
)
self.GetTransaction = channel.unary_unary(
'/qrl.PublicAPI/GetTransaction',
request_serializer=qrl__pb2.GetTransactionReq.SerializeToString,
response_deserializer=qrl__pb2.GetTransactionResp.FromString,
)
self.GetBalance = channel.unary_unary(
'/qrl.PublicAPI/GetBalance',
request_serializer=qrl__pb2.GetBalanceReq.SerializeToString,
response_deserializer=qrl__pb2.GetBalanceResp.FromString,
)
self.GetOTS = channel.unary_unary(
'/qrl.PublicAPI/GetOTS',
request_serializer=qrl__pb2.GetOTSReq.SerializeToString,
response_deserializer=qrl__pb2.GetOTSResp.FromString,
)
self.GetHeight = channel.unary_unary(
'/qrl.PublicAPI/GetHeight',
request_serializer=qrl__pb2.GetHeightReq.SerializeToString,
response_deserializer=qrl__pb2.GetHeightResp.FromString,
)
self.GetBlock = channel.unary_unary(
'/qrl.PublicAPI/GetBlock',
request_serializer=qrl__pb2.GetBlockReq.SerializeToString,
response_deserializer=qrl__pb2.GetBlockResp.FromString,
)
self.GetBlockByNumber = channel.unary_unary(
'/qrl.PublicAPI/GetBlockByNumber',
request_serializer=qrl__pb2.GetBlockByNumberReq.SerializeToString,
response_deserializer=qrl__pb2.GetBlockByNumberResp.FromString,
)
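    # Usage sketch (address illustrative): wrap a channel in the stub and
    # invoke any of the unary-unary callables registered above, e.g.
    #
    #   channel = grpc.insecure_channel('127.0.0.1:19009')
    #   stub = PublicAPIStub(channel)
    #   node_state = stub.GetNodeState(qrl__pb2.GetNodeStateReq())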
class PublicAPIServicer(object):
"""//////////////////////////
//////////////////////////
//////////////////////////
//// API ///////
//////////////////////////
//////////////////////////
//////////////////////////
This service describes the Public API used by clients (wallet/cli/etc)
"""
def GetNodeState(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetKnownPeers(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPeersStat(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAddressState(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetObject(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetLatestData(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PushTransaction(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TransferCoins(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ParseAddress(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAddressFromPK(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetMessageTxn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTokenTxn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTransferTokenTxn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSlaveTxn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTransactionsByAddress(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTransaction(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBalance(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOTS(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetHeight(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBlock(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBlockByNumber(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PublicAPIServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetNodeState': grpc.unary_unary_rpc_method_handler(
servicer.GetNodeState,
request_deserializer=qrl__pb2.GetNodeStateReq.FromString,
response_serializer=qrl__pb2.GetNodeStateResp.SerializeToString,
),
'GetKnownPeers': grpc.unary_unary_rpc_method_handler(
servicer.GetKnownPeers,
request_deserializer=qrl__pb2.GetKnownPeersReq.FromString,
response_serializer=qrl__pb2.GetKnownPeersResp.SerializeToString,
),
'GetPeersStat': grpc.unary_unary_rpc_method_handler(
servicer.GetPeersStat,
request_deserializer=qrl__pb2.GetPeersStatReq.FromString,
response_serializer=qrl__pb2.GetPeersStatResp.SerializeToString,
),
'GetStats': grpc.unary_unary_rpc_method_handler(
servicer.GetStats,
request_deserializer=qrl__pb2.GetStatsReq.FromString,
response_serializer=qrl__pb2.GetStatsResp.SerializeToString,
),
'GetAddressState': grpc.unary_unary_rpc_method_handler(
servicer.GetAddressState,
request_deserializer=qrl__pb2.GetAddressStateReq.FromString,
response_serializer=qrl__pb2.GetAddressStateResp.SerializeToString,
),
'GetObject': grpc.unary_unary_rpc_method_handler(
servicer.GetObject,
request_deserializer=qrl__pb2.GetObjectReq.FromString,
response_serializer=qrl__pb2.GetObjectResp.SerializeToString,
),
'GetLatestData': grpc.unary_unary_rpc_method_handler(
servicer.GetLatestData,
request_deserializer=qrl__pb2.GetLatestDataReq.FromString,
response_serializer=qrl__pb2.GetLatestDataResp.SerializeToString,
),
'PushTransaction': grpc.unary_unary_rpc_method_handler(
servicer.PushTransaction,
request_deserializer=qrl__pb2.PushTransactionReq.FromString,
response_serializer=qrl__pb2.PushTransactionResp.SerializeToString,
),
'TransferCoins': grpc.unary_unary_rpc_method_handler(
servicer.TransferCoins,
request_deserializer=qrl__pb2.TransferCoinsReq.FromString,
response_serializer=qrl__pb2.TransferCoinsResp.SerializeToString,
),
'ParseAddress': grpc.unary_unary_rpc_method_handler(
servicer.ParseAddress,
request_deserializer=qrl__pb2.ParseAddressReq.FromString,
response_serializer=qrl__pb2.ParseAddressResp.SerializeToString,
),
'GetAddressFromPK': grpc.unary_unary_rpc_method_handler(
servicer.GetAddressFromPK,
request_deserializer=qrl__pb2.GetAddressFromPKReq.FromString,
response_serializer=qrl__pb2.GetAddressFromPKResp.SerializeToString,
),
'GetMessageTxn': grpc.unary_unary_rpc_method_handler(
servicer.GetMessageTxn,
request_deserializer=qrl__pb2.MessageTxnReq.FromString,
response_serializer=qrl__pb2.TransferCoinsResp.SerializeToString,
),
'GetTokenTxn': grpc.unary_unary_rpc_method_handler(
servicer.GetTokenTxn,
request_deserializer=qrl__pb2.TokenTxnReq.FromString,
response_serializer=qrl__pb2.TransferCoinsResp.SerializeToString,
),
'GetTransferTokenTxn': grpc.unary_unary_rpc_method_handler(
servicer.GetTransferTokenTxn,
request_deserializer=qrl__pb2.TransferTokenTxnReq.FromString,
response_serializer=qrl__pb2.TransferCoinsResp.SerializeToString,
),
'GetSlaveTxn': grpc.unary_unary_rpc_method_handler(
servicer.GetSlaveTxn,
request_deserializer=qrl__pb2.SlaveTxnReq.FromString,
response_serializer=qrl__pb2.TransferCoinsResp.SerializeToString,
),
'GetTransactionsByAddress': grpc.unary_unary_rpc_method_handler(
servicer.GetTransactionsByAddress,
request_deserializer=qrl__pb2.GetTransactionsByAddressReq.FromString,
response_serializer=qrl__pb2.GetTransactionsByAddressResp.SerializeToString,
),
'GetTransaction': grpc.unary_unary_rpc_method_handler(
servicer.GetTransaction,
request_deserializer=qrl__pb2.GetTransactionReq.FromString,
response_serializer=qrl__pb2.GetTransactionResp.SerializeToString,
),
'GetBalance': grpc.unary_unary_rpc_method_handler(
servicer.GetBalance,
request_deserializer=qrl__pb2.GetBalanceReq.FromString,
response_serializer=qrl__pb2.GetBalanceResp.SerializeToString,
),
'GetOTS': grpc.unary_unary_rpc_method_handler(
servicer.GetOTS,
request_deserializer=qrl__pb2.GetOTSReq.FromString,
response_serializer=qrl__pb2.GetOTSResp.SerializeToString,
),
'GetHeight': grpc.unary_unary_rpc_method_handler(
servicer.GetHeight,
request_deserializer=qrl__pb2.GetHeightReq.FromString,
response_serializer=qrl__pb2.GetHeightResp.SerializeToString,
),
'GetBlock': grpc.unary_unary_rpc_method_handler(
servicer.GetBlock,
request_deserializer=qrl__pb2.GetBlockReq.FromString,
response_serializer=qrl__pb2.GetBlockResp.SerializeToString,
),
'GetBlockByNumber': grpc.unary_unary_rpc_method_handler(
servicer.GetBlockByNumber,
request_deserializer=qrl__pb2.GetBlockByNumberReq.FromString,
response_serializer=qrl__pb2.GetBlockByNumberResp.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'qrl.PublicAPI', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
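# --- Illustrative sketch (not part of the generated output) ---
# A minimal server-side wiring example, assuming grpcio is installed; the
# servicer subclass, port, worker count, and the `height` field on
# GetHeightResp are assumptions for illustration only.
def _example_serve():
    from concurrent import futures

    class ExamplePublicAPIServicer(PublicAPIServicer):
        def GetHeight(self, request, context):
            # Illustrative only: a real node would report its chain height.
            return qrl__pb2.GetHeightResp(height=0)

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_PublicAPIServicer_to_server(ExamplePublicAPIServicer(), server)
    server.add_insecure_port('[::]:19009')  # port is an assumption
    server.start()
    return server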
class AdminAPIStub(object):
"""This is a place holder for testing/instrumentation APIs
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
class AdminAPIServicer(object):
"""This is a place holder for testing/instrumentation APIs
"""
def add_AdminAPIServicer_to_server(servicer, server):
rpc_method_handlers = {
}
generic_handler = grpc.method_handlers_generic_handler(
'qrl.AdminAPI', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
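# --- Illustrative sketch (not part of the generated output) ---
# A minimal client-side call against the PublicAPI service, assuming a node
# is listening on the address below; the address is illustrative.
def _example_client():
    channel = grpc.insecure_channel('127.0.0.1:19009')
    stub = PublicAPIStub(channel)
    resp = stub.GetHeight(qrl__pb2.GetHeightReq())
    return resp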
| {
"content_hash": "12b427d6d63166135cc18a290eafc2bb",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 86,
"avg_line_length": 41.27149321266968,
"alnum_prop": 0.7016226290976867,
"repo_name": "randomshinichi/QRL",
"id": "eacd2749aac4e3942b648abd3528733fed457c76",
"size": "18312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/qrl/generated/qrl_pb2_grpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "185833"
},
{
"name": "Python",
"bytes": "1261649"
},
{
"name": "Shell",
"bytes": "2126"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RegistrationProfile'
db.create_table('registration_registrationprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='registration_profile', unique=True, to=orm['auth.User'])),
('activation_key', self.gf('django.db.models.fields.CharField')(max_length=40)),
))
db.send_create_signal('registration', ['RegistrationProfile'])
def backwards(self, orm):
# Deleting model 'RegistrationProfile'
db.delete_table('registration_registrationprofile')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'registration.registrationprofile': {
'Meta': {'object_name': 'RegistrationProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'registration_profile'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['registration']
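# Hedged usage note: with South installed, this initial migration would
# typically be applied with something like
#     python manage.py migrate registration 0001_initial
# (the exact invocation depends on the project's settings).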
| {
"content_hash": "08ae4d5f9faf947e40de46ff46cd8a6f",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 182,
"avg_line_length": 63.642857142857146,
"alnum_prop": 0.570594837261504,
"repo_name": "mattrobenolt/django-registration",
"id": "92fa7e9abe4faec78ce8fe0dede1227d57a21def",
"size": "4473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "120200"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from sqlalchemy import func, inspect, over
from sqlalchemy.sql import update
TS_REGEX = re.compile(r'([@<>!()&|:\'])')
def limit_groups(query, model, partition_by, order_by, limit=None, offset=0):
"""Limits the number of rows returned for each group
This utility allows you to apply a limit/offset to grouped rows of a query.
Note that the query will only contain the data from `model`; i.e. you cannot
add additional entities.
:param query: The original query, including filters, joins, etc.
:param model: The model class for `query`
:param partition_by: The column to group by
:param order_by: The column to order the partitions by
:param limit: The maximum number of rows for each partition
:param offset: The number of rows to skip in each partition
"""
inner = query.add_columns(over(func.row_number(), partition_by=partition_by,
order_by=order_by).label('rownum')).subquery()
query = model.query.select_entity_from(inner)
if limit:
return query.filter(offset < inner.c.rownum, inner.c.rownum <= (limit + offset))
else:
return query.filter(offset < inner.c.rownum)
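# A minimal usage sketch, assuming a hypothetical Comment model with
# `thread_id` and `created_dt` columns (the model and column names are
# illustrative, not part of this module):
#
#     q = Comment.query.filter(Comment.deleted == False)
#     latest = limit_groups(q, Comment, Comment.thread_id,
#                           Comment.created_dt.desc(), limit=3)
#
# This returns at most the three newest comments per thread.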
def db_dates_overlap(entity, start_column, start, end_column, end, inclusive=False):
element_start = getattr(entity, start_column)
element_end = getattr(entity, end_column)
if inclusive:
return (element_start <= end) & (start <= element_end)
else:
return (element_start < end) & (start < element_end)
def escape_like(value):
"""Escapes a string to be used as a plain string in LIKE"""
escape_char = '\\'
return (value
.replace(escape_char, escape_char * 2) # literal escape char needs to be escaped
.replace('%', escape_char + '%') # we don't want % wildcards inside the value
.replace('_', escape_char + '_')) # same for _ wildcards
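# For illustration, escaping user input before a LIKE filter; `escape='\\'`
# tells the database which escape character was used:
#
#     escape_like('100%_done')  # -> '100\\%\\_done' (i.e. 100\%\_done)
#     SomeModel.query.filter(
#         SomeModel.title.like('%{}%'.format(escape_like(term)), escape='\\'))
#
# `SomeModel` and `term` are placeholders for this sketch.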
def preprocess_ts_string(text, prefix=True):
atoms = [TS_REGEX.sub(r'\\\1', atom.strip()) for atom in text.split()]
return ' & '.join('{}:*'.format(atom) if prefix else atom for atom in atoms)
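# For illustration, preparing raw user text for a postgres to_tsquery call:
#
#     preprocess_ts_string('hello world')        # -> 'hello:* & world:*'
#     preprocess_ts_string('a&b', prefix=False)  # -> 'a\\&b' (ampersand escaped)
#
# Special tsquery characters are backslash-escaped and atoms are ANDed.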
def has_extension(conn, name):
"""Checks if the postgres database has a certain extension installed"""
return conn.execute("SELECT EXISTS(SELECT TRUE FROM pg_extension WHERE extname = %s)", (name,)).scalar()
def get_postgres_version():
from indico.core.db import db
version = db.engine.execute("SELECT current_setting('server_version_num')::int").scalar()
return '{}.{}.{}'.format(version // 10000, version % 10000 // 100, version % 100)
def increment_and_get(col, filter_, n=1):
"""Increments and returns a numeric column.
This is committed to the database immediately in a separate
transaction to avoid possible conflicts.
The main purpose of this utility is to generate "scoped" IDs
(which cannot be represented using database-level sequences as you
would need one sequence per scope) without risking collisions when
inserting the objects those IDs are eventually assigned to.
:param col: The column to update, e.g. ``SomeModel.last_num``
:param filter_: A filter expression such as ``SomeModel.id == n``
to restrict which columns to update.
:param n: The number of units to increment the ID of.
"""
from indico.core.db import db
with db.tmp_session() as s:
rv = s.execute(update(col.class_).where(filter_).values({col: col + n}).returning(col)).fetchone()[0]
s.commit()
return rv
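# A minimal usage sketch, assuming a hypothetical Event model carrying a
# `last_friendly_id` counter column (names are illustrative):
#
#     friendly_id = increment_and_get(Event.last_friendly_id,
#                                     Event.id == some_event_id)
#
# Because the UPDATE runs in its own session, two concurrent callers can
# never be handed the same value.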
def get_related_object(obj, relationship, criteria):
"""Get an object from a one-to-many relationship.
If the relationship is already loaded, the criteria are evaluated
in Python; otherwise a query is sent to the database to get just
the specified object.
For maximum compatibility between the two loading methods, values
consisting of only digits are compared as numbers even if they are
provided as strings since this is how it works when sending a query.
:param obj: A model instance that has a relationship
:param relationship: The name of said relationship
:param criteria: A dict used to filter the objects from the
relationship.
:return: A single object from the relationship or ``None`` if no
such object could be found.
"""
def _compare(a, b):
if isinstance(a, basestring) and a.isdigit():
a = int(a)
if isinstance(b, basestring) and b.isdigit():
b = int(b)
return a == b
# if the relationship is loaded evaluate the criteria in python
if relationship not in inspect(obj).unloaded:
return next((x for x in getattr(obj, relationship)
if all(_compare(getattr(x, k), v) for k, v in criteria.iteritems())),
None)
# otherwise query that specific object
cls = getattr(type(obj), relationship).prop.mapper.class_
return cls.query.with_parent(obj, relationship).filter_by(**criteria).first()
def get_n_matching(query, n, predicate):
"""Get N objects from a query that satisfy a condition.
This queries for ``n * 5`` objects initially and then loads
more objects until no more results are available or ``n`` objects
have been found.
:param query: A sqlalchemy query object
:param n: The max number of objects to return
:param predicate: A callable used to filter the found objects
"""
_offset = [0]
def _get():
limit = n * 5
rv = query.offset(_offset[0]).limit(limit).all()
_offset[0] += limit
return rv
results = filter(predicate, _get())
while len(results) < n:
objects = _get()
if not objects:
break
results.extend(x for x in objects if predicate(x))
return results[:n]
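# For illustration, fetching up to 10 accessible events, where `can_access`
# stands in for an arbitrary (possibly expensive) Python-side predicate:
#
#     events = get_n_matching(Event.query.order_by(Event.id.desc()), 10,
#                             lambda e: e.can_access(user))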
def with_total_rows(query, single_entity=True):
"""Get the result of a query and its total row count.
:param query: a sqlalchemy query
:param single_entity: whether the original query only returns
a single entity. In this case, each
returned result will just be that entity
instead of a tuple.
:return: a ``(results, total_count)`` tuple
"""
res = query.add_columns(func.count().over()).all()
if not res:
return [], 0
total = res[0][-1]
rows = [row[0] for row in res] if single_entity else [row[:-1] for row in res]
return rows, total
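# For illustration, paginating while reporting the full match count in a
# single round-trip (the window function is evaluated before LIMIT):
#
#     q = Event.query.filter(Event.title.ilike('%meeting%')).limit(20)
#     rows, total = with_total_rows(q)
#
# `rows` holds at most 20 events; `total` counts every match.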
| {
"content_hash": "c565c8d5271e6a073e91b869ff3c675b",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 109,
"avg_line_length": 38.075581395348834,
"alnum_prop": 0.6484959535806993,
"repo_name": "mic4ael/indico",
"id": "5db11b7ea15cbb8a300e9d32a24368022c1b8d55",
"size": "6763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/core/db/sqlalchemy/util/queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "ssssssssssssssssssssssssssssssssssssssssssssssssss"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"django_fine_uploader",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
| {
"content_hash": "d68a90efeb651aeb4f75dbe46c415ec5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 66,
"avg_line_length": 19.28125,
"alnum_prop": 0.6628849270664505,
"repo_name": "douglasmiranda/django-fine-uploader",
"id": "8450be82ac24ffbb03b533fd993a04999b05a1e1",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17588"
},
{
"name": "Makefile",
"bytes": "1712"
},
{
"name": "Python",
"bytes": "40480"
}
],
"symlink_target": ""
} |
from model_io import *
import sys
from loss_function import *
import math
import time
def train_model(model, outputChannels, learningRate, trainFeeder, valFeeder,
modelSavePath=None, savePrefix=None, initialIteration=1, batchSize=1):
with tf.Session() as sess:
tfBatchImages = tf.placeholder("float")
tfBatchGT = tf.placeholder("float")
tfBatchWeight = tf.placeholder("float")
tfBatchSS = tf.placeholder("float")
tfBatchSSMask = tf.placeholder("float")
keepProb = tf.placeholder("float")
with tf.name_scope("model_builder"):
print "attempting to build model"
model.build(tfBatchImages, tfBatchSS, tfBatchSSMask, keepProb=keepProb)
print "built the model"
sys.stdout.flush()
loss = modelTotalLoss(pred=model.outputData, gt=tfBatchGT, weight=tfBatchWeight, ss=tfBatchSS, outputChannels=outputChannels)
numPredictedWeighted = countTotalWeighted(ss=tfBatchSS, weight=tfBatchWeight)
numPredicted = countTotal(ss=tfBatchSS)
numCorrect = countCorrect(pred=model.outputData, gt=tfBatchGT, ss=tfBatchSS, k=1, outputChannels=outputChannels)
print "setting adam optimizer"
sys.stdout.flush()
train_op = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(loss=loss)
init = tf.initialize_all_variables()
print "attempting to run init"
sys.stdout.flush()
sess.run(init)
print "completed init"
sys.stdout.flush()
iteration = initialIteration
while iteration < 1000:
batchLosses = []
totalPredictedWeighted = 0
totalPredicted = 0
totalCorrect = 0
for k in range(int(math.floor(valFeeder.total_samples() / batchSize))):
imageBatch, gtBatch, weightBatch, ssBatch, ssMaskBatch, _ = valFeeder.next_batch()
batchLoss, batchPredicted, batchPredictedWeighted, batchCorrect = sess.run(
[loss, numPredicted, numPredictedWeighted, numCorrect],
feed_dict={tfBatchImages: imageBatch,
tfBatchGT: gtBatch,
tfBatchWeight: weightBatch,
tfBatchSS: ssBatch,
tfBatchSSMask: ssMaskBatch,
keepProb: 1.0})
batchLosses.append(batchLoss)
totalPredicted += batchPredicted
totalPredictedWeighted += batchPredictedWeighted
totalCorrect += batchCorrect
if np.isnan(np.mean(batchLosses)):
print "LOSS RETURNED NaN"
sys.stdout.flush()
return 1
print "%s Itr: %d - val loss: %.6f, correct: %.6f" % (time.strftime("%H:%M:%S"),
iteration, float(np.mean(batchLosses)), totalCorrect / totalPredicted)
sys.stdout.flush()
if (iteration > 0 and iteration % 5 == 0) or checkSaveFlag(modelSavePath):
modelSaver(sess, modelSavePath, savePrefix, iteration)
#for j in range(10):
for j in range(int(math.floor(trainFeeder.total_samples() / batchSize))):
# print "attempting to run train batch"
# sys.stdout.flush()
imageBatch, gtBatch, weightBatch, ssBatch, ssMaskBatch, _ = trainFeeder.next_batch()
sess.run(train_op, feed_dict={tfBatchImages: imageBatch,
tfBatchGT: gtBatch,
tfBatchWeight: weightBatch,
tfBatchSS: ssBatch,
tfBatchSSMask: ssMaskBatch,
keepProb: 0.7})
# print "ran one iteration"
iteration += 1
| {
"content_hash": "68edb8e46c062f1a892d3d2918cb3be8",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 133,
"avg_line_length": 42.81521739130435,
"alnum_prop": 0.5694338664635694,
"repo_name": "min2209/dwt",
"id": "be35d2192fff0dace8b051a8b95c27ad2125fd23",
"size": "3939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "E2E/train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "5938"
},
{
"name": "Python",
"bytes": "103353"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, unicode_literals
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy as np
from scipy.spatial.distance import euclidean
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Vasprun, Locpot, VolumetricData
from pymatgen.io.vasp.inputs import Incar
from pymatgen.electronic_structure.plotter import BSPlotter, BSPlotterProjected
from pymatgen.electronic_structure.core import Spin
from mpinterfaces.utils import is_converged
__author__ = "Michael Ashton"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton"
__email__ = "joshgabriel92@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
def get_band_edges():
"""
Calculate the band edge locations relative to the vacuum level
for a semiconductor. For a metal, returns the Fermi level.
Returns:
    edges (dict): {'up_cbm': ..., 'up_vbm': ..., 'dn_cbm': ..., 'dn_vbm': ..., 'efermi': ...}
"""
# Vacuum level energy from LOCPOT.
locpot = Locpot.from_file('LOCPOT')
evac = max(locpot.get_average_along_axis(2))
vasprun = Vasprun('vasprun.xml')
bs = vasprun.get_band_structure()
eigenvals = vasprun.eigenvalues
efermi = vasprun.efermi - evac
if bs.is_metal():
edges = {'up_cbm': None, 'up_vbm': None, 'dn_cbm': None, 'dn_vbm': None,
'efermi': efermi}
elif bs.is_spin_polarized:
up_cbm = min(
[min([e[0] for e in eigenvals[Spin.up][i] if not e[1]])
for i in range(len(eigenvals[Spin.up]))]) - evac
up_vbm = max(
[max([e[0] for e in eigenvals[Spin.up][i] if e[1]])
for i in range(len(eigenvals[Spin.up]))]) - evac
dn_cbm = min(
[min([e[0] for e in eigenvals[Spin.down][i] if not e[1]])
for i in range(len(eigenvals[Spin.down]))]) - evac
dn_vbm = max(
[max([e[0] for e in eigenvals[Spin.down][i] if e[1]])
for i in range(len(eigenvals[Spin.down]))]) - evac
edges = {'up_cbm': up_cbm, 'up_vbm': up_vbm, 'dn_cbm': dn_cbm,
'dn_vbm': dn_vbm, 'efermi': efermi}
else:
cbm = bs.get_cbm()['energy'] - evac
vbm = bs.get_vbm()['energy'] - evac
edges = {'up_cbm': cbm, 'up_vbm': vbm, 'dn_cbm': cbm, 'dn_vbm': vbm,
'efermi': efermi}
return edges
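# For illustration, assuming the working directory holds the LOCPOT and
# vasprun.xml from a finished calculation:
#
#     edges = get_band_edges()
#     print(edges['up_cbm'], edges['up_vbm'])  # eV, relative to vacuum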
def plot_band_alignments(directories, run_type='PBE', fmt='pdf'):
"""
Plot CBM's and VBM's of all compounds together, relative to the band
edges of H2O.
Args:
directories (list): list of the directory paths for materials
to include in the plot.
run_type (str): 'PBE' or 'HSE', so that the function knows which
subdirectory to go into (pbe_bands or hse_bands).
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
if run_type == 'HSE':
subdirectory = 'hse_bands'
else:
subdirectory = 'pbe_bands'
band_gaps = {}
for directory in directories:
sub_dir = os.path.join(directory, subdirectory)
if is_converged(sub_dir):
os.chdir(sub_dir)
band_structure = Vasprun('vasprun.xml').get_band_structure()
band_gap = band_structure.get_band_gap()
# Vacuum level energy from LOCPOT.
locpot = Locpot.from_file('LOCPOT')
evac = max(locpot.get_average_along_axis(2))
if not band_structure.is_metal():
is_direct = band_gap['direct']
cbm = band_structure.get_cbm()
vbm = band_structure.get_vbm()
else:
cbm = None
vbm = None
is_direct = False
band_gaps[directory] = {'CBM': cbm, 'VBM': vbm,
'Direct': is_direct,
'Metal': band_structure.is_metal(),
'E_vac': evac}
os.chdir('../../')
ax = plt.figure(figsize=(16, 10)).gca()
x_max = len(band_gaps) * 1.315
ax.set_xlim(0, x_max)
# Rectangle representing band edges of water.
ax.add_patch(plt.Rectangle((0, -5.67), height=1.23, width=len(band_gaps),
facecolor='#00cc99', linewidth=0))
ax.text(len(band_gaps) * 1.01, -4.44, r'$\mathrm{H+/H_2}$', size=20,
verticalalignment='center')
ax.text(len(band_gaps) * 1.01, -5.67, r'$\mathrm{O_2/H_2O}$', size=20,
verticalalignment='center')
x_ticklabels = []
y_min = -8
i = 0
# Legend flags; flipped to True as each gap type is encountered.
are_directs, are_indirects, are_metals = False, False, False
for compound in [cpd for cpd in directories if cpd in band_gaps]:
x_ticklabels.append(compound)
# Plot all energies relative to their vacuum level.
evac = band_gaps[compound]['E_vac']
if band_gaps[compound]['Metal']:
cbm = -8
vbm = -2
else:
cbm = band_gaps[compound]['CBM']['energy'] - evac
vbm = band_gaps[compound]['VBM']['energy'] - evac
# Add a box around direct gap compounds to distinguish them.
if band_gaps[compound]['Direct']:
are_directs = True
linewidth = 5
elif not band_gaps[compound]['Metal']:
are_indirects = True
linewidth = 0
# Metals are grey.
if band_gaps[compound]['Metal']:
are_metals = True
linewidth = 0
color_code = '#404040'
else:
color_code = '#002b80'
# CBM
ax.add_patch(plt.Rectangle((i, cbm), height=-cbm, width=0.8,
facecolor=color_code, linewidth=linewidth,
edgecolor="#e68a00"))
# VBM
ax.add_patch(plt.Rectangle((i, y_min),
height=(vbm - y_min), width=0.8,
facecolor=color_code, linewidth=linewidth,
edgecolor="#e68a00"))
i += 1
ax.set_ylim(y_min, 0)
# Set tick labels
ax.set_xticks([n + 0.4 for n in range(i)])
ax.set_xticklabels(x_ticklabels, family='serif', size=20, rotation=60)
ax.set_yticklabels(ax.get_yticks(), family='serif', size=20)
# Add a legend
height = y_min
if are_directs:
ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
height=(-y_min*0.1), facecolor='#002b80',
edgecolor='#e68a00', linewidth=5))
ax.text(i*1.24, height - y_min * 0.05, 'Direct', family='serif',
color='w', size=20, horizontalalignment='center',
verticalalignment='center')
height -= y_min * 0.15
if are_indirects:
ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
height=(-y_min*0.1), facecolor='#002b80',
linewidth=0))
ax.text(i*1.24, height - y_min * 0.05, 'Indirect', family='serif',
size=20, color='w', horizontalalignment='center',
verticalalignment='center')
height -= y_min * 0.15
if are_metals:
ax.add_patch(plt.Rectangle((i*1.165, height), width=i*0.15,
height=(-y_min*0.1), facecolor='#404040',
linewidth=0))
ax.text(i*1.24, height - y_min * 0.05, 'Metal', family='serif',
size=20, color='w', horizontalalignment='center',
verticalalignment='center')
# Who needs axes?
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_ylabel('eV', family='serif', size=24)
if fmt == "None":
return ax
else:
plt.savefig('band_alignments.{}'.format(fmt), transparent=True)
plt.close()
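# For illustration, assuming each listed directory contains a converged
# pbe_bands subdirectory (the directory names are placeholders):
#
#     plot_band_alignments(['MoS2', 'WS2', 'WSe2'], run_type='PBE', fmt='pdf')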
def plot_local_potential(axis=2, ylim=(-20, 0), fmt='pdf'):
"""
Plot data from the LOCPOT file along any of the 3 primary axes.
Useful for determining surface dipole moments and electric
potentials on the interior of the material.
Args:
axis (int): 0 = x, 1 = y, 2 = z
ylim (tuple): minimum and maximum potentials for the plot's y-axis.
fmt (str): matplotlib format style. Check the matplotlib docs
for options.
"""
ax = plt.figure(figsize=(16, 10)).gca()
locpot = Locpot.from_file('LOCPOT')
structure = Structure.from_file('CONTCAR')
vd = VolumetricData(structure, locpot.data)
abs_potentials = vd.get_average_along_axis(axis)
vacuum_level = max(abs_potentials)
vasprun = Vasprun('vasprun.xml')
bs = vasprun.get_band_structure()
if not bs.is_metal():
cbm = bs.get_cbm()['energy'] - vacuum_level
vbm = bs.get_vbm()['energy'] - vacuum_level
potentials = [potential - vacuum_level for potential in abs_potentials]
axis_length = structure.lattice._lengths[axis]
positions = np.arange(0, axis_length, axis_length / len(potentials))
ax.plot(positions, potentials, linewidth=2, color='k')
ax.set_xlim(0, axis_length)
ax.set_ylim(ylim[0], ylim[1])
ax.set_xticklabels(
[r'$\mathrm{%s}$' % tick for tick in ax.get_xticks()], size=20)
ax.set_yticklabels(
[r'$\mathrm{%s}$' % tick for tick in ax.get_yticks()], size=20)
ax.set_xlabel(r'$\mathrm{\AA}$', size=24)
ax.set_ylabel(r'$\mathrm{V\/(eV)}$', size=24)
if not bs.is_metal():
ax.text(ax.get_xlim()[1], cbm, r'$\mathrm{CBM}$',
horizontalalignment='right', verticalalignment='bottom',
size=20)
ax.text(ax.get_xlim()[1], vbm, r'$\mathrm{VBM}$',
horizontalalignment='right', verticalalignment='top', size=20)
ax.fill_between(ax.get_xlim(), cbm, ax.get_ylim()[1],
facecolor=plt.cm.jet(0.3), zorder=0, linewidth=0)
ax.fill_between(ax.get_xlim(), ax.get_ylim()[0], vbm,
facecolor=plt.cm.jet(0.7), zorder=0, linewidth=0)
if fmt == "None":
return ax
else:
plt.savefig('locpot.{}'.format(fmt))
plt.close()
### This function uses Pymatgen to plot band structures, and doesn't
### handle KPOINTS with IBZKPT at the top very well. It also doesn't
### work with latex in the latest matplotlib version. If those
### things ever get fixed in Pymatgen we could go back to using
### this function.
# def plot_band_structure(ylim=(-5, 5), draw_fermi=False, fmt='pdf'):
# """
# Plot a standard band structure with no projections.
#
# Args:
# ylim (tuple): minimum and maximum potentials for the plot's y-axis.
# draw_fermi (bool): whether or not to draw a dashed line at E_F.
# fmt (str): matplotlib format style. Check the matplotlib docs
# for options.
# """
#
# vasprun = Vasprun('vasprun.xml')
# efermi = vasprun.efermi
# bsp = BSPlotter(vasprun.get_band_structure('KPOINTS', line_mode=True,
# efermi=efermi))
# if fmt == "None":
# return bsp.bs_plot_data()
# else:
# plot = bsp.get_plot(ylim=ylim)
# fig = plot.gcf()
# ax = fig.gca()
# ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
# ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
# if draw_fermi:
# ax.plot([ax.get_xlim()[0], ax.get_xlim()[1]], [0, 0], 'k--')
# plt.savefig('band_structure.{}'.format(fmt), transparent=True)
#
# plt.close()
def plot_band_structure(ylim=(-5, 5), draw_fermi=False, fmt="pdf"):
"""
Plot a standard band structure with no projections. Requires
EIGENVAL, OUTCAR and KPOINTS files in the current working directory.
Args:
ylim (tuple): minimum and maximum potentials for the plot's y-axis.
draw_fermi (bool): whether or not to draw a dashed line at E_F.
fmt (str): matplotlib format style. Check the matplotlib docs
for options.
"""
eigenval_lines = open("EIGENVAL").readlines()
kpoints_lines = open("KPOINTS").readlines()
# IBZ k-points used for SCF but not useful for plotting bands.
ibz_kpoints = [k for k in kpoints_lines[3:] if int(k.split()[3]) != 0]
# Lines containing high-symmetry k-points (e.g. Gamma)
vertex_lines = [k for k in kpoints_lines[3:] if len(k.split()) == 5]
n_bands = int(eigenval_lines[5].split()[2])
with open("OUTCAR", "r") as outcar:
for line in outcar:
if "E-fermi" in line:
efermi = float(line.split()[2])
break
spin_polarized = False
if len(eigenval_lines[8].split()) == 5:
spin_polarized = True
bs_kpoints = []
vertices = []
bands = [[[], []] for x in range(n_bands)]
i = 7 + len(ibz_kpoints)*(n_bands+2)
while i < len(eigenval_lines):
kpt_coords = [float(x) for x in eigenval_lines[i].split()[:3]]
for kpt in vertex_lines:
ref_coords = [float(x) for x in kpt.split()[:3]]
if euclidean(kpt_coords, ref_coords) < 0.0001:
kpt_coords.append(kpt.split()[-1])
vertices.append(kpt_coords)
break
bs_kpoints.append(kpt_coords)
for j in range(n_bands):
i += 1
split_line = eigenval_lines[i].split()
bands[j][0].append(float(split_line[1]) - efermi)
if spin_polarized:
bands[j][1].append(float(split_line[2]) - efermi)
i += 2
path_lengths, kpt_distances = [], [0]
discontinuity = False
for i in range(1, len(vertices)):
if discontinuity:
path_lengths.append(0)
else:
path_lengths.append(euclidean(vertices[i][:3],vertices[i-1][:3]))
if i < len(vertices)-1 and vertices[i][3] != vertices[i-1][3] and\
vertices[i][3] != vertices[i+1][3] and not discontinuity:
discontinuity = True
else:
discontinuity = False
x = 0  # cumulative distance along the k-path
for i in range(1, len(bs_kpoints)):
if len(bs_kpoints[i]) == 4 and len(bs_kpoints[i-1]) == 4 and \
bs_kpoints[i][3] != bs_kpoints[i-1][3]:
x += 0
else:
x += euclidean(bs_kpoints[i][:3], bs_kpoints[i-1][:3])
kpt_distances.append(x)
ax = plt.figure(figsize=(11, 8.5)).gca()
font = FontProperties()
font.set_size(24)
font.set_family("serif")
large_font = font.copy()
large_font.set_size(32)
for b in bands:
ax.plot(kpt_distances, b[0], 'b-')
if spin_polarized:
ax.plot(kpt_distances, b[1], 'r--')
if draw_fermi:
ax.plot([min(kpt_distances), max(kpt_distances)], [0, 0], 'k-')
ax.set_xlim(min(kpt_distances), max(kpt_distances))
ax.set_xticks([])
d = 0
ax.text(d, ylim[0]*1.05, r"$\mathrm{%s}$" % vertices[0][-1],
fontproperties=font, verticalalignment="top",
horizontalalignment="center")
for i in range(len(path_lengths)):
d += path_lengths[i]
if i < len(path_lengths)-1 and path_lengths[i+1] == 0 and\
vertices[i+1][-1] != vertices[i+2][-1]:
label = "{}|{}".format(vertices[i+1][-1], vertices[i+2][-1])
else:
label = vertices[i+1][-1]
if path_lengths[i] != 0:
ax.text(d, ylim[0]*1.05, r"$\mathrm{%s}$" % label,
fontproperties=font, verticalalignment="top",
horizontalalignment="center")
ax.plot([d, d], [ylim[0], ylim[1]], 'k--')
ax.set_ylim(ylim)
ax.set_ylabel(r"$\mathrm{E - E_F (eV)}$", fontproperties=large_font)
ax.set_yticklabels([int(t) for t in ax.get_yticks()], fontproperties=font)
plt.savefig("band_structure.{}".format(fmt))
def plot_color_projected_bands(ylim=(-5, 5), fmt='pdf'):
"""
Plot a single band structure where the color of the band indicates
the elemental character of the eigenvalue.
Args:
ylim (tuple): minimum and maximum energies for the plot's
y-axis.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
vasprun = Vasprun('vasprun.xml', parse_projected_eigen=True)
bs = vasprun.get_band_structure('KPOINTS', line_mode=True)
bspp = BSPlotterProjected(bs)
ax = bspp.get_elt_projected_plots_color().gcf().gca()
ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
ax.set_ylim(ylim)
if fmt == "None":
return ax
else:
plt.savefig('color_projected_bands.{}'.format(fmt))
plt.close()
def plot_elt_projected_bands(ylim=(-5, 5), fmt='pdf'):
"""
Plot separate band structures for each element where the size of the
markers indicates the elemental character of the eigenvalue.
Args:
ylim (tuple): minimum and maximum energies for the plot's
y-axis.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
vasprun = Vasprun('vasprun.xml', parse_projected_eigen=True)
bs = vasprun.get_band_structure('KPOINTS', line_mode=True)
bspp = BSPlotterProjected(bs)
ax = bspp.get_elt_projected_plots(ylim=ylim).gcf().gca()
ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
if fmt == "None":
return ax
else:
plt.savefig('elt_projected_bands.{}'.format(fmt))
plt.close()
def plot_orb_projected_bands(orbitals, fmt='pdf', ylim=(-5, 5)):
"""
Plot a separate band structure for each orbital of each element in
orbitals.
Args:
orbitals (dict): dictionary of the form
{element: [orbitals]},
e.g. {'Mo': ['s', 'p', 'd'], 'S': ['p']}
ylim (tuple): minimum and maximum energies for the plot's
y-axis.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
vasprun = Vasprun('vasprun.xml', parse_projected_eigen=True)
bs = vasprun.get_band_structure('KPOINTS', line_mode=True)
bspp = BSPlotterProjected(bs)
ax = bspp.get_projected_plots_dots(orbitals, ylim=ylim).gcf().gca()
ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
if fmt == "None":
return ax
else:
plt.savefig('orb_projected_bands.{}'.format(fmt))
plt.close()
def get_effective_mass():
"""
This function is in a beta stage, and its results are not
guaranteed to be useful.
Finds effective masses from a band structure, using parabolic
fitting to determine the band curvature at the CBM
for electrons and at the VBM for holes. This curvature enters
the equation m* = (hbar)**2 / (d^2E/dk^2).
To consider anisotropy, the k-space directions to the left and right
of the CBM/VBM in the band diagram are returned separately.
*NOTE* Only works for semiconductors and linemode calculations (non-
spin polarized).
>30 k-points per string recommended to obtain
reliable curvatures.
*NOTE* The parabolic fit can be quite sensitive to the number of
k-points fit to, so it might be worthwhile adjusting N_KPTS
to obtain some sense of the error bar.
TODO: Warn user if CBM/VBM is at the edge of the diagram, and which
direction (either left or right) was not actually fit to.
Until fixed, this (most likely) explains any negative masses
returned.
Returns:
Dictionary of the form
{'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
where 'left' and 'right' indicate the reciprocal
directions to the left and right of the extremum in the
band structure.
"""
H_BAR = 6.582119514e-16 # eV*s
M_0 = 9.10938356e-31 # kg
N_KPTS = 6 # Number of k-points included in the parabola.
spin_up = Spin(1)
band_structure = Vasprun('vasprun.xml').get_band_structure()
# Locations of CBM and VBM in band_structure.bands
cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]
cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]
vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]
vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]
k = {'electron': {'left': [], 'right': []},
'hole': {'left': [], 'right': []}}
E = {'electron': {'left': [], 'right': []},
'hole': {'left': [], 'right': []}}
e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords
h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords
for n in range(-N_KPTS, 1):
e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords
h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords
k['electron']['left'].append(
((e_coords[0] - e_ref_coords[0])**2 +
(e_coords[1] - e_ref_coords[1])**2 +
(e_coords[2] - e_ref_coords[2])**2)**0.5
)
k['hole']['left'].append(
((h_coords[0] - h_ref_coords[0])**2 +
(h_coords[1] - h_ref_coords[1])**2 +
(h_coords[2] - h_ref_coords[2])**2)**0.5
)
e_energy = band_structure.bands[
spin_up][cbm_band_index][cbm_kpoint_index + n]
h_energy = band_structure.bands[
spin_up][vbm_band_index][vbm_kpoint_index + n]
E['electron']['left'].append(e_energy)
E['hole']['left'].append(h_energy)
for n in range(1, 1 + N_KPTS):
e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords
h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords
k['electron']['right'].append(
((e_coords[0] - e_ref_coords[0])**2 +
(e_coords[1] - e_ref_coords[1])**2 +
(e_coords[2] - e_ref_coords[2])**2)**0.5
)
k['hole']['right'].append(
((h_coords[0] - h_ref_coords[0])**2 +
(h_coords[1] - h_ref_coords[1])**2 +
(h_coords[2] - h_ref_coords[2])**2)**0.5
)
e_energy = band_structure.bands[
spin_up][cbm_band_index][cbm_kpoint_index + n]
h_energy = band_structure.bands[
spin_up][vbm_band_index][vbm_kpoint_index + n]
E['electron']['right'].append(e_energy)
E['hole']['right'].append(h_energy)
# 2nd order fits
e_l_fit = np.poly1d(
np.polyfit(k['electron']['left'], E['electron']['left'], 2))
e_r_fit = np.poly1d(
np.polyfit(k['electron']['right'], E['electron']['right'], 2))
h_l_fit = np.poly1d(
np.polyfit(k['hole']['left'], E['hole']['left'], 2))
h_r_fit = np.poly1d(
np.polyfit(k['hole']['right'], E['hole']['right'], 2))
# Curvatures
e_l_curvature = e_l_fit.deriv().deriv()[0]
e_r_curvature = e_r_fit.deriv().deriv()[0]
h_l_curvature = h_l_fit.deriv().deriv()[0]
h_r_curvature = h_r_fit.deriv().deriv()[0]
# Unit conversion
e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0
e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0
h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0
h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0
return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},
'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}
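# For illustration, assuming a non-spin-polarized linemode vasprun.xml in
# the working directory:
#
#     masses = get_effective_mass()
#     m_e_left = masses['electron']['left']  # in units of the electron rest mass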
def plot_density_of_states(xlim=(-10, 5), ylim=(-1.5, 1.5), fmt='pdf'):
"""
Plots the density of states from the DOSCAR in the cwd. Plots
spin up in red, down in green, and the sum in black. Efermi = 0.
Args:
xlim (tuple): minimum and maximum energies for the plot's
x-axis.
ylim (tuple): minimum and maximum for the plot's
y-axis.
fmt (str): matplotlib format style. Check the matplotlib
docs for options.
"""
efermi = Vasprun('vasprun.xml').efermi
dos_lines = open('DOSCAR').readlines()
x, up, down = [], [], []
nedos = Incar.from_file('INCAR').as_dict()['NEDOS'] - 1
for line in dos_lines[6:6+nedos]:
split_line = line.split()
x.append(float(split_line[0]) - efermi)
up.append(float(split_line[1]))
down.append(-float(split_line[2]))
x, up, down = np.array(x), np.array(up), np.array(down)
total = up + down  # avoid shadowing the built-in sum()
ax = plt.figure().gca()
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
ax.set_xlabel(r'$\mathrm{E\/(eV)}$')
ax.set_ylabel(r'$\mathrm{Density\/of\/States}$')
ax.set_xticklabels([r'$\mathrm{%s}$' % t for t in ax.get_xticklabels()])
ax.set_yticklabels([r'$\mathrm{%s}$' % t for t in ax.get_yticklabels()])
ax.plot(x, up, color='red')
ax.plot(x, down, color='green')
ax.plot(x, total, color='black')
if fmt is not None:
plt.savefig('density_of_states.{}'.format(fmt))
else:
return ax
plt.close()
def get_fermi_velocities():
"""
Calculates the fermi velocity of each band that crosses the fermi
level, according to v_F = dE/(h_bar*dk).
Returns:
fermi_velocities (list). The absolute values of the
adjusted slopes of each band, in Angstroms/s.
"""
vr = Vasprun('vasprun.xml')
# eigenvalues = vr.eigenvalues
bs = vr.get_band_structure()
bands = bs.bands
kpoints = bs.kpoints
efermi = bs.efermi
h_bar = 6.582e-16 # eV*s
fermi_bands = []
for spin in bands:
for i in range(len(bands[spin])):
if max(bands[spin][i]) > efermi > min(bands[spin][i]):
fermi_bands.append(bands[spin][i])
fermi_velocities = []
for band in fermi_bands:
for i in range(len(band)-1):
if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):
dk = np.sqrt((kpoints[i+1].cart_coords[0]
- kpoints[i].cart_coords[0])**2
+ (kpoints[i+1].cart_coords[1]
- kpoints[i].cart_coords[1])**2)
v_f = abs((band[i+1] - band[i]) / (h_bar * dk))
fermi_velocities.append(v_f)
return fermi_velocities # Values are in Angst./s
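# For illustration, assuming a metallic band structure in the working
# directory:
#
#     velocities = get_fermi_velocities()
#     print(max(velocities) * 1e-10)  # convert Angstrom/s to m/s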
def find_dirac_nodes():
"""
Look for band crossings near (within 0.1 eV of) the Fermi level.
Returns:
boolean. Whether or not a band crossing occurs at or near
the fermi level.
"""
vasprun = Vasprun('vasprun.xml')
dirac = False
if vasprun.get_band_structure().get_band_gap()['energy'] < 0.1:
efermi = vasprun.efermi
bsp = BSPlotter(vasprun.get_band_structure('KPOINTS', line_mode=True,
efermi=efermi))
bands = []
data = bsp.bs_plot_data(zero_to_efermi=True)
for d in range(len(data['distances'])):
for i in range(bsp._nb_bands):
x = data['distances'][d]
y = [data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))]
band = [x, y]
bands.append(band)
considered = []
for i in range(len(bands)):
for j in range(len(bands)):
if i != j and (j, i) not in considered:
considered.append((j, i))
for k in range(len(bands[i][0])):
if ((-0.1 < bands[i][1][k] < 0.1) and
(-0.1 < bands[i][1][k] - bands[j][1][k] < 0.1)):
dirac = True
return dirac
def plot_spin_texture(inner_index, outer_index, center=(0, 0), fmt='pdf'):
"""
Create six plots- one for the spin texture in x, y, and z in
each of two bands: an inner band and an outer band. For
Rashba spin-splitting, these two bands should be the two that
have split.
Args:
inner_index (int): indices of the two spin-split bands.
outer_index (int): indices of the two spin-split bands.
center (tuple): coordinates of the center of the splitting
(where the bands cross). Defaults to Gamma.
fmt: matplotlib format style. Check the matplotlib
docs for options.
"""
procar_lines = open("PROCAR").readlines()
data = procar_lines[1].split()
n_kpts = int(data[3])
n_bands = int(data[7])
n_ions = int(data[11])
# These numbers, along with almost everything else in this
# function, are magical. Don't touch them.
band_step = (n_ions + 1) * 4 + 4
k_step = n_bands * band_step + 3
kpoints = []
spin_textures = {'inner': {'x': [], 'y': [], 'z': []},
'outer': {'x': [], 'y': [], 'z': []}}
for n in range(n_kpts):
for var in ['x', 'y', 'z']:
spin_textures['inner'][var].append(0)
spin_textures['outer'][var].append(0)
i = 3
j = 0
while i < len(procar_lines):
kpoints.append([float(procar_lines[i][18:29]) - center[0],
float(procar_lines[i][29:40]) - center[1]])
spin_textures['inner']['x'][j] += float(
procar_lines[i+(4+(n_ions+1)*2)+inner_index*band_step].split()[-1])
spin_textures['inner']['y'][j] += float(
procar_lines[i+(4+(n_ions+1)*3)+inner_index*band_step].split()[-1])
spin_textures['inner']['z'][j] += float(
procar_lines[i+(4+(n_ions+1)*4)+inner_index*band_step].split()[-1])
spin_textures['outer']['x'][j] += float(
procar_lines[i+(4+(n_ions+1)*2)+outer_index*band_step].split()[-1])
spin_textures['outer']['y'][j] += float(
procar_lines[i+(4+(n_ions+1)*3)+outer_index*band_step].split()[-1])
spin_textures['outer']['z'][j] += float(
procar_lines[i+(4+(n_ions+1)*4)+outer_index*band_step].split()[-1])
i += k_step
j += 1
for branch in spin_textures:
for vector in spin_textures[branch]:
print('plotting {}_{}.{}'.format(branch, vector, fmt))
ax = plt.subplot(111, projection='polar')
raw = [spin_textures[branch][vector][k] for k in range(len(kpoints))]
minimum = min(raw)
maximum = max(raw) - minimum
r_max = max([np.sqrt(kpt[0]**2 + kpt[1]**2) for kpt in kpoints])
for l in range(len(kpoints)):
if kpoints[l][0] == 0 and kpoints[l][1] > 0:
theta = np.pi / 2.0
elif kpoints[l][0] == 0:
theta = 3.0 * np.pi / 2.0
elif kpoints[l][0] < 0:
theta = np.pi + np.arctan(kpoints[l][1] / kpoints[l][0])
else:
theta = np.arctan(kpoints[l][1] / kpoints[l][0])
r = np.sqrt(kpoints[l][0]**2 + kpoints[l][1]**2)
if r == 0:
w = 0
else:
w = r_max*0.07/r
ax.add_patch(
plt.Rectangle(
(theta, r), width=w, height=r_max*0.07,
color=plt.cm.rainbow(
(spin_textures[branch][vector][l]-minimum)/maximum
)
)
)
ax.plot(0, 0, linewidth=0, marker='o', color='k', markersize=18)
ax.set_rmax(r_max)
plt.axis('off')
plt.savefig('{}_{}.{}'.format(branch, vector, fmt))
plt.close()
| {
"content_hash": "c609ab8e3ede66f0a40f4cf3cd33d942",
"timestamp": "",
"source": "github",
"line_count": 879,
"max_line_length": 82,
"avg_line_length": 36.689419795221845,
"alnum_prop": 0.5504496124031008,
"repo_name": "joshgabriel/MPInterfaces",
"id": "c0acc09d769099ad068048ad011659f4ac23ad72",
"size": "32250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpinterfaces/mat2d/electronic_structure/analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "514956"
},
{
"name": "Shell",
"bytes": "3252"
}
],
"symlink_target": ""
} |
def add_logging(message):
    # Decorator factory: `message` is captured by the closures below.
    def logging_decorator(f):
        def wrapper(*args, **kwargs):
            # Log the call, run the wrapped function, then upper-case its
            # (string) return value before handing it back.
            print("{} f:{} args:{} kwargs: {} ".format(message, f, args, kwargs))
            x = f(*args, **kwargs)
            return x.upper()
        return wrapper
    return logging_decorator
@add_logging("Entering foo:\n")
def foo(fname):
return fname
print(foo("joe"))
print(foo("mary"))
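# For illustration, the two calls above print something like the following
# (the function address varies per run):
#
#   Entering foo:
#    f:<function foo at 0x7f...> args:('joe',) kwargs: {}
#   JOE
#   Entering foo:
#    f:<function foo at 0x7f...> args:('mary',) kwargs: {}
#   MARY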
| {
"content_hash": "d58fbe935ba5435d53f340a167165d30",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 20.681818181818183,
"alnum_prop": 0.4967032967032967,
"repo_name": "fernandozamoraj/py_sandbox",
"id": "4b9a029dda8c4145f15ddd507244f18e7be05aa8",
"size": "455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py101/decorator-2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "54544"
}
],
"symlink_target": ""
} |
"""
Tests for Volume Code.
"""
import contextlib
import datetime
import mock
import os
import shutil
import socket
import tempfile
import eventlet
import mox
from oslo.config import cfg
from stevedore import extension
from taskflow.engines.action_engine import engine
from cinder.backup import driver as backup_driver
from cinder.brick.iscsi import iscsi
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder import keymgr
from cinder.openstack.common import fileutils
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
import cinder.policy
from cinder import quota
from cinder import test
from cinder.tests.brick.fake_lvm import FakeBrickLVM
from cinder.tests import conf_fixture
from cinder.tests import fake_notifier
from cinder.tests.image import fake as fake_image
from cinder.tests.keymgr import fake as fake_keymgr
from cinder.tests import utils as tests_utils
from cinder import units
from cinder import utils
import cinder.volume
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers import lvm
from cinder.volume.manager import VolumeManager
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volutils
from cinder.volume import volume_types
QUOTAS = quota.QUOTAS
CONF = cfg.CONF
ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
fake_opt = [
cfg.StrOpt('fake_opt', default='fake', help='fake opts')
]
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa'
class FakeImageService:
def __init__(self, db_driver=None, image_service=None):
pass
def show(self, context, image_id):
return {'size': 2 * units.GiB,
'disk_format': 'raw',
'container_format': 'bare'}
class BaseVolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
super(BaseVolumeTestCase, self).setUp()
self.extension_manager = extension.ExtensionManager(
"BaseVolumeTestCase")
vol_tmpdir = tempfile.mkdtemp()
self.flags(volumes_dir=vol_tmpdir,
notification_driver=["test"])
self.volume = importutils.import_object(CONF.volume_manager)
self.context = context.get_admin_context()
self.context.user_id = 'fake'
self.context.project_id = 'fake'
self.volume_params = {
'status': 'creating',
'host': CONF.host,
'size': 1}
self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
self.stubs.Set(brick_lvm.LVM,
'get_all_volume_groups',
self.fake_get_all_volume_groups)
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
self.stubs.Set(os.path, 'exists', lambda x: True)
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0}
# keep ordered record of what we execute
self.called = []
def tearDown(self):
try:
shutil.rmtree(CONF.volumes_dir)
except OSError:
pass
fake_notifier.reset()
super(BaseVolumeTestCase, self).tearDown()
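    # Stub replacements installed in setUp(): they return deterministic
    # iSCSI target and volume-group data so the tests need no real tgtd/LVM.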
def fake_get_target(obj, iqn):
return 1
def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
return [{'name': 'cinder-volumes',
'size': '5.00',
'available': '2.50',
'lv_count': '2',
'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
class VolumeTestCase(BaseVolumeTestCase):
def setUp(self):
super(VolumeTestCase, self).setUp()
self.stubs.Set(volutils, 'clear_volume',
lambda a, b, volume_clear=mox.IgnoreArg(),
volume_clear_size=mox.IgnoreArg(),
lvm_type=mox.IgnoreArg(): None)
def test_init_host_clears_downloads(self):
"""Test that init_host will unwedge a volume stuck in downloading."""
volume = tests_utils.create_volume(self.context, status='downloading',
size=0, host=CONF.host)
volume_id = volume['id']
self.volume.init_host()
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume['status'], "error")
self.volume.delete_volume(self.context, volume_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'rollback')
    def test_create_driver_not_initialized(self, rollback, commit, reserve):
        # mock.patch decorators apply bottom-up, so the innermost patch
        # ('rollback') is passed to the test first.
        self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_volume,
self.context, volume_id)
        # NOTE(flaper87): The volume status should be error.
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume.status, "error")
db.volume_destroy(context.get_admin_context(), volume_id)
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'rollback')
    def test_delete_driver_not_initialized(self, rollback, commit, reserve):
# NOTE(flaper87): Set initialized to False
self.volume.driver._initialized = False
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit_and_rollback(context, reservations, project_id=None):
pass
reserve.return_value = fake_reserve
commit.return_value = fake_commit_and_rollback
rollback.return_value = fake_commit_and_rollback
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(exception.DriverNotInitialized,
self.volume.delete_volume,
self.context, volume_id)
        # NOTE(flaper87): The volume status should be error_deleting.
volume = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume.status, "error_deleting")
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_delete_volume(self):
"""Test volume can be created and deleted."""
# Need to stub out reserve, commit, and rollback
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations, project_id=None):
pass
def fake_rollback(context, reservations, project_id=None):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.volume.create_volume(self.context, volume_id)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual(msg['event_type'], 'volume.create.start')
expected = {
'status': 'creating',
'display_name': 'test_volume',
'availability_zone': 'nova',
'tenant_id': 'fake',
'created_at': 'DONTCARE',
'volume_id': volume_id,
'volume_type': None,
'snapshot_id': None,
'user_id': 'fake',
'launched_at': 'DONTCARE',
'size': 1,
}
self.assertDictMatch(msg['payload'], expected)
msg = fake_notifier.NOTIFICATIONS[1]
self.assertEqual(msg['event_type'], 'volume.create.end')
expected['status'] = 'available'
self.assertDictMatch(msg['payload'], expected)
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
volume_id).id)
self.volume.delete_volume(self.context, volume_id)
vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
volume_id)
self.assertEqual(vol['status'], 'deleted')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEqual(msg['event_type'], 'volume.delete.start')
self.assertDictMatch(msg['payload'], expected)
msg = fake_notifier.NOTIFICATIONS[3]
self.assertEqual(msg['event_type'], 'volume.delete.end')
self.assertDictMatch(msg['payload'], expected)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_create_delete_volume_with_metadata(self):
"""Test volume can be created with metadata and deleted."""
test_meta = {'fake_key': 'fake_value'}
volume = tests_utils.create_volume(self.context, metadata=test_meta,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
result_meta = {
volume.volume_metadata[0].key: volume.volume_metadata[0].value}
self.assertEqual(result_meta, test_meta)
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_create_volume_with_invalid_metadata(self):
"""Test volume create with too much metadata fails."""
volume_api = cinder.volume.api.API()
test_meta = {'fake_key': 'fake_value' * 256}
self.assertRaises(exception.InvalidVolumeMetadataSize,
volume_api.create,
self.context,
1,
'name',
'description',
None,
None,
None,
test_meta)
def test_create_volume_uses_default_availability_zone(self):
"""Test setting availability_zone correctly during volume create."""
volume_api = cinder.volume.api.API()
def fake_list_availability_zones():
return ({'name': 'az1', 'available': True},
{'name': 'az2', 'available': True},
{'name': 'default-az', 'available': True})
self.stubs.Set(volume_api,
'list_availability_zones',
fake_list_availability_zones)
# Test backwards compatibility, default_availability_zone not set
CONF.set_override('storage_availability_zone', 'az2')
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual(volume['availability_zone'], 'az2')
CONF.set_override('default_availability_zone', 'default-az')
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual(volume['availability_zone'], 'default-az')
def test_create_volume_with_volume_type(self):
"""Test volume creation with default volume type."""
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations, project_id=None):
pass
def fake_rollback(context, reservations, project_id=None):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume_api = cinder.volume.api.API()
# Create volume with default volume type while default
# volume type doesn't exist, volume_type_id should be NULL
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertIsNone(volume['volume_type_id'])
self.assertIsNone(volume['encryption_key_id'])
# Create default volume type
vol_type = conf_fixture.def_vol_type
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
# Create volume with default volume type
volume = volume_api.create(self.context,
1,
'name',
'description')
self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
self.assertIsNone(volume['encryption_key_id'])
# Create volume with specific volume type
vol_type = 'test'
db.volume_type_create(context.get_admin_context(),
{'name': vol_type, 'extra_specs': {}})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
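    # The encryption tests below register a 'LUKS' volume type whose
    # encryption spec points at ENCRYPTION_PROVIDER; creating a volume of
    # that type is expected to allocate a key via the (fake) key manager.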
def test_create_volume_with_encrypted_volume_type(self):
self.stubs.Set(keymgr, "API", fake_keymgr.fake_api)
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
self.assertIsNotNone(volume['encryption_key_id'])
def test_create_delete_volume_with_encrypted_volume_type(self):
self.stubs.Set(keymgr, "API", fake_keymgr.fake_api)
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS')
volume = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
self.assertIsNotNone(volume.get('encryption_key_id', None))
self.assertEqual(volume['volume_type_id'], db_vol_type.get('id'))
self.assertIsNotNone(volume['encryption_key_id'])
volume['host'] = 'fake_host'
volume['status'] = 'available'
volume_api.delete(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('deleting', volume['status'])
db.volume_destroy(self.context, volume['id'])
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume['id'])
def test_extra_capabilities(self):
# Test valid extra_capabilities.
fake_capabilities = {'key1': 1, 'key2': 2}
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.return_value = fake_capabilities
manager = VolumeManager()
manager.driver.set_initialized()
manager.publish_service_capabilities(self.context)
self.assertTrue(mock_loads.called)
volume_stats = manager.last_capabilities
self.assertEqual(volume_stats['key1'],
fake_capabilities['key1'])
self.assertEqual(volume_stats['key2'],
fake_capabilities['key2'])
def test_extra_capabilities_fail(self):
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.side_effect = exception.CinderException('test')
self.assertRaises(exception.CinderException, VolumeManager)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
self.volume.driver.delete_volume(
mox.IgnoreArg()).AndRaise(exception.VolumeIsBusy(
volume_name='fake'))
self.mox.ReplayAll()
res = self.volume.delete_volume(self.context, volume_id)
self.assertEqual(True, res)
volume_ref = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume_id, volume_ref.id)
self.assertEqual("available", volume_ref.status)
self.mox.UnsetStubs()
self.volume.delete_volume(self.context, volume_id)
def test_delete_volume_in_error_extending(self):
"""Test volume can be deleted in error_extending stats."""
# create a volume
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
# delete 'error_extending' volume
db.volume_update(self.context, volume['id'],
{'status': 'error_extending'})
self.volume.delete_volume(self.context, volume['id'])
self.assertRaises(exception.NotFound, db.volume_get,
self.context, volume['id'])
def test_create_volume_from_snapshot(self):
"""Test volume can be created from a snapshot."""
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'])['id']
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_id)
volume_dst = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id,
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_snapshot_with_types(self, _get_flow):
"""Test volume create from snapshot with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(context.get_admin_context(),
{'name': 'foo', 'extra_specs': {}})
db.volume_type_create(context.get_admin_context(),
{'name': 'biz', 'extra_specs': {}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
snapshot = {'id': 1234,
'status': 'available',
'volume_size': 10,
'volume_type_id': biz_type['id']}
        # Make sure the case of specifying a type that
        # doesn't match the snapshot's type fails
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot)
        # Make sure that trying to specify a type
        # when the snapshot's type is None fails
snapshot['volume_type_id'] = None
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
snapshot=snapshot)
snapshot['volume_type_id'] = foo_type['id']
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=foo_type,
snapshot=snapshot)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
@mock.patch('cinder.volume.flows.api.create_volume.get_flow')
def test_create_volume_from_source_with_types(self, _get_flow):
"""Test volume create from source with types including mistmatch."""
volume_api = cinder.volume.api.API()
db.volume_type_create(context.get_admin_context(),
{'name': 'foo', 'extra_specs': {}})
db.volume_type_create(context.get_admin_context(),
{'name': 'biz', 'extra_specs': {}})
foo_type = db.volume_type_get_by_name(context.get_admin_context(),
'foo')
biz_type = db.volume_type_get_by_name(context.get_admin_context(),
'biz')
source_vol = {'id': 1234,
'status': 'available',
'volume_size': 10,
'volume_type': biz_type,
'volume_type_id': biz_type['id']}
        # Make sure the case of specifying a type that
        # doesn't match the source volume's type fails
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
        # Make sure that trying to specify a type
        # when the source volume's type is None fails
source_vol['volume_type_id'] = None
source_vol['volume_type'] = None
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
volume_type=foo_type,
source_volume=source_vol)
source_vol['volume_type_id'] = biz_type['id']
source_vol['volume_type'] = biz_type
volume_api.create(self.context, size=1, name='fake_name',
description='fake_desc', volume_type=biz_type,
source_volume=source_vol)
db.volume_type_destroy(context.get_admin_context(),
foo_type['id'])
db.volume_type_destroy(context.get_admin_context(),
biz_type['id'])
def test_create_snapshot_driver_not_initialized(self):
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'])['id']
# NOTE(flaper87): Set initialized to False
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.create_snapshot,
self.context, volume_src['id'],
snapshot_id)
# NOTE(flaper87): The volume status should be error.
snapshot = db.snapshot_get(context.get_admin_context(), snapshot_id)
self.assertEqual(snapshot.status, "error")
# NOTE(flaper87): Set initialized to True,
# lets cleanup the mess
self.volume.driver._initialized = True
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
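    # _mock_synchronized is a decorator factory shaped like
    # utils.synchronized: instead of taking a lock it records
    # 'lock-<name>'/'unlock-<name>' markers in self.called so the tests
    # below can assert on lock ordering.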
def _mock_synchronized(self, name, *s_args, **s_kwargs):
def inner_sync1(f):
def inner_sync2(*args, **kwargs):
self.called.append('lock-%s' % (name))
ret = f(*args, **kwargs)
self.called.append('unlock-%s' % (name))
return ret
return inner_sync2
return inner_sync1
def test_create_volume_from_snapshot_check_locks(self):
# mock the synchroniser so we can record events
self.stubs.Set(utils, 'synchronized', self._mock_synchronized)
self.stubs.Set(self.volume.driver, 'create_volume_from_snapshot',
lambda *args, **kwargs: None)
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
self.assertEqual(len(self.called), 1)
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
snap_id = self._create_snapshot(src_vol_id)['id']
# no lock
self.volume.create_snapshot(self.context, src_vol_id, snap_id)
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snap_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
snapshot_id=snap_id)
self.assertEqual(len(self.called), 2)
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(snap_id,
db.volume_get(admin_ctxt, dst_vol_id).snapshot_id)
# locked
self.volume.delete_volume(self.context, dst_vol_id)
self.assertEqual(len(self.called), 4)
# locked
self.volume.delete_snapshot(self.context, snap_id)
self.assertEqual(len(self.called), 6)
# locked
self.volume.delete_volume(self.context, src_vol_id)
self.assertEqual(len(self.called), 8)
self.assertEqual(self.called,
['lock-%s' % ('%s-delete_snapshot' % (snap_id)),
'unlock-%s' % ('%s-delete_snapshot' % (snap_id)),
'lock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'lock-%s' % ('%s-delete_snapshot' % (snap_id)),
'unlock-%s' % ('%s-delete_snapshot' % (snap_id)),
'lock-%s' % ('%s-delete_volume' % (src_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (src_vol_id))])
def test_create_volume_from_volume_check_locks(self):
# mock the synchroniser so we can record events
self.stubs.Set(utils, 'synchronized', self._mock_synchronized)
orig_flow = engine.ActionEngine.run
def mock_flow_run(*args, **kwargs):
# ensure the lock has been taken
self.assertEqual(len(self.called), 1)
# now proceed with the flow.
ret = orig_flow(*args, **kwargs)
return ret
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
# mock the flow runner so we can do some checks
self.stubs.Set(engine.ActionEngine, 'run', mock_flow_run)
# locked
self.volume.create_volume(self.context, volume_id=dst_vol_id,
source_volid=src_vol_id)
self.assertEqual(len(self.called), 2)
self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id)
self.assertEqual(src_vol_id,
db.volume_get(admin_ctxt, dst_vol_id).source_volid)
# locked
self.volume.delete_volume(self.context, dst_vol_id)
self.assertEqual(len(self.called), 4)
# locked
self.volume.delete_volume(self.context, src_vol_id)
self.assertEqual(len(self.called), 6)
self.assertEqual(self.called,
['lock-%s' % ('%s-delete_volume' % (src_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (src_vol_id)),
'lock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (dst_vol_id)),
'lock-%s' % ('%s-delete_volume' % (src_vol_id)),
'unlock-%s' % ('%s-delete_volume' % (src_vol_id))])
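    # The *_delete_lock_taken tests below race a create against a delete:
    # context.elevated is hooked so that, once the delete enters its locked
    # section, a greenthread starts the dependent create, which must block
    # on the same lock and then fail because its source has been deleted.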
def test_create_volume_from_volume_delete_lock_taken(self):
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
orig_elevated = self.context.elevated
ctxt_deepcopy = self.context.deepcopy()
gthreads = []
def mock_elevated(*args, **kwargs):
# unset mock so it is only called once
self.stubs.Set(self.context, 'elevated', orig_elevated)
# we expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume,
ctxt_deepcopy,
volume_id=dst_vol_id, source_volid=src_vol_id)
gthreads.append(t)
return orig_elevated(*args, **kwargs)
# mock something from early on in the delete operation and within the
# lock so that when we do the create we expect it to block.
self.stubs.Set(self.context, 'elevated', mock_elevated)
# locked
self.volume.delete_volume(self.context, src_vol_id)
        # we expect the volume create to fail with VolumeNotFound since the
        # source volume was deleted while the create was blocked on the lock.
        # Note that the volume is still in the db since it was created by the
        # test prior to calling manager.create_volume.
self.assertRaises(exception.VolumeNotFound, gthreads[0].wait)
def test_create_volume_from_snapshot_delete_lock_taken(self):
# create source volume
src_vol = tests_utils.create_volume(self.context, **self.volume_params)
src_vol_id = src_vol['id']
# no lock
self.volume.create_volume(self.context, src_vol_id)
# create snapshot
snap_id = self._create_snapshot(src_vol_id)['id']
# no lock
self.volume.create_snapshot(self.context, src_vol_id, snap_id)
# create vol from snapshot...
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']
admin_ctxt = context.get_admin_context()
orig_elevated = self.context.elevated
ctxt_deepcopy = self.context.deepcopy()
gthreads = []
def mock_elevated(*args, **kwargs):
# unset mock so it is only called once
self.stubs.Set(self.context, 'elevated', orig_elevated)
# We expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume, ctxt_deepcopy,
volume_id=dst_vol_id, snapshot_id=snap_id)
gthreads.append(t)
return orig_elevated(*args, **kwargs)
# mock something from early on in the delete operation and within the
# lock so that when we do the create we expect it to block.
self.stubs.Set(self.context, 'elevated', mock_elevated)
# locked
self.volume.delete_snapshot(self.context, snap_id)
        # we expect the volume create to fail with SnapshotNotFound since the
        # snapshot was deleted while the create was blocked on the lock. Note
        # that the volume is still in the db since it was created by the test
        # prior to calling manager.create_volume.
self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait)
# locked
self.volume.delete_volume(self.context, src_vol_id)
# make sure it is gone
self.assertRaises(exception.VolumeNotFound, db.volume_get,
self.context, src_vol_id)
def test_create_volume_from_snapshot_with_encryption(self):
"""Test volume can be created from a snapshot of
an encrypted volume.
"""
self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api)
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
volume_api = cinder.volume.api.API()
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
'LUKS')
volume_src = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume_src,
'name',
'description')
snapshot_ref['status'] = 'available' # status must be available
volume_dst = volume_api.create(self.context,
1,
'name',
'description',
snapshot=snapshot_ref)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_ref['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).snapshot_id)
# ensure encryption keys match
self.assertIsNotNone(volume_src['encryption_key_id'])
self.assertIsNotNone(volume_dst['encryption_key_id'])
key_manager = volume_api.key_manager # must use *same* key manager
volume_src_key = key_manager.get_key(self.context,
volume_src['encryption_key_id'])
volume_dst_key = key_manager.get_key(self.context,
volume_dst['encryption_key_id'])
self.assertEqual(volume_src_key, volume_dst_key)
def test_create_volume_from_encrypted_volume(self):
"""Test volume can be created from an encrypted volume."""
self.stubs.Set(keymgr, 'API', fake_keymgr.fake_api)
volume_api = cinder.volume.api.API()
ctxt = context.get_admin_context()
db.volume_type_create(ctxt,
{'id': '61298380-0c12-11e3-bfd6-4b48424183be',
'name': 'LUKS'})
db.volume_type_encryption_create(
ctxt,
'61298380-0c12-11e3-bfd6-4b48424183be',
{'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER})
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
'LUKS')
volume_src = volume_api.create(self.context,
1,
'name',
'description',
volume_type=db_vol_type)
volume_src['status'] = 'available' # status must be available
volume_dst = volume_api.create(self.context,
1,
'name',
'description',
source_volume=volume_src)
self.assertEqual(volume_dst['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(volume_src['id'],
db.volume_get(context.get_admin_context(),
volume_dst['id']).source_volid)
# ensure encryption keys match
self.assertIsNotNone(volume_src['encryption_key_id'])
self.assertIsNotNone(volume_dst['encryption_key_id'])
key_manager = volume_api.key_manager # must use *same* key manager
volume_src_key = key_manager.get_key(self.context,
volume_src['encryption_key_id'])
volume_dst_key = key_manager.get_key(self.context,
volume_dst['encryption_key_id'])
self.assertEqual(volume_src_key, volume_dst_key)
def test_create_volume_from_snapshot_fail_bad_size(self):
"""Test volume can't be created from snapshot with bad volume size."""
volume_api = cinder.volume.api.API()
snapshot = {'id': 1234,
'status': 'available',
'volume_size': 10}
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot)
def test_create_volume_from_snapshot_fail_wrong_az(self):
"""Test volume can't be created from snapshot in a different az."""
volume_api = cinder.volume.api.API()
def fake_list_availability_zones():
return ({'name': 'nova', 'available': True},
{'name': 'az2', 'available': True})
self.stubs.Set(volume_api,
'list_availability_zones',
fake_list_availability_zones)
volume_src = tests_utils.create_volume(self.context,
availability_zone='az2',
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
snapshot = self._create_snapshot(volume_src['id'])
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot['id'])
snapshot = db.snapshot_get(self.context, snapshot['id'])
volume_dst = volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot)
self.assertEqual(volume_dst['availability_zone'], 'az2')
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
snapshot=snapshot,
availability_zone='nova')
def test_create_volume_with_invalid_exclusive_options(self):
"""Test volume create with multiple exclusive options fails."""
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
1,
'name',
'description',
snapshot='fake_id',
image_id='fake_id',
source_volume='fake_id')
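    # initialize_connection() should expose volume-type QoS specs to the
    # connector only when their 'consumer' is 'front-end' or 'both';
    # back-end-only specs must not appear in the connection info.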
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_fetchqos(self,
_mock_volume_update,
_mock_volume_get,
_mock_volume_admin_metadata_get):
"""Make sure initialize_connection returns correct information."""
_fake_admin_meta = {'fake-key': 'fake-value'}
_fake_volume = {'volume_type_id': 'fake_type_id',
'name': 'fake_name',
'host': 'fake_host',
'id': 'fake_volume_id',
'volume_admin_metadata': _fake_admin_meta}
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_volume_admin_metadata_get.return_value = _fake_admin_meta
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
qos_values = {'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2'}
}
with contextlib.nested(
mock.patch.object(cinder.volume.volume_types,
'get_volume_type_qos_specs'),
mock.patch.object(cinder.tests.fake_driver.FakeISCSIDriver,
'initialize_connection')
) as (type_qos, driver_init):
type_qos.return_value = dict(qos_specs=qos_values)
driver_init.return_value = {'data': {}}
qos_specs_expected = {'key1': 'value1',
'key2': 'value2'}
# initialize_connection() passes qos_specs that is designated to
# be consumed by front-end or both front-end and back-end
conn_info = self.volume.initialize_connection(self.context,
'fake_volume_id',
connector)
self.assertDictMatch(qos_specs_expected,
conn_info['data']['qos_specs'])
qos_values.update({'consumer': 'both'})
conn_info = self.volume.initialize_connection(self.context,
'fake_volume_id',
connector)
self.assertDictMatch(qos_specs_expected,
conn_info['data']['qos_specs'])
# initialize_connection() skips qos_specs that is designated to be
# consumed by back-end only
qos_values.update({'consumer': 'back-end'})
type_qos.return_value = dict(qos_specs=qos_values)
conn_info = self.volume.initialize_connection(self.context,
'fake_volume_id',
connector)
self.assertIsNone(conn_info['data']['qos_specs'])
def test_run_attach_detach_volume_for_instance(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
        # attach volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertIsNone(vol['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
self.assertEqual(admin_metadata[1]['value'], 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual(conn_info['data']['access_mode'], 'ro')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_for_host(self):
"""Make sure volume can be attached and detached from host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertIsNone(vol['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual(vol['attached_host'], 'fake-host')
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'False')
self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
self.assertEqual(admin_metadata[1]['value'], 'rw')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual(conn_info['data']['access_mode'], 'rw')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume_id)
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_with_attach_mode(self):
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
db.volume_update(self.context, volume_id, {'status': 'available',
'mountpoint': None,
'instance_uuid': None,
'attached_host': None,
'attached_mode': None})
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertIsNone(vol['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
self.assertEqual(admin_metadata[1]['value'], 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual(conn_info['data']['access_mode'], 'ro')
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
self.assertIsNone(vol['mountpoint'])
self.assertIsNone(vol['instance_uuid'])
self.assertIsNone(vol['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertIsNone(vol['instance_uuid'])
self.assertEqual(vol['attached_host'], 'fake-host')
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
self.assertEqual(admin_metadata[1]['value'], 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEqual(conn_info['data']['access_mode'], 'ro')
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
self.assertIsNone(vol['mountpoint'])
self.assertIsNone(vol['instance_uuid'])
self.assertIsNone(vol['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
        # Attaching a readonly volume in 'read-write' mode is not allowed
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "error_attaching")
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
self.assertEqual(admin_metadata[1]['value'], 'rw')
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "error_attaching")
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
self.assertEqual(admin_metadata[1]['key'], 'attached_mode')
self.assertEqual(admin_metadata[1]['value'], 'rw')
def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
        # Attaching a readonly volume in 'read-write' mode is not allowed
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'True')
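    # reserve_volume moves an 'available' volume to 'attaching' and raises
    # InvalidVolume for any other status; unreserve_volume releases the
    # reservation again.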
@mock.patch.object(db, 'volume_get')
@mock.patch.object(cinder.volume.api.API, 'update')
def test_reserve_volume_success(self, volume_get, volume_update):
fake_volume = {
'id': FAKE_UUID,
'status': 'available'
}
volume_get.return_value = fake_volume
volume_update.return_value = fake_volume
self.assertIsNone(cinder.volume.api.API().reserve_volume(
self.context,
fake_volume,
))
self.assertTrue(volume_get.called)
self.assertTrue(volume_update.called)
def test_reserve_volume_bad_status(self):
fake_volume = {
'id': FAKE_UUID,
'status': 'in-use'
}
with mock.patch.object(db, 'volume_get') as mock_volume_get:
mock_volume_get.return_value = fake_volume
self.assertRaises(exception.InvalidVolume,
cinder.volume.api.API().reserve_volume,
self.context,
fake_volume)
self.assertTrue(mock_volume_get.called)
def test_unreserve_volume_success(self):
fake_volume = {
'id': FAKE_UUID,
'status': 'attaching'
}
with mock.patch.object(cinder.volume.api.API,
'update') as mock_volume_update:
mock_volume_update.return_value = fake_volume
self.assertIsNone(cinder.volume.api.API().unreserve_volume(
self.context,
fake_volume
))
self.assertTrue(mock_volume_update.called)
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
targets = []
def _check(volume_id):
"""Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
iscsi_target = db.volume_get_iscsi_target_num(admin_context,
volume_id)
self.assertNotIn(iscsi_target, targets)
targets.append(iscsi_target)
total_slots = CONF.iscsi_num_targets
for _index in xrange(total_slots):
tests_utils.create_volume(self.context, **self.volume_params)
for volume_id in volume_ids:
self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
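    # Helper that writes a snapshot row straight to the DB, bypassing the
    # volume API, so tests can start from an arbitrary snapshot state.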
@staticmethod
def _create_snapshot(volume_id, size='0', metadata=None):
"""Create a snapshot object."""
snap = {}
snap['volume_size'] = size
snap['user_id'] = 'fake'
snap['project_id'] = 'fake'
snap['volume_id'] = volume_id
snap['status'] = "creating"
if metadata is not None:
snap['metadata'] = metadata
return db.snapshot_create(context.get_admin_context(), snap)
def test_create_delete_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(
self.context,
availability_zone=CONF.storage_availability_zone,
**self.volume_params)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.volume.create_volume(self.context, volume['id'])
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
snapshot_id = self._create_snapshot(volume['id'])['id']
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
msg = fake_notifier.NOTIFICATIONS[2]
self.assertEqual(msg['event_type'], 'snapshot.create.start')
expected = {
'created_at': 'DONTCARE',
'deleted': '',
'display_name': None,
'snapshot_id': snapshot_id,
'status': 'creating',
'tenant_id': 'fake',
'user_id': 'fake',
'volume_id': volume['id'],
'volume_size': 0,
'availability_zone': 'nova'
}
self.assertDictMatch(msg['payload'], expected)
msg = fake_notifier.NOTIFICATIONS[3]
self.assertEqual(msg['event_type'], 'snapshot.create.end')
self.assertDictMatch(msg['payload'], expected)
self.volume.delete_snapshot(self.context, snapshot_id)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 6)
msg = fake_notifier.NOTIFICATIONS[4]
self.assertEqual(msg['event_type'], 'snapshot.delete.start')
expected['status'] = 'available'
self.assertDictMatch(msg['payload'], expected)
msg = fake_notifier.NOTIFICATIONS[5]
self.assertEqual(msg['event_type'], 'snapshot.delete.end')
self.assertDictMatch(msg['payload'], expected)
snap = db.snapshot_get(context.get_admin_context(read_deleted='yes'),
snapshot_id)
self.assertEqual(snap['status'], 'deleted')
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_create_delete_snapshot_with_metadata(self):
"""Test snapshot can be created with metadata and deleted."""
test_meta = {'fake_key': 'fake_value'}
volume = tests_utils.create_volume(self.context, **self.volume_params)
snapshot = self._create_snapshot(volume['id'], metadata=test_meta)
snapshot_id = snapshot['id']
snap = db.snapshot_get(context.get_admin_context(), snapshot_id)
result_dict = dict(snap.iteritems())
result_meta = {
result_dict['snapshot_metadata'][0].key:
result_dict['snapshot_metadata'][0].value}
self.assertEqual(result_meta, test_meta)
self.volume.delete_snapshot(self.context, snapshot_id)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
def test_cannot_delete_volume_in_use(self):
"""Test volume can't be deleted in invalid stats."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
# 'in-use' status raises InvalidVolume
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_force_delete_volume(self):
"""Test volume can be forced to delete."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'error_deleting'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
# 'error_deleting' volumes can't be deleted
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
# delete with force
volume_api.delete(self.context, volume, force=True)
# status is deleting
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['status'], 'deleting')
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_cannot_force_delete_attached_volume(self):
"""Test volume can't be force delete in attached state."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume['attach_status'] = 'attached'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.VolumeAttached,
volume_api.delete,
self.context,
volume,
force=True)
self.volume.delete_volume(self.context, volume['id'])
def test_cannot_delete_volume_with_snapshots(self):
"""Test volume can't be deleted with dependent snapshots."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])['id']
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_can_delete_errored_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])['id']
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
snapshot = db.snapshot_get(context.get_admin_context(),
snapshot_id)
volume_api = cinder.volume.api.API()
snapshot['status'] = 'badstatus'
self.assertRaises(exception.InvalidSnapshot,
volume_api.delete_snapshot,
self.context,
snapshot)
snapshot['status'] = 'error'
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
instance_uuid = '12345678-1234-5678-1234-567812345678'
# create volume and attach to the instance
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
db.volume_attached(self.context, volume['id'], instance_uuid,
None, '/dev/sda1')
volume_api = cinder.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
db.snapshot_destroy(self.context, snapshot_ref['id'])
db.volume_destroy(self.context, volume['id'])
# create volume and attach to the host
volume = tests_utils.create_volume(self.context, **self.volume_params)
self.volume.create_volume(self.context, volume['id'])
db.volume_attached(self.context, volume['id'], None,
'fake_host', '/dev/sda1')
volume_api = cinder.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
db.snapshot_destroy(self.context, snapshot_ref['id'])
db.volume_destroy(self.context, volume['id'])
def test_delete_busy_snapshot(self):
"""Test snapshot can be created and deleted."""
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
snapshot_id = self._create_snapshot(volume_id)['id']
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(
mox.IgnoreArg()).AndRaise(
exception.SnapshotIsBusy(snapshot_name='fake'))
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot_id)
snapshot_ref = db.snapshot_get(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.mox.UnsetStubs()
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_id)
def test_delete_no_dev_fails(self):
"""Test delete snapshot with no dev file fails."""
self.stubs.Set(os.path, 'exists', lambda x: False)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
snapshot_id = self._create_snapshot(volume_id)['id']
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(
mox.IgnoreArg()).AndRaise(
exception.SnapshotIsBusy(snapshot_name='fake'))
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot_id)
snapshot_ref = db.snapshot_get(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.mox.UnsetStubs()
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.delete_snapshot,
self.context,
snapshot_id)
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.delete_volume,
self.context,
volume_id)
def _create_volume_from_image(self, fakeout_copy_image_to_volume=False,
fakeout_clone_image=False):
"""Test function of create_volume_from_image.
Test cases call this function to create a volume from image, caller
can choose whether to fake out copy_image_to_volume and conle_image,
after calling this, test cases should check status of the volume.
"""
def fake_local_path(volume):
return dst_path
def fake_copy_image_to_volume(context, volume,
image_service, image_id):
pass
def fake_fetch_to_raw(ctx, image_service, image_id, path, blocksize,
size=None):
pass
def fake_clone_image(volume_ref, image_location, image_id, image_meta):
return {'provider_location': None}, True
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.stubs.Set(self.volume.driver, 'local_path', fake_local_path)
if fakeout_clone_image:
self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image)
self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw)
if fakeout_copy_image_to_volume:
self.stubs.Set(self.volume, '_copy_image_to_volume',
fake_copy_image_to_volume)
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume_id = tests_utils.create_volume(self.context,
**self.volume_params)['id']
# creating volume testdata
try:
self.volume.create_volume(self.context,
volume_id,
image_id=image_id)
finally:
# cleanup
os.unlink(dst_path)
volume = db.volume_get(self.context, volume_id)
return volume
def test_create_volume_from_image_cloned_status_available(self):
"""Test create volume from image via cloning.
Verify that after cloning image to volume, it is in available
state and is bootable.
"""
volume = self._create_volume_from_image()
self.assertEqual(volume['status'], 'available')
self.assertEqual(volume['bootable'], True)
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_not_cloned_status_available(self):
"""Test create volume from image via full copy.
Verify that after copying image to volume, it is in available
state and is bootable.
"""
volume = self._create_volume_from_image(fakeout_clone_image=True)
self.assertEqual(volume['status'], 'available')
self.assertEqual(volume['bootable'], True)
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_image_exception(self):
"""Verify that create volume from a non-existing image, the volume
status is 'error' and is not bootable.
"""
dst_fd, dst_path = tempfile.mkstemp()
os.close(dst_fd)
self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)
# creating volume testdata
volume_id = 1
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'host': 'dummy'})
self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
self.context,
volume_id, None, None, None,
None,
FAKE_UUID)
volume = db.volume_get(self.context, volume_id)
self.assertEqual(volume['status'], "error")
self.assertEqual(volume['bootable'], False)
# cleanup
db.volume_destroy(self.context, volume_id)
os.unlink(dst_path)
def test_create_volume_from_exact_sized_image(self):
"""Verify that an image which is exactly the same size as the
volume, will work correctly.
"""
try:
volume_id = None
volume_api = cinder.volume.api.API(
image_service=FakeImageService())
volume = volume_api.create(self.context, 2, 'name', 'description',
image_id=1)
volume_id = volume['id']
self.assertEqual(volume['status'], 'creating')
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_volume_from_oversized_image(self):
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.GiB + 1,
'disk_format': 'raw',
'container_format': 'bare'}
volume_api = cinder.volume.api.API(image_service=
_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def test_create_volume_with_mindisk_error(self):
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
return {'size': 2 * units.GiB,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5}
volume_api = cinder.volume.api.API(image_service=
_ModifiedFakeImageService())
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context, 2,
'name', 'description', image_id=1)
def _do_test_create_volume_with_size(self, size):
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations, project_id=None):
pass
def fake_rollback(context, reservations, project_id=None):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume_api = cinder.volume.api.API()
volume = volume_api.create(self.context,
size,
'name',
'description')
self.assertEqual(volume['size'], int(size))
def test_create_volume_int_size(self):
"""Test volume creation with int size."""
self._do_test_create_volume_with_size(2)
def test_create_volume_string_size(self):
"""Test volume creation with string size."""
self._do_test_create_volume_with_size('2')
def test_create_volume_with_bad_size(self):
def fake_reserve(context, expire=None, project_id=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations, project_id=None):
pass
def fake_rollback(context, reservations, project_id=None):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
'2Gb',
'name',
'description')
def test_begin_roll_detaching_volume(self):
"""Test begin_detaching and roll_detaching functions."""
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_api = cinder.volume.api.API()
volume_api.begin_detaching(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual(volume['status'], "detaching")
volume_api.roll_detaching(self.context, volume)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual(volume['status'], "in-use")
def test_volume_api_update(self):
# create a raw vol
volume = tests_utils.create_volume(self.context, **self.volume_params)
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update(self.context, volume, update_dict)
# read changes from db
vol = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(vol['display_name'], 'test update name')
def test_volume_api_update_snapshot(self):
# create raw snapshot
volume = tests_utils.create_volume(self.context, **self.volume_params)
snapshot = self._create_snapshot(volume['id'])
self.assertIsNone(snapshot['display_name'])
# use volume.api to update name
volume_api = cinder.volume.api.API()
update_dict = {'display_name': 'test update name'}
volume_api.update_snapshot(self.context, snapshot, update_dict)
# read changes from db
snap = db.snapshot_get(context.get_admin_context(), snapshot['id'])
self.assertEqual(snap['display_name'], 'test update name')
@mock.patch.object(QUOTAS, 'reserve')
def test_extend_volume(self, reserve):
"""Test volume can be extended at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context, size=2,
status='creating', host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume['host'] = 'fakehost'
volume_api = cinder.volume.api.API()
# Extend fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.extend,
self.context,
volume,
3)
volume['status'] = 'available'
# Extend fails when new_size < orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
1)
# Extend fails when new_size == orig_size
self.assertRaises(exception.InvalidInput,
volume_api.extend,
self.context,
volume,
2)
# works when new_size > orig_size
reserve.return_value = ["RESERVATION"]
volume_api.extend(self.context, volume, 3)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['status'], 'extending')
# Test the quota exceeded
volume['status'] = 'available'
reserve.side_effect = exception.OverQuota(overs=['gigabytes'],
quotas={'gigabytes': 20},
usages={'gigabytes':
{'reserved': 5,
'in_use': 15}})
self.assertRaises(exception.VolumeSizeExceedsAvailableQuota,
volume_api.extend, self.context,
volume, 3)
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_extend_volume_driver_not_initialized(self):
"""Test volume can be extended at API level."""
# create a volume and assign to host
fake_reservations = ['RESERVATION']
volume = tests_utils.create_volume(self.context, size=2,
status='available',
host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
# NOTE(flaper87): Set initialized to False
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.extend_volume,
self.context, volume['id'], 3,
fake_reservations)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume.status, 'error_extending')
# NOTE(flaper87): Set initialized to True,
# let's clean up the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume['id'])
def test_extend_volume_manager(self):
"""Test volume can be extended at the manager level."""
def fake_extend(volume, new_size):
volume['size'] = new_size
fake_reservations = ['RESERVATION']
volume = tests_utils.create_volume(self.context, size=2,
status='creating', host=CONF.host)
self.volume.create_volume(self.context, volume['id'])
# Test driver exception
with mock.patch.object(self.volume.driver,
'extend_volume') as extend_volume:
extend_volume.side_effect =\
exception.CinderException('fake exception')
volume['status'] = 'extending'
self.volume.extend_volume(self.context, volume['id'], '4',
fake_reservations)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['size'], 2)
self.assertEqual(volume['status'], 'error_extending')
# Test driver success
with mock.patch.object(self.volume.driver,
'extend_volume') as extend_volume:
extend_volume.return_value = fake_extend
volume['status'] = 'extending'
self.volume.extend_volume(self.context, volume['id'], '4',
fake_reservations)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['size'], 4)
self.assertEqual(volume['status'], 'available')
# clean up
self.volume.delete_volume(self.context, volume['id'])
def test_create_volume_from_unelevated_context(self):
"""Test context does't change after volume creation failure."""
def fake_create_volume(*args, **kwargs):
raise exception.CinderException('fake exception')
# create context for testing
ctxt = self.context.deepcopy()
if 'admin' in ctxt.roles:
ctxt.roles.remove('admin')
ctxt.is_admin = False
# create one copy of the context for future comparison
self.saved_ctxt = ctxt.deepcopy()
self.stubs.Set(self.volume.driver, 'create_volume', fake_create_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.assertRaises(exception.CinderException,
self.volume.create_volume, ctxt, volume_src['id'])
def test_create_volume_from_sourcevol(self):
"""Test volume can be created from a source volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'],
source_volid=volume_src['id'])
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
def test_create_volume_from_sourcevol_fail_wrong_az(self):
"""Test volume can't be cloned from an other volume in different az."""
volume_api = cinder.volume.api.API()
def fake_list_availability_zones():
return ({'name': 'nova', 'available': True},
{'name': 'az2', 'available': True})
self.stubs.Set(volume_api,
'list_availability_zones',
fake_list_availability_zones)
volume_src = tests_utils.create_volume(self.context,
availability_zone='az2',
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_src = db.volume_get(self.context, volume_src['id'])
volume_dst = volume_api.create(self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src)
self.assertEqual(volume_dst['availability_zone'], 'az2')
self.assertRaises(exception.InvalidInput,
volume_api.create,
self.context,
size=1,
name='fake_name',
description='fake_desc',
source_volume=volume_src,
availability_zone='nova')
def test_create_volume_from_sourcevol_with_glance_metadata(self):
"""Test glance metadata can be correctly copied to new volume."""
def fake_create_cloned_volume(volume, src_vref):
pass
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_create_cloned_volume)
volume_src = self._create_volume_from_image()
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.volume.create_volume(self.context, volume_dst['id'],
source_volid=volume_src['id'])
self.assertEqual('available',
db.volume_get(context.get_admin_context(),
volume_dst['id']).status)
src_glancemeta = db.volume_get(context.get_admin_context(),
volume_src['id']).volume_glance_metadata
dst_glancemeta = db.volume_get(context.get_admin_context(),
volume_dst['id']).volume_glance_metadata
for meta_src in src_glancemeta:
for meta_dst in dst_glancemeta:
if meta_dst.key == meta_src.key:
self.assertEqual(meta_dst.value, meta_src.value)
self.volume.delete_volume(self.context, volume_src['id'])
self.volume.delete_volume(self.context, volume_dst['id'])
def test_create_volume_from_sourcevol_failed_clone(self):
"""Test src vol status will be restore by error handling code."""
def fake_error_create_cloned_volume(volume, src_vref):
db.volume_update(self.context, src_vref['id'], {'status': 'error'})
raise exception.CinderException('fake exception')
self.stubs.Set(self.volume.driver, 'create_cloned_volume',
fake_error_create_cloned_volume)
volume_src = tests_utils.create_volume(self.context,
**self.volume_params)
self.volume.create_volume(self.context, volume_src['id'])
volume_dst = tests_utils.create_volume(self.context,
source_volid=volume_src['id'],
**self.volume_params)
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume_dst['id'], None, None, None, None, None,
volume_src['id'])
self.assertEqual(volume_src['status'], 'creating')
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_volume(self.context, volume_src['id'])
def test_list_availability_zones_enabled_service(self):
services = [
{'availability_zone': 'ping', 'disabled': 0},
{'availability_zone': 'ping', 'disabled': 1},
{'availability_zone': 'pong', 'disabled': 0},
{'availability_zone': 'pung', 'disabled': 1},
]
def stub_service_get_all_by_topic(*args, **kwargs):
return services
self.stubs.Set(db, 'service_get_all_by_topic',
stub_service_get_all_by_topic)
volume_api = cinder.volume.api.API()
azs = volume_api.list_availability_zones()
expected = (
{'name': 'pung', 'available': False},
{'name': 'pong', 'available': True},
{'name': 'ping', 'available': True},
)
self.assertEqual(expected, azs)
def test_migrate_volume_driver(self):
"""Test volume migration done by driver."""
# stub out driver and rpc functions
self.stubs.Set(self.volume.driver, 'migrate_volume',
lambda x, y, z, new_type_id=None: (True,
{'user_id': 'foo'}))
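        # the stub mimics driver.migrate_volume's (moved, model_update) return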
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
migration_status='migrating')
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume['id'],
host_obj, False)
# check volume properties
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['host'], 'newhost')
self.assertIsNone(volume['migration_status'])
def test_migrate_volume_generic(self):
def fake_migr(vol, host):
raise Exception('should not be called')
def fake_delete_volume_rpc(self, ctxt, vol_id):
raise Exception('should not be called')
def fake_create_volume(self, ctxt, volume, host, req_spec, filters,
allow_reschedule=True):
db.volume_update(ctxt, volume['id'],
{'status': 'available'})
self.stubs.Set(self.volume.driver, 'migrate_volume', fake_migr)
self.stubs.Set(volume_rpcapi.VolumeAPI, 'create_volume',
fake_create_volume)
self.stubs.Set(self.volume.driver, 'copy_volume_data',
lambda x, y, z, remote='dest': True)
self.stubs.Set(volume_rpcapi.VolumeAPI, 'delete_volume',
fake_delete_volume_rpc)
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume['id'],
host_obj, True)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['host'], 'newhost')
self.assertIsNone(volume['migration_status'])
def _retype_volume_exec(self, driver, snap=False, policy='on-demand',
migrate_exc=False, exc=None, diff_equal=False):
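        """Helper that drives a volume retype through the manager.
        `driver` is what the stubbed driver.retype returns; `migrate_exc`
        makes the stubbed migration raise; `exc` is the exception the caller
        expects (None on success); `diff_equal` is the equality flag returned
        by the stubbed volume_types_diff.
        """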
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})
old_vol_type = db.volume_type_get_by_name(elevated, 'old')
db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})
vol_type = db.volume_type_get_by_name(elevated, 'new')
db.quota_create(elevated, project_id, 'volumes_new', 10)
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host, status='retyping',
volume_type_id=old_vol_type['id'])
if snap:
self._create_snapshot(volume['id'], size=volume['size'])
host_obj = {'host': 'newhost', 'capabilities': {}}
reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(self.context,
reserve_opts,
vol_type['id'])
reservations = QUOTAS.reserve(self.context,
project_id=project_id,
**reserve_opts)
with mock.patch.object(self.volume.driver, 'retype') as _retype:
with mock.patch.object(volume_types, 'volume_types_diff') as _diff:
with mock.patch.object(self.volume, 'migrate_volume') as _mig:
_retype.return_value = driver
_diff.return_value = ({}, diff_equal)
if migrate_exc:
_mig.side_effect = KeyError
else:
_mig.return_value = True
if not exc:
self.volume.retype(self.context, volume['id'],
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations)
else:
self.assertRaises(exc, self.volume.retype,
self.context, volume['id'],
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations)
# get volume/quota properties
volume = db.volume_get(elevated, volume['id'])
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes_new')
volumes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
volumes_in_use = 0
# check properties
if not exc:
self.assertEqual(volume['volume_type_id'], vol_type['id'])
self.assertEqual(volume['status'], 'available')
self.assertEqual(volume['host'], 'newhost')
self.assertEqual(volumes_in_use, 1)
else:
self.assertEqual(volume['volume_type_id'], old_vol_type['id'])
self.assertEqual(volume['status'], 'available')
self.assertEqual(volume['host'], CONF.host)
self.assertEqual(volumes_in_use, 0)
def test_retype_volume_driver_success(self):
self._retype_volume_exec(True)
def test_retype_volume_migration_bad_policy(self):
# Test a volume retype that requires migration when migration is not allowed
self._retype_volume_exec(False, policy='never',
exc=exception.VolumeMigrationFailed)
def test_retype_volume_migration_with_snaps(self):
self._retype_volume_exec(False, snap=True, exc=exception.InvalidVolume)
def test_retype_volume_migration_failed(self):
self._retype_volume_exec(False, migrate_exc=True, exc=KeyError)
def test_retype_volume_migration_success(self):
self._retype_volume_exec(False, migrate_exc=False, exc=None)
def test_retype_volume_migration_equal_types(self):
self._retype_volume_exec(False, diff_equal=True)
def test_migrate_driver_not_initialized(self):
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.migrate_volume,
self.context, volume['id'],
host_obj, True)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume.migration_status, 'error')
# NOTE(flaper87): Set initialized to True,
# let's clean up the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume['id'])
def test_update_volume_readonly_flag(self):
"""Test volume readonly flag can be updated at API level."""
# create a volume and assign to host
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume_api = cinder.volume.api.API()
# Update fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.update_readonly_flag,
self.context,
volume,
False)
volume['status'] = 'available'
# works when volume in 'available' status
volume_api.update_readonly_flag(self.context, volume, False)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual(volume['status'], 'available')
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
self.assertEqual(admin_metadata[0]['value'], 'False')
# clean up
self.volume.delete_volume(self.context, volume['id'])
class CopyVolumeToImageTestCase(BaseVolumeTestCase):
def fake_local_path(self, volume):
return self.dst_path
def setUp(self):
super(CopyVolumeToImageTestCase, self).setUp()
self.dst_fd, self.dst_path = tempfile.mkstemp()
os.close(self.dst_fd)
self.stubs.Set(self.volume.driver, 'local_path', self.fake_local_path)
self.image_meta = {
'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'container_format': 'bare',
'disk_format': 'raw'
}
self.volume_id = 1
self.volume_attrs = {
'id': self.volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'uploading',
'host': 'dummy'
}
def tearDown(self):
db.volume_destroy(self.context, self.volume_id)
os.unlink(self.dst_path)
super(CopyVolumeToImageTestCase, self).tearDown()
def test_copy_volume_to_image_status_available(self):
# creating volume testdata
self.volume_attrs['instance_uuid'] = None
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual(volume['status'], 'available')
def test_copy_volume_to_image_status_use(self):
self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
# creating volume testdata
self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
'45b1161abb02'
db.volume_create(self.context, self.volume_attrs)
# start test
self.volume.copy_volume_to_image(self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual(volume['status'], 'in-use')
def test_copy_volume_to_image_exception(self):
self.image_meta['id'] = FAKE_UUID
# creating volume testdata
self.volume_attrs['status'] = 'in-use'
db.volume_create(self.context, self.volume_attrs)
# start test
self.assertRaises(exception.ImageNotFound,
self.volume.copy_volume_to_image,
self.context,
self.volume_id,
self.image_meta)
volume = db.volume_get(self.context, self.volume_id)
self.assertEqual(volume['status'], 'available')
class GetActiveByWindowTestCase(BaseVolumeTestCase):
def setUp(self):
super(GetActiveByWindowTestCase, self).setUp()
self.ctx = context.get_admin_context(read_deleted="yes")
self.db_attrs = [
{
'id': 1,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1),
},
{
'id': 2,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': 3,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'deleted': True, 'status': 'deleted',
'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1),
},
{
'id': 4,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 3, 10, 1, 1, 1),
},
{
'id': 5,
'host': 'devstack',
'project_id': 'p1',
'created_at': datetime.datetime(1, 5, 1, 1, 1, 1),
}
]
def test_volume_get_active_by_window(self):
# Find all volumes valid within a timeframe window.
# Not in window
db.volume_create(self.ctx, self.db_attrs[0])
# In - deleted in window
db.volume_create(self.ctx, self.db_attrs[1])
# In - deleted after window
db.volume_create(self.ctx, self.db_attrs[2])
# In - created in window
db.volume_create(self.context, self.db_attrs[3])
# Not in window.
db.volume_create(self.context, self.db_attrs[4])
volumes = db.volume_get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1),
project_id='p1')
self.assertEqual(len(volumes), 3)
self.assertEqual(volumes[0].id, u'2')
self.assertEqual(volumes[1].id, u'3')
self.assertEqual(volumes[2].id, u'4')
def test_snapshot_get_active_by_window(self):
# Find all snapshots valid within a timeframe window.
vol = db.volume_create(self.context, {'id': 1})
for i in range(5):
self.db_attrs[i]['volume_id'] = 1
# Not in window
db.snapshot_create(self.ctx, self.db_attrs[0])
# In - deleted in window
db.snapshot_create(self.ctx, self.db_attrs[1])
# In - deleted after window
db.snapshot_create(self.ctx, self.db_attrs[2])
# In - created in window
db.snapshot_create(self.context, self.db_attrs[3])
# Not in window.
db.snapshot_create(self.context, self.db_attrs[4])
snapshots = db.snapshot_get_active_by_window(
self.context,
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1),
project_id='p1')
self.assertEqual(len(snapshots), 3)
self.assertEqual(snapshots[0].id, u'2')
self.assertEqual(snapshots[0].volume.id, u'1')
self.assertEqual(snapshots[1].id, u'3')
self.assertEqual(snapshots[1].volume.id, u'1')
self.assertEqual(snapshots[2].id, u'4')
self.assertEqual(snapshots[2].volume.id, u'1')
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "cinder.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(volume_driver=self.driver_name,
volumes_dir=vol_tmpdir)
self.volume = importutils.import_object(CONF.volume_manager)
self.context = context.get_admin_context()
self.output = ""
self.stubs.Set(iscsi.TgtAdm, '_get_target', self.fake_get_target)
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
self.volume.driver.set_execute(_fake_execute)
self.volume.driver.set_initialized()
def tearDown(self):
try:
shutil.rmtree(CONF.volumes_dir)
except OSError:
pass
super(DriverTestCase, self).tearDown()
def fake_get_target(obj, iqn):
return 1
def _attach_volume(self):
"""Attach volumes to an instance."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
class GenericVolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver."""
driver_name = "cinder.tests.fake_driver.LoggingVolumeDriver"
def test_backup_volume(self):
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id']}
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
backup_service = self.mox.CreateMock(backup_driver.BackupDriver)
root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
self.mox.StubOutWithMock(self.volume.driver.db, 'volume_get')
self.mox.StubOutWithMock(cinder.brick.initiator.connector,
'get_connector_properties')
self.mox.StubOutWithMock(self.volume.driver, '_attach_volume')
self.mox.StubOutWithMock(os, 'getuid')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(fileutils, 'file_open')
self.mox.StubOutWithMock(self.volume.driver, '_detach_volume')
self.mox.StubOutWithMock(self.volume.driver, 'terminate_connection')
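        # record the expected sequence: look up the volume, attach it, chown
        # the device, stream it to the backup service, chown back, then detach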
self.volume.driver.db.volume_get(self.context, vol['id']).\
AndReturn(vol)
cinder.brick.initiator.connector.\
get_connector_properties(root_helper, CONF.my_ip).\
AndReturn(properties)
self.volume.driver._attach_volume(self.context, vol, properties).\
AndReturn(attach_info)
os.getuid()
utils.execute('chown', None, '/dev/null', run_as_root=True)
f = fileutils.file_open('/dev/null').AndReturn(file('/dev/null'))
backup_service.backup(backup, f)
utils.execute('chown', 0, '/dev/null', run_as_root=True)
self.volume.driver._detach_volume(self.context, attach_info, vol,
properties)
self.mox.ReplayAll()
self.volume.driver.backup_volume(self.context, backup, backup_service)
self.mox.UnsetStubs()
def test_restore_backup(self):
vol = tests_utils.create_volume(self.context)
backup = {'volume_id': vol['id'],
'id': 'backup-for-%s' % vol['id']}
properties = {}
attach_info = {'device': {'path': '/dev/null'}}
root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf'
backup_service = self.mox.CreateMock(backup_driver.BackupDriver)
self.mox.StubOutWithMock(cinder.brick.initiator.connector,
'get_connector_properties')
self.mox.StubOutWithMock(self.volume.driver, '_attach_volume')
self.mox.StubOutWithMock(os, 'getuid')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(fileutils, 'file_open')
self.mox.StubOutWithMock(self.volume.driver, '_detach_volume')
self.mox.StubOutWithMock(self.volume.driver, 'terminate_connection')
cinder.brick.initiator.connector.\
get_connector_properties(root_helper, CONF.my_ip).\
AndReturn(properties)
self.volume.driver._attach_volume(self.context, vol, properties).\
AndReturn(attach_info)
os.getuid()
utils.execute('chown', None, '/dev/null', run_as_root=True)
f = fileutils.file_open('/dev/null', 'wb').AndReturn(file('/dev/null'))
backup_service.restore(backup, vol['id'], f)
utils.execute('chown', 0, '/dev/null', run_as_root=True)
self.volume.driver._detach_volume(self.context, attach_info, vol,
properties)
self.mox.ReplayAll()
self.volume.driver.restore_backup(self.context, backup, vol,
backup_service)
self.mox.UnsetStubs()
class LVMISCSIVolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver"""
driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver"
def test_delete_busy_volume(self):
"""Test deleting a busy volume."""
self.stubs.Set(self.volume.driver, '_volume_not_present',
lambda x: False)
self.stubs.Set(self.volume.driver, '_delete_volume',
lambda x: False)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot',
lambda x: True)
self.assertRaises(exception.VolumeIsBusy,
self.volume.driver.delete_volume,
{'name': 'test1', 'size': 1024})
self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot',
lambda x: False)
self.output = 'x'
self.volume.driver.delete_volume({'name': 'test1', 'size': 1024})
def test_lvm_migrate_volume_no_loc_info(self):
host = {'capabilities': {}}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_bad_loc_info(self):
capabilities = {'location_info': 'foo'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_diff_driver(self):
capabilities = {'location_info': 'FooDriver:foo:bar:default:0'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_diff_host(self):
capabilities = {'location_info': 'LVMVolumeDriver:foo:bar:default:0'}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_in_use(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'}
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
self.assertIsNone(model_update)
def test_lvm_volume_group_missing(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes-3:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
def get_all_volume_groups():
return [{'name': 'cinder-volumes-2'}]
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, False)
self.assertIsNone(model_update)
def test_lvm_migrate_volume_proceed(self):
hostname = socket.gethostname()
capabilities = {'location_info': 'LVMVolumeDriver:%s:'
'cinder-volumes-2:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
def fake_execute(*args, **kwargs):
pass
def get_all_volume_groups():
# NOTE(flaper87) Return just the destination
# host to test the check of dest VG existence.
return [{'name': 'cinder-volumes-2'}]
def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
return [{}]
self.stubs.Set(brick_lvm.LVM,
'get_all_physical_volumes',
_fake_get_all_physical_volumes)
self.stubs.Set(self.volume.driver, '_execute', fake_execute)
self.stubs.Set(volutils, 'copy_volume',
lambda x, y, z, sync=False, execute='foo',
blocksize=mox.IgnoreArg(): None)
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
self.stubs.Set(self.volume.driver, '_delete_volume',
lambda x: None)
self.stubs.Set(self.volume.driver, '_create_export',
lambda x, y, vg='vg': None)
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, True)
self.assertIsNone(model_update)
@staticmethod
def _get_manage_existing_lvs(name):
"""Helper method used by the manage_existing tests below."""
lvs = [{'name': 'fake_lv', 'size': '1.75'},
{'name': 'fake_lv_bad_size', 'size': 'Not a float'}]
for lv in lvs:
if lv['name'] == name:
return lv
def _setup_stubs_for_manage_existing(self):
"""Helper to set up common stubs for the manage_existing tests."""
self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
False,
None,
'default')
self.stubs.Set(self.volume.driver.vg, 'get_volume',
self._get_manage_existing_lvs)
def test_lvm_manage_existing(self):
"""Good pass on managing an LVM volume.
This test case ensures that, when a logical volume with the
specified name exists, and the size is as expected, no error is
returned from driver.manage_existing, and that the rename_volume
function is called in the Brick LVM code with the correct arguments.
"""
self._setup_stubs_for_manage_existing()
ref = {'lv_name': 'fake_lv'}
vol = {'name': 'test', 'id': 1, 'size': 0}
def _rename_volume(old_name, new_name):
self.assertEqual(old_name, ref['lv_name'])
self.assertEqual(new_name, vol['name'])
self.stubs.Set(self.volume.driver.vg, 'rename_volume',
_rename_volume)
size = self.volume.driver.manage_existing_get_size(vol, ref)
self.assertEqual(size, 2)
model_update = self.volume.driver.manage_existing(vol, ref)
self.assertIsNone(model_update)
def test_lvm_manage_existing_bad_size(self):
"""Make sure correct exception on bad size returned from LVM.
This test case ensures that the correct exception is raised when
the information returned for the existing LVs is not in the format
that the manage_existing code expects.
"""
self._setup_stubs_for_manage_existing()
ref = {'lv_name': 'fake_lv_bad_size'}
vol = {'name': 'test', 'id': 1, 'size': 2}
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.driver.manage_existing_get_size,
vol, ref)
def test_lvm_manage_existing_bad_ref(self):
"""Error case where specified LV doesn't exist.
This test case ensures that the correct exception is raised when
the caller attempts to manage a volume that does not exist.
"""
self._setup_stubs_for_manage_existing()
ref = {'lv_name': 'fake_nonexistent_lv'}
vol = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.volume.driver.manage_existing_get_size,
vol, ref)
class LVMVolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver"""
driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver"
FAKE_VOLUME = {'name': 'test1',
'id': 'test1'}
def test_delete_volume_invalid_parameter(self):
configuration = conf.Configuration(fake_opt, 'fake_group')
configuration.volume_clear = 'zero'
configuration.volume_clear_size = 0
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
# Test volume without 'size' field and 'volume_size' field
self.assertRaises(exception.InvalidParameterValue,
lvm_driver._delete_volume,
self.FAKE_VOLUME)
def test_delete_volume_bad_path(self):
configuration = conf.Configuration(fake_opt, 'fake_group')
configuration.volume_clear = 'zero'
configuration.volume_clear_size = 0
volume = dict(self.FAKE_VOLUME, size=1)
lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.VolumeBackendAPIException,
lvm_driver._delete_volume, volume)
def test_delete_volume_thinlvm_snap(self):
configuration = conf.Configuration(fake_opt, 'fake_group')
configuration.volume_clear = 'zero'
configuration.volume_clear_size = 0
configuration.lvm_type = 'thin'
lvm_driver = lvm.LVMISCSIDriver(configuration=configuration,
vg_obj=mox.MockAnything())
# Ensures that copy_volume is not called for ThinLVM
self.mox.StubOutWithMock(volutils, 'copy_volume')
self.mox.StubOutWithMock(volutils, 'clear_volume')
self.mox.StubOutWithMock(lvm_driver, '_execute')
self.mox.ReplayAll()
uuid = '00000000-0000-0000-0000-c3aa7ee01536'
fake_snapshot = {'name': 'volume-' + uuid,
'id': uuid,
'size': 123}
lvm_driver._delete_volume(fake_snapshot, is_snapshot=True)
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
driver_name = "cinder.volume.drivers.lvm.LVMISCSIDriver"
base_driver = driver.ISCSIDriver
def setUp(self):
super(ISCSITestCase, self).setUp()
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.num_iscsi_scan_tries = 3
self.configuration.iscsi_num_targets = 100
self.configuration.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
self.configuration.iscsi_ip_address = '0.0.0.0'
self.configuration.iscsi_port = 3260
def _attach_volume(self):
"""Attach volumes to an instance."""
volume_id_list = []
for index in xrange(3):
vol = {}
vol['size'] = 0
vol_ref = db.volume_create(self.context, vol)
self.volume.create_volume(self.context, vol_ref['id'])
vol_ref = db.volume_get(self.context, vol_ref['id'])
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
instance_uuid = '12345678-1234-5678-1234-567812345678'
db.volume_attached(self.context, vol_ref['id'], instance_uuid,
mountpoint)
volume_id_list.append(vol_ref['id'])
return volume_id_list
def test_do_iscsi_discovery(self):
self.configuration.append_config_values(mox.IgnoreArg())
iscsi_driver = self.base_driver(configuration=self.configuration)
iscsi_driver._execute = lambda *a, **kw: \
("%s dummy" % CONF.iscsi_ip_address, '')
volume = {"name": "dummy",
"host": "0.0.0.0"}
iscsi_driver._do_iscsi_discovery(volume)
def test_get_iscsi_properties(self):
volume = {"provider_location": '',
"id": "0",
"provider_auth": "a b c",
"attached_mode": "rw"}
iscsi_driver = self.base_driver(configuration=self.configuration)
iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0"
result = iscsi_driver._get_iscsi_properties(volume)
self.assertEqual(result["target_portal"], "0.0.0.0:0000")
self.assertEqual(result["target_iqn"], "iqn:iqn")
self.assertEqual(result["target_lun"], 0)
def test_get_volume_stats(self):
def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
return [{}]
def _fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
return [{'name': 'cinder-volumes',
'size': '5.52',
'available': '0.52',
'lv_count': '2',
'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
self.stubs.Set(brick_lvm.LVM,
'get_all_volume_groups',
_fake_get_all_volume_groups)
self.stubs.Set(brick_lvm.LVM,
'get_all_physical_volumes',
_fake_get_all_physical_volumes)
self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
self.volume.driver._update_volume_stats()
stats = self.volume.driver._stats
self.assertEqual(stats['total_capacity_gb'], float('5.52'))
self.assertEqual(stats['free_capacity_gb'], float('0.52'))
def test_validate_connector(self):
iscsi_driver = self.base_driver(configuration=self.configuration)
# Validate a valid connector
connector = {'ip': '10.0.0.2',
'host': 'fakehost',
'initiator': 'iqn.2012-07.org.fake:01'}
iscsi_driver.validate_connector(connector)
# Validate a connector without the initiator
connector = {'ip': '10.0.0.2', 'host': 'fakehost'}
self.assertRaises(exception.VolumeBackendAPIException,
iscsi_driver.validate_connector, connector)
class ISERTestCase(ISCSITestCase):
"""Test Case for ISERDriver."""
driver_name = "cinder.volume.drivers.lvm.LVMISERDriver"
base_driver = driver.ISERDriver
def setUp(self):
super(ISERTestCase, self).setUp()
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.num_iser_scan_tries = 3
self.configuration.iser_num_targets = 100
self.configuration.iser_target_prefix = 'iqn.2010-10.org.openstack:'
self.configuration.iser_ip_address = '0.0.0.0'
self.configuration.iser_port = 3260
def test_get_volume_stats(self):
def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
return [{}]
def _fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True):
return [{'name': 'cinder-volumes',
'size': '5.52',
'available': '0.52',
'lv_count': '2',
'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]
self.stubs.Set(brick_lvm.LVM,
'get_all_physical_volumes',
_fake_get_all_physical_volumes)
self.stubs.Set(brick_lvm.LVM,
'get_all_volume_groups',
_fake_get_all_volume_groups)
self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
stats = self.volume.driver.get_volume_stats(refresh=True)
self.assertEqual(stats['total_capacity_gb'], float('5.52'))
self.assertEqual(stats['free_capacity_gb'], float('0.52'))
self.assertEqual(stats['storage_protocol'], 'iSER')
def test_get_volume_stats2(self):
iser_driver = self.base_driver(configuration=self.configuration)
stats = iser_driver.get_volume_stats(refresh=True)
self.assertEqual(stats['total_capacity_gb'], 'infinite')
self.assertEqual(stats['free_capacity_gb'], 'infinite')
self.assertEqual(stats['storage_protocol'], 'iSER')
class FibreChannelTestCase(DriverTestCase):
"""Test Case for FibreChannelDriver."""
driver_name = "cinder.volume.driver.FibreChannelDriver"
def test_initialize_connection(self):
self.driver = driver.FibreChannelDriver()
self.driver.do_setup(None)
self.assertRaises(NotImplementedError,
self.driver.initialize_connection, {}, {})
class VolumePolicyTestCase(test.TestCase):
def setUp(self):
super(VolumePolicyTestCase, self).setUp()
cinder.policy.reset()
cinder.policy.init()
self.context = context.get_admin_context()
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
def tearDown(self):
super(VolumePolicyTestCase, self).tearDown()
cinder.policy.reset()
def _set_rules(self, rules):
cinder.common.policy.set_brain(cinder.common.policy.Brain(rules))
def test_check_policy(self):
self.mox.StubOutWithMock(cinder.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
cinder.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
cinder.volume.api.check_policy(self.context, 'attach')
def test_check_policy_with_target(self):
self.mox.StubOutWithMock(cinder.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'id': 2,
}
cinder.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
cinder.volume.api.check_policy(self.context, 'attach', {'id': 2})
| {
"content_hash": "3f1f6748165d6d3c3f488c7c94e7afa8",
"timestamp": "",
"source": "github",
"line_count": 3201,
"max_line_length": 79,
"avg_line_length": 43.32739768822243,
"alnum_prop": 0.5477211931560086,
"repo_name": "spring-week-topos/cinder-week",
"id": "99752f6596daa3bfd4cfeb48c42a977d1f2ccea7",
"size": "139422",
"binary": false,
"copies": "1",
"ref": "refs/heads/spring-week",
"path": "cinder/tests/test_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6134883"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
} |
from csv import DictWriter
import os
import datetime
import sys
import time
import threading
import traceback
from rdflib.compare import isomorphic
from rdflib import Graph
from bagit import Bag
from .constants import EXT_BINARY_EXTERNAL
from .iterators import FcrepoWalker, LocalWalker
from .resources import FedoraResource, LocalResource
from .model import Repository
class FedoraImportExportVerifier:
"""Contains logic for performing a verification."""
def __init__(self, config, loggers):
self.config = config
self.loggers = loggers
def verify_bag(self):
"""Verifies the structure of the bag"""
console = self.loggers.console
console.info("Verifying bag...")
bag = Bag(self.config.dir)
if bag.is_valid():
console.info("bag is valid :)")
else:
console.info("bag is invalid :(")
def execute(self):
"""Executes the verification process."""
config = self.config
output_dir = self.config.output_dir
loggers = self.loggers
logger = loggers.file_only
console = loggers.console
console_only = loggers.console_only
# Check the repository connection
repo = Repository(config, loggers)
console.info("Testing connection to {0}...".format(repo.base))
if repo.is_reachable():
console.info("Connection successful.")
else:
console.error(
"Connection to {0} failed. Exiting.".format(repo.base)
)
sys.exit(1)
# Set up csv file, if specified
os.makedirs(output_dir, exist_ok=True)
datestr = datetime.datetime.today().strftime('%Y%m%d-%H%M')
csvfilename = "{0}/report-{1}.csv".format(output_dir, datestr)
csvfile = open(csvfilename, "w")
fieldnames = ["number", "type", "original", "destination",
"verified",
"verification"]
writer = DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
console.info("Starting verification...")
if config.mode == "export":
tree = FcrepoWalker(config, logger)
elif config.mode == "import":
tree = LocalWalker(config, logger)
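        # export mode walks the live repository tree; import mode walks the
        # exported directory on disk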
console.info(
"Running verification on Fedora 4 {0}".format(config.mode)
)
if config.bag:
self.verify_bag()
console.info("Commencing resource verification...")
success_count = 0
failure_count = 0
def total_count():
return success_count + failure_count
def log_summary(logger):
logger.info(
"Verified {} resources: successes = {}, failures = {}".format(
total_count(), success_count, failure_count)
)
def count_logger():
while True:
time.sleep(10)
log_summary(console_only)
t = threading.Thread(target=count_logger)
t.daemon = True
t.start()
# Step through the tree and verify resources
for filepath in tree:
# iterator can return None, in which case skip
if filepath is not None:
try:
# path begins with repository base = fedora resource
if filepath.startswith(config.repobase):
original = FedoraResource(filepath, config, logger,
console)
if not original.is_reachable:
verified = False
verification = "original not reachable"
# path begins with local root dir = local resource
elif filepath.startswith(config.dir):
original = LocalResource(filepath, config, logger,
console)
# any other path indicates an error
else:
# TODO: Consider handling this error and continuing
logger.error(
"Resource in unexpected location."
)
sys.exit(1)
# if binaries not included in export
if not config.bin:
# skip binaries and fcr:metadata
if original.type == "binary" or \
original.origpath.endswith("/fcr:metadata"):
continue
# filter refs to binary resources from rdf resources
else:
original.filter_binary_refs()
# create object representing destination resource
if filepath.startswith(config.repobase):
destination = LocalResource(original.destpath,
config,
loggers.file_only,
loggers.console)
elif filepath.startswith(config.dir):
destination = FedoraResource(original.destpath,
config,
loggers.file_only,
loggers.console)
# analyze the resource type
if original.type == "binary":
if destination.origpath.endswith(EXT_BINARY_EXTERNAL):
if not self.config.external:
continue
if original.sha1 == destination.sha1:
verified = True
verification = original.sha1
else:
verified = False
verification = "{0} != {1}".format(
original.sha1, destination.sha1
)
elif original.type == "rdf":
# if legacyMode is set, filter graph on import
if config.legacyMode:
if config.mode == "export":
pass
elif config.mode == "import":
to_filter = destination.server_managed
for p in to_filter.predicates():
                                # rdflib treats None as a wildcard: this
                                # removes every triple that uses this
                                # server-managed predicate
                                original.graph.remove((None, p, None))
destination.graph = destination.minimal
# compare the original and destination graphs
if isomorphic(original.graph, destination.graph):
verified = True
verification = \
"{0} triples".format(len(original.graph))
else:
verified = False
verification = ("{0}+{1} triples - mismatch"
.format(
len(original.graph),
len(destination.graph)
))
logger.info(
"RESOURCE {0}: {1} {2}".format(
total_count(), original.location, original.type)
)
except Exception as ex:
traceback.print_exc()
verified = False
verification = ("Object could not be verified: {"
"0}".format(ex))
if not verified:
logger.warn(
"Resource Mismatch \"{}\"".format(original.relpath)
)
failure_count += 1
else:
success_count += 1
if config.verbose:
logger.info(" rel => {}".format(original.relpath))
logger.info(" orig => {}".format(original.origpath))
logger.info(" dest => {}".format(original.destpath))
logger_method = logger.info
if not verified:
logger_method = logger.warn
logger_method(
" Verified original to copy... {0} -- {1}".format(
verified, verification)
)
            # write the result row to the csv report
row = {"number": str(total_count()),
"type": original.type,
"original": original.origpath,
"destination": original.destpath,
"verified": str(verified),
"verification": verification}
writer.writerow(row)
log_summary(console)
console.info("Verification complete")
csvfile.close()
| {
"content_hash": "73a92d4ce0d7d4411dfa0d8130ad18ce",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 78,
"avg_line_length": 39.932203389830505,
"alnum_prop": 0.44991511035653653,
"repo_name": "fcrepo4-labs/fcrepo-import-export-verify",
"id": "e100fd14d5fa1ed174e48474a5491cbaa36b2abf",
"size": "9424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fcrepo_verify/verifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35501"
}
],
"symlink_target": ""
} |
import os
import numpy as np
def gen_bbox(attenMapFile, thresh, bboxFile):
"""
A python wrap to generate bbox from a attention(heat) map
:param attenMapFile: map file
:param thresh: threshold para of dt_box function
:param bboxFile: file for dt_box to write the result
:return:
"""
tmp_file = bboxFile.split('.')[0] + '_tmp.txt'
thresh = thresh.astype(str)
argStr = 'bboxgenerator/./dt_box ' + attenMapFile
argStr = argStr + ' ' + thresh[0] + ' ' + thresh[1] + ' ' + thresh[2] + ' '
argStr = argStr + tmp_file
os.system(argStr)
with open(tmp_file) as f:
for line in f:
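# each dt_box output line holds x, y, w, h quadruplets (one per candidate
# box), parsed via the strided [i::4] slices below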
items = [int(x) for x in line.strip().split()]
boxData1 = np.array(items[0::4]).T
boxData2 = np.array(items[1::4]).T
boxData3 = np.array(items[2::4]).T
boxData4 = np.array(items[3::4]).T
boxData_formulate = np.array([boxData1, boxData2, boxData1 + boxData3, boxData2 + boxData4]).T
col1 = np.min(np.array([boxData_formulate[:, 0], boxData_formulate[:, 2]]), axis=0)
col2 = np.min(np.array([boxData_formulate[:, 1], boxData_formulate[:, 3]]), axis=0)
col3 = np.max(np.array([boxData_formulate[:, 0], boxData_formulate[:, 2]]), axis=0)
col4 = np.max(np.array([boxData_formulate[:, 1], boxData_formulate[:, 3]]), axis=0)
boxes = np.array([col1, col2, col3, col4]).T
# take the tightest box as attention box
keep_idx = 0
area = float('Inf')
for idx in range(boxes.shape[0]):
cur_area = (boxes[idx, 2] - boxes[idx, 0]) * (boxes[idx, 3] - boxes[idx, 1])
if cur_area < area:
area = cur_area
keep_idx = idx
return boxes[keep_idx, :]
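# A minimal usage sketch with hypothetical paths and thresholds; it assumes
# the bboxgenerator/dt_box binary is compiled and the map file exists.
if __name__ == '__main__':
    example_thresh = np.array([0.5, 0.5, 0.5])  # three values read by dt_box
    box = gen_bbox('maps/example_atten.txt', example_thresh,
                   'boxes/example_bbox.txt')
    print(box)  # tightest box as [x_min, y_min, x_max, y_max]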
| {
"content_hash": "2edbb9a5f2fdb9a43941dfa53a4666c9",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 98,
"avg_line_length": 37.22222222222222,
"alnum_prop": 0.6005970149253731,
"repo_name": "zhangyuygss/WSL",
"id": "c2b93bd456c6c400e8bcfe2dcddac9fea4b2a5ca",
"size": "1675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmp/bboxgenerator/gen_bbox.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4321"
},
{
"name": "C++",
"bytes": "11687"
},
{
"name": "Jupyter Notebook",
"bytes": "113518"
},
{
"name": "M",
"bytes": "210"
},
{
"name": "Makefile",
"bytes": "195"
},
{
"name": "Matlab",
"bytes": "50860"
},
{
"name": "Objective-C",
"bytes": "224"
},
{
"name": "Python",
"bytes": "263349"
}
],
"symlink_target": ""
} |
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers.recurrent import _standardize_args
from tensorflow.python.keras.layers.recurrent import DropoutRNNCellMixin
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util.tf_export import keras_export
class ConvRNN2D(RNN):
"""Base class for convolutional-recurrent layers.
Arguments:
cell: A RNN cell instance. A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is
the number of channels of the recurrent state
(which should be the same as the number of channels of the cell
output). This can also be a list/tuple of integers
(one size per state). In this case, the first entry
(`state_size[0]`) should be the same as
the size of the cell output.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
input_shape: Use this argument to specify the shape of the
input when this layer is the first one in a model.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
5D tensor with shape:
`(samples, timesteps, channels, rows, cols)`
if data_format='channels_first' or 5D tensor with shape:
`(samples, timesteps, rows, cols, channels)`
if data_format='channels_last'.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
- If `return_sequences`: 5D tensor with shape:
`(samples, timesteps, filters, new_rows, new_cols)`
if data_format='channels_first'
or 5D tensor with shape:
`(samples, timesteps, new_rows, new_cols, filters)`
if data_format='channels_last'.
- Else, 4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
Masking:
This layer supports masking for input data with a variable number
of timesteps.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
- If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
- If functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers,
e.g. `(32, 10, 100, 100, 32)`.
Note that the number of rows and columns should be specified
too.
- Specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
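    A short sketch using the ConvLSTM2D subclass of this layer (the
    filter and kernel values here are arbitrary):
      import tensorflow as tf
      model = tf.keras.Sequential()
      model.add(tf.keras.layers.ConvLSTM2D(
          filters=32, kernel_size=(3, 3), stateful=True,
          batch_input_shape=(32, 10, 100, 100, 32)))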
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
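    A short sketch passing a symbolic initial state to the ConvLSTM2D
    subclass (state shapes must match cell.state_size; ConvLSTM2D carries
    two states, h and c):
      inputs = tf.keras.Input(batch_shape=(4, 10, 32, 32, 3))
      init_h = tf.keras.Input(batch_shape=(4, 32, 32, 16))
      init_c = tf.keras.Input(batch_shape=(4, 32, 32, 16))
      layer = tf.keras.layers.ConvLSTM2D(filters=16, kernel_size=(3, 3),
                                         padding='same')
      outputs = layer(inputs, initial_state=[init_h, init_c])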
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if unroll:
raise TypeError('Unrolling isn\'t possible with '
'convolutional RNNs.')
if isinstance(cell, (list, tuple)):
# The StackedConvRNN2DCells isn't implemented yet.
      raise TypeError('It is not possible at the moment to '
                      'stack convolutional cells.')
super(ConvRNN2D, self).__init__(cell,
return_sequences,
return_state,
go_backwards,
stateful,
unroll,
**kwargs)
self.input_spec = [InputSpec(ndim=5)]
self.states = None
self._num_constants = None
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
cell = self.cell
if cell.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif cell.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(rows,
cell.kernel_size[0],
padding=cell.padding,
stride=cell.strides[0],
dilation=cell.dilation_rate[0])
cols = conv_utils.conv_output_length(cols,
cell.kernel_size[1],
padding=cell.padding,
stride=cell.strides[1],
dilation=cell.dilation_rate[1])
if cell.data_format == 'channels_first':
output_shape = input_shape[:2] + (cell.filters, rows, cols)
elif cell.data_format == 'channels_last':
output_shape = input_shape[:2] + (rows, cols, cell.filters)
if not self.return_sequences:
output_shape = output_shape[:1] + output_shape[2:]
if self.return_state:
output_shape = [output_shape]
if cell.data_format == 'channels_first':
output_shape += [(input_shape[0], cell.filters, rows, cols)
for _ in range(2)]
elif cell.data_format == 'channels_last':
output_shape += [(input_shape[0], rows, cols, cell.filters)
for _ in range(2)]
return output_shape
@tf_utils.shape_type_conversion
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:] # pylint: disable=E1130
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
step_input_shape = (input_shape[0],) + input_shape[2:]
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if hasattr(self.cell.state_size, '__len__'):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
if self.cell.data_format == 'channels_first':
ch_dim = 1
elif self.cell.data_format == 'channels_last':
ch_dim = 3
if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
raise ValueError(
'An initial_state was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'However `cell.state_size` is '
'{}'.format([spec.shape for spec in self.state_spec],
self.cell.state_size))
else:
if self.cell.data_format == 'channels_first':
self.state_spec = [InputSpec(shape=(None, dim, None, None))
for dim in state_size]
elif self.cell.data_format == 'channels_last':
self.state_spec = [InputSpec(shape=(None, None, None, dim))
for dim in state_size]
if self.stateful:
self.reset_states()
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
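# Convolving this zeros tensor with an all-zeros kernel is a shape trick:
# it produces a zeros tensor with exactly the spatial dimensions and
# channel count that the cell's state is expected to have.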
shape = list(self.cell.kernel_shape)
shape[-1] = self.cell.filters
initial_state = self.cell.input_conv(initial_state,
array_ops.zeros(tuple(shape),
initial_state.dtype),
padding=self.cell.padding)
if hasattr(self.cell.state_size, '__len__'):
return [initial_state for _ in self.cell.state_size]
else:
return [initial_state]
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = _standardize_args(
inputs, initial_state, constants, self._num_constants)
if initial_state is None and constants is None:
return super(ConvRNN2D, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
kwargs['initial_state'] = initial_state
additional_inputs += initial_state
self.state_spec = []
for state in initial_state:
shape = K.int_shape(state)
self.state_spec.append(InputSpec(shape=shape))
additional_specs += self.state_spec
if constants is not None:
kwargs['constants'] = constants
additional_inputs += constants
self.constants_spec = [InputSpec(shape=K.int_shape(constant))
for constant in constants]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
for tensor in additional_inputs:
if K.is_keras_tensor(tensor) != K.is_keras_tensor(additional_inputs[0]):
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors')
if K.is_keras_tensor(additional_inputs[0]):
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
full_input_spec = self.input_spec + additional_specs
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(ConvRNN2D, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
return super(ConvRNN2D, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' +
str(len(initial_state)) +
' initial states.')
timesteps = K.int_shape(inputs)[1]
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:]
states = states[:-self._num_constants]
return self.cell.call(inputs, states, constants=constants,
**kwargs)
else:
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
input_length=timesteps)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(K.update(self.states[i], states[i]))
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [output] + states
else:
return output
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
state_shape = self.compute_output_shape(input_shape)
if self.return_state:
state_shape = state_shape[0]
if self.return_sequences:
state_shape = state_shape[:1].concatenate(state_shape[2:])
if None in state_shape:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.\n'
'The same thing goes for the number of rows and '
'columns.')
# helper function
def get_tuple_shape(nb_channels):
result = list(state_shape)
if self.cell.data_format == 'channels_first':
result[1] = nb_channels
elif self.cell.data_format == 'channels_last':
result[3] = nb_channels
else:
raise KeyError('Unknown data_format: ' + str(self.cell.data_format))
return tuple(result)
# initialize state if None
if self.states[0] is None:
if hasattr(self.cell.state_size, '__len__'):
self.states = [K.zeros(get_tuple_shape(dim))
for dim in self.cell.state_size]
else:
self.states = [K.zeros(get_tuple_shape(self.cell.state_size))]
elif states is None:
if hasattr(self.cell.state_size, '__len__'):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state, np.zeros(get_tuple_shape(dim)))
else:
K.set_value(self.states[0],
np.zeros(get_tuple_shape(self.cell.state_size)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, ' +
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if hasattr(self.cell.state_size, '__len__'):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != get_tuple_shape(dim):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' +
self.name + ': expected shape=' +
str(get_tuple_shape(dim)) +
', found shape=' + str(value.shape))
# TODO(anjalisridhar): consider batch calls to `set_value`.
K.set_value(state, value)
class ConvLSTM2DCell(DropoutRNNCellMixin, Layer):
"""Cell class for the ConvLSTM2D layer.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(i.e. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.]
(http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 4D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(ConvLSTM2DCell, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = (self.filters, self.filters)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.filters,), *args, **kwargs),
initializers.Ones()((self.filters,), *args, **kwargs),
self.bias_initializer((self.filters * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.filters * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
# dropout matrices for input units
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
# dropout matrices for recurrent units
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
(kernel_i, kernel_f,
kernel_c, kernel_o) = array_ops.split(self.kernel, 4, axis=3)
(recurrent_kernel_i,
recurrent_kernel_f,
recurrent_kernel_c,
recurrent_kernel_o) = array_ops.split(self.recurrent_kernel, 4, axis=3)
if self.use_bias:
bias_i, bias_f, bias_c, bias_o = array_ops.split(self.bias, 4)
else:
bias_i, bias_f, bias_c, bias_o = None, None, None, None
x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)  # input gate
f = self.recurrent_activation(x_f + h_f)  # forget gate
c = f * c_tm1 + i * self.activation(x_c + h_c)  # updated carry (cell) state
o = self.recurrent_activation(x_o + h_o)  # output gate
h = o * self.activation(c)  # new hidden state
return h, [h, c]
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(x, w, strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b,
data_format=self.data_format)
return conv_out
def recurrent_conv(self, x, w):
conv_out = K.conv2d(x, w, strides=(1, 1),
padding='same',
data_format=self.data_format)
return conv_out
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2DCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.ConvLSTM2D')
class ConvLSTM2D(ConvRNN2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
By default the hyperbolic tangent activation function is applied
(`tanh(x)`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.]
(http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or `recurrent_dropout`
are set.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
Input shape:
- If data_format='channels_first'
5D tensor with shape:
`(samples, time, channels, rows, cols)`
- If data_format='channels_last'
5D tensor with shape:
`(samples, time, rows, cols, channels)`
Output shape:
- If `return_sequences`
- If data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- If data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- Else
- If data_format='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- If data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
where `output_row` and `output_col` depend on the shape of the filter and
the padding.
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
The current implementation does not include the feedback loop on the
cell's output.
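Example:
An illustrative usage sketch (the input shape and hyperparameters below
are assumed for demonstration, not prescribed):
    model = tf.keras.Sequential([
        tf.keras.layers.ConvLSTM2D(filters=40, kernel_size=(3, 3),
                                   padding='same', return_sequences=True,
                                   input_shape=(None, 40, 40, 1)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ConvLSTM2D(filters=40, kernel_size=(3, 3),
                                   padding='same'),
    ])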
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
cell = ConvLSTM2DCell(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
dtype=kwargs.get('dtype'))
super(ConvLSTM2D, self).__init__(cell,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
def call(self, inputs, mask=None, training=None, initial_state=None):
self._maybe_reset_cell_dropout_mask(self.cell)
return super(ConvLSTM2D, self).call(inputs,
mask=mask,
training=training,
initial_state=initial_state)
@property
def filters(self):
return self.cell.filters
@property
def kernel_size(self):
return self.cell.kernel_size
@property
def strides(self):
return self.cell.strides
@property
def padding(self):
return self.cell.padding
@property
def data_format(self):
return self.cell.data_format
@property
def dilation_rate(self):
return self.cell.dilation_rate
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'recurrent_activation': activations.serialize(
self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(
self.kernel_initializer),
'recurrent_initializer': initializers.serialize(
self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(
self.kernel_regularizer),
'recurrent_regularizer': regularizers.serialize(
self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(
self.activity_regularizer),
'kernel_constraint': constraints.serialize(
self.kernel_constraint),
'recurrent_constraint': constraints.serialize(
self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout}
base_config = super(ConvLSTM2D, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| {
"content_hash": "fe3ad7ec95f916d7e205ade379a0a8be",
"timestamp": "",
"source": "github",
"line_count": 1047,
"max_line_length": 83,
"avg_line_length": 41.10315186246418,
"alnum_prop": 0.6176135703497153,
"repo_name": "jhseu/tensorflow",
"id": "e5fb30083a481144563449988b97b062e1637bd1",
"size": "43759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/convolutional_recurrent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "27480"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "875455"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "80051513"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112748"
},
{
"name": "Go",
"bytes": "1853641"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1729057"
},
{
"name": "Makefile",
"bytes": "62498"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "304661"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19515"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "36791185"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "56741"
},
{
"name": "Shell",
"bytes": "685877"
},
{
"name": "Smarty",
"bytes": "35147"
},
{
"name": "Starlark",
"bytes": "3504187"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myinventory.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "c6ba40da3875e81d6b30b486695ef334",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.7155172413793104,
"repo_name": "tianz/MyInventory",
"id": "919d5a67ccdd5ac20a6e8c5588bcb5be13c38bcb",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5520"
},
{
"name": "HTML",
"bytes": "36586"
},
{
"name": "JavaScript",
"bytes": "18174"
},
{
"name": "Python",
"bytes": "32870"
}
],
"symlink_target": ""
} |
from django import forms
from django.db import models
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.admin import widgets
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.contrib.auth.models import User
from src.players.models import PlayerDB, PlayerAttribute
from src.utils import logger, create
# remove User itself from admin site
admin.site.unregister(User)
# handle the custom User editor
class CustomUserChangeForm(UserChangeForm):
username = forms.RegexField(label="Username",
max_length=30,
regex=r'^[\w. @+-]+$',
widget=forms.TextInput(attrs={'size':'30'}),
error_messages = {'invalid': "This value may contain only letters, spaces, numbers and @/./+/-/_ characters."},
help_text = "30 characters or fewer. Letters, spaces, digits and @/./+/-/_ only.")
class CustomUserCreationForm(UserCreationForm):
username = forms.RegexField(label="Username",
max_length=30,
regex=r'^[\w. @+-]+$',
widget=forms.TextInput(attrs={'size':'30'}),
error_messages = {'invalid': "This value may contain only letters, spaces, numbers and @/./+/-/_ characters."},
help_text = "30 characters or fewer. Letters, spaces, digits and @/./+/-/_ only.")
# # The Player editor
# class PlayerAttributeForm(forms.ModelForm):
# "Defines how to display the atttributes"
# class Meta:
# model = PlayerAttribute
# db_key = forms.CharField(label="Key",
# widget=forms.TextInput(attrs={'size':'15'}))
# db_value = forms.CharField(label="Value",
# widget=forms.Textarea(attrs={'rows':'2'}))
# class PlayerAttributeInline(admin.TabularInline):
# "Inline creation of player attributes"
# model = PlayerAttribute
# extra = 0
# form = PlayerAttributeForm
# fieldsets = (
# (None, {'fields' : (('db_key', 'db_value'))}),)
class PlayerForm(forms.ModelForm):
"Defines how to display Players"
class Meta:
model = PlayerDB
db_key = forms.RegexField(label="Username",
initial="PlayerDummy",
max_length=30,
regex=r'^[\w. @+-]+$',
required=False,
widget=forms.TextInput(attrs={'size':'30'}),
error_messages = {'invalid': "This value may contain only letters, spaces, numbers and @/./+/-/_ characters."},
help_text = "This should be the same as the connected Player's key name. 30 characters or fewer. Letters, spaces, digits and @/./+/-/_ only.")
db_typeclass_path = forms.CharField(label="Typeclass",
initial=settings.BASE_PLAYER_TYPECLASS,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Required. Defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass. Defaults to settings.BASE_PLAYER_TYPECLASS.")
db_permissions = forms.CharField(label="Permissions",
initial=settings.PERMISSION_PLAYER_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="In-game permissions. A comma-separated list of text strings checked by certain locks. They are often used for hierarchies, such as letting a Player have permission 'Wizards', 'Builders' etc. A Player permission can be overloaded by the permissions of a controlled Character. Normal players use 'Players' by default.")
db_lock_storage = forms.CharField(label="Locks",
widget=forms.Textarea(attrs={'cols':'100', 'rows':'2'}),
required=False,
help_text="In-game lock definition string. If not given, defaults will be used. This string should be on the form <i>type:lockfunction(args);type2:lockfunction2(args);...")
db_cmdset_storage = forms.CharField(label="cmdset",
initial=settings.CMDSET_OOC,
widget=forms.TextInput(attrs={'size':'78'}),
required=False,
help_text="python path to player cmdset class (settings.CMDSET_OOC by default)")
class PlayerInline(admin.StackedInline):
"Inline creation of Player"
model = PlayerDB
template = "admin/players/stacked.html"
form = PlayerForm
fieldsets = (
("In-game Permissions and Locks",
{'fields': ('db_permissions', 'db_lock_storage'),
'description':"<i>These are permissions/locks for in-game use. They are unrelated to website access rights.</i>"}),
("In-game Player data",
{'fields':('db_typeclass_path', 'db_cmdset_storage'),
'description':"<i>These fields define in-game-specific properties for the Player object in-game.</i>"}),
("Evennia In-game Character",
{'fields':('db_obj',),
'description': "<i>To actually play the game, a Player must control a Character. This could be added in-game instead of from here if some sort of character creation system is in play. If not, you should normally create a new Character here rather than assigning an existing one. Observe that the admin does not check for puppet-access rights when assigning Characters! If not creating a new Character, make sure the one you assign is not puppeted by someone else!</i>"}))
extra = 1
max_num = 1
class UserAdmin(BaseUserAdmin):
"This is the main creation screen for Users/players"
list_display = ('username','email', 'is_staff', 'is_superuser')
form = CustomUserChangeForm
add_form = CustomUserCreationForm
inlines = [PlayerInline]
add_form_template = "admin/players/add_form.html"
change_form_template = "admin/players/change_form.html"
change_list_template = "admin/players/change_list.html"
fieldsets = (
(None, {'fields': ('username', 'password', 'email')}),
('Website profile', {'fields': ('first_name', 'last_name'),
'description':"<i>These are not used in the default system.</i>"}),
('Website dates', {'fields': ('last_login', 'date_joined'),
'description':'<i>Relevant only to the website.</i>'}),
('Website Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser', 'user_permissions','groups'),
'description': "<i>These are permissions/permission groups for accessing the admin site. They are unrelated to in-game access rights.</i>"}),)
add_fieldsets = (
(None,
{'fields': ('username', 'password1', 'password2', 'email'),
'description':"<i>These account details are shared by the admin system and the game.</i>"},),)
def save_formset(self, request, form, formset, change):
"Run all hooks on the player object"
super(UserAdmin, self).save_formset(request, form, formset, change)
userobj = form.instance
playerobj = userobj.get_profile()
if not change:
#uname, passwd, email = str(request.POST.get(u"username")), \
# str(request.POST.get(u"password1")), str(request.POST.get(u"email"))
typeclass = str(request.POST.get(u"playerdb_set-0-db_typeclass_path"))
create.create_player("","","",
user=userobj,
typeclass=typeclass,
player_dbobj=playerobj,
create_character=False)
# if playerdb.db_obj:
# playerdb.db_obj.db_player = playerdb
# playerdb.db_obj.save()
#assert False, (form.instance, form.instance.get_profile())
admin.site.register(User, UserAdmin)
| {
"content_hash": "e25eefe71f5ddb2bb8ce4fda95988cad",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 481,
"avg_line_length": 56.34,
"alnum_prop": 0.5807596734114306,
"repo_name": "YourCyborg/Sun-RPI",
"id": "ab63684cca2615cbacaf0e58da9225d2c03ca50e",
"size": "8526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/players/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "10522"
},
{
"name": "Python",
"bytes": "2151966"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._skus_operations import build_list_by_subscription_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SkusOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.devcenter.aio.DevCenterMgmtClient`'s
:attr:`skus` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_subscription(self, top: Optional[int] = None, **kwargs: Any) -> AsyncIterable["_models.DevCenterSku"]:
"""Lists the Microsoft.DevCenter SKUs available in a subscription.
:param top: The maximum number of resources to return from the operation. Example: '$top=10'.
Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DevCenterSku or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.devcenter.models.DevCenterSku]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-11-11-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SkuListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
top=top,
api_version=api_version,
template_url=self.list_by_subscription.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SkuListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_subscription.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.DevCenter/skus"} # type: ignore
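# Illustrative usage (a sketch under assumed setup; a real credential and
# subscription id are required):
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.devcenter.aio import DevCenterMgmtClient
#
#     async with DevCenterMgmtClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         async for sku in client.skus.list_by_subscription(top=10):
#             print(sku.name)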
| {
"content_hash": "1822eb755292f430d38eb07f910cb45d",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 129,
"avg_line_length": 43.266666666666666,
"alnum_prop": 0.6319123437767505,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6cc845ed67e427bbf7df42880aae79a406d21862",
"size": "6341",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/devcenter/azure-mgmt-devcenter/azure/mgmt/devcenter/aio/operations/_skus_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
import re
import urllib2
import logging
import lighter.util as util
class VersionRange(object):
SPLIT = re.compile('[^\d\w_]+')
def __init__(self, expression):
result = VersionRange.parseExpression(expression)
if not result:
raise ValueError('%s is not a valid version range' % expression)
self._lbound, lversion, rversion, self._rbound = result.groups()
self._lversion = VersionRange.parseVersion(lversion)
self._rversion = VersionRange.parseVersion(rversion)
self._suffix = VersionRange.suffix(expression)
@staticmethod
def parseExpression(expression):
return re.match('([\(\[])\s*((?:\d+\.)*\d+)?\s*,\s*((?:\d+\.)*\d+)?\s*([\)\]])', expression)
@staticmethod
def isExpression(expression):
return bool(VersionRange.parseExpression(expression))
def accepts(self, version):
parsed = self.parseVersion(version)
return (self._lversion is None or self._lbound == '[' and self._lversion <= parsed or self._lbound == '(' and self._lversion < parsed) and \
(self._rversion is None or self._rbound == ']' and parsed <= self._rversion or self._rbound == ')' and parsed < self._rversion) and \
VersionRange.suffix(version) == self._suffix
@staticmethod
def suffix(version):
parts = version.split('-', 1)
return len(parts) == 2 and parts[1] or ''
@staticmethod
def issnapshot(version):
return version.endswith('-SNAPSHOT')
@staticmethod
def parseVersion(version):
if version:
return tuple(int(digit) for digit in VersionRange.SPLIT.split(version.split('-')[0]) if digit.isdigit())
return None
@staticmethod
def compareVersions(a, b):
av = VersionRange.parseVersion(a)
bv = VersionRange.parseVersion(b)
result = cmp(av, bv)
# Snapshots are less than a release with the same version number
if result == 0:
if VersionRange.issnapshot(a) and not VersionRange.issnapshot(b):
result = -1
elif not VersionRange.issnapshot(a) and VersionRange.issnapshot(b):
result = 1
return result
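# Illustrative semantics (Maven-style range expressions; the values are assumed):
#
#     VersionRange('[1.0,2.0)').accepts('1.5')           # True: within the half-open range
#     VersionRange('[1.0,2.0)').accepts('2.0')           # False: upper bound is exclusive
#     VersionRange('[1.0,2.0)').accepts('1.5-SNAPSHOT')  # False: suffix must match the expression's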
class Artifact(object):
def __init__(self, version, uniqueVersion, classifier, body):
self.version = version
self.uniqueVersion = (uniqueVersion or version) + (classifier and ('-' + classifier) or '')
self.body = body
class ArtifactVariables(object):
def __init__(self, wrappedResolver, artifact):
self._wrappedResolver = wrappedResolver
self._artifact = artifact
def clone(self):
return ArtifactVariables(self._wrappedResolver.clone(), self._artifact)
def pop(self, name):
if name == 'lighter.version':
return self._artifact.version
if name == 'lighter.uniqueVersion':
return self._artifact.uniqueVersion
return self._wrappedResolver.pop(name)
class ArtifactResolver(object):
def __init__(self, url, groupid, artifactid, classifier=None):
self._url = url
self._groupid = groupid
self._artifactid = artifactid
self._classifier = classifier
def get(self, version):
return self.fetch(version).body
def fetch(self, version):
trailer = '-SNAPSHOT'
if not version.endswith(trailer):
return self._fetch(version)
# Try to resolve unique/timestamped snapshot versions from maven-metadata.xml
logging.debug('Trying to resolve %s to a unique timestamp-buildnumber version', version)
url = '{0}/{1}/{2}/{3}/maven-metadata.xml'.format(self._url, self._groupid.replace('.', '/'), self._artifactid, version)
metadata = {}
try:
metadata = util.xmlRequest(url)
except urllib2.URLError:
logging.debug('Failed to fetch %s', url)
# Find a matching snapshot version (Gradle doesn't create <snapshotVersions> but Maven does)
timestamp = util.rget(metadata, 'versioning', 'snapshot', 'timestamp')
buildNumber = util.rget(metadata, 'versioning', 'snapshot', 'buildNumber')
snapshot = '-'.join(filter(bool, [version[0:len(version) - len(trailer)], timestamp, buildNumber])) if (timestamp is not None and buildNumber is not None) else None
return self._fetch(version, snapshot, metadata)
def _fetch(self, version, uniqueVersion=None, metadata={}):
url = '{0}/{1}/{2}/{3}/{2}-{4}'.format(self._url, self._groupid.replace('.', '/'), self._artifactid, version, uniqueVersion or version)
if self._classifier is not None:
url += '-' + self._classifier
url += '.json'
# Extract unique version number from metadata
if not uniqueVersion:
timestamp = util.rget(metadata, 'versioning', 'snapshot', 'timestamp') or util.rget(metadata, 'versioning', 'lastUpdated')
buildNumber = util.rget(metadata, 'versioning', 'snapshot', 'buildNumber')
if timestamp or buildNumber:
uniqueVersion = '-'.join(filter(bool, [version.replace('-SNAPSHOT', ''), timestamp, buildNumber]))
try:
return Artifact(version, uniqueVersion, self._classifier, util.jsonRequest(url))
except urllib2.HTTPError as e:
raise RuntimeError("Failed to retrieve %s HTTP %d (%s)" % (url, e.code, e)), None, sys.exc_info()[2]
except urllib2.URLError as e:
raise RuntimeError("Failed to retrieve %s (%s)" % (url, e)), None, sys.exc_info()[2]
def resolve(self, expression):
# If it's not a valid version range expression, assume it's a specific version
if not VersionRange.isExpression(expression):
return expression
# Fetch the available versions for this artifact
metadata = util.xmlRequest('{0}/{1}/{2}/maven-metadata.xml'.format(self._url, self._groupid.replace('.', '/'), self._artifactid))
versions = util.toList(util.rget(metadata, 'versioning', 'versions', 'version'))
logging.debug('%s:%s candidate versions %s', self._groupid, self._artifactid, versions)
# Select the version that best matches the version range expression
return self.selectVersion(expression, versions)
def selectVersion(self, expression, versions):
matcher = VersionRange(expression)
matches = [version for version in versions if matcher.accepts(version)]
matches.sort(VersionRange.compareVersions)
logging.debug('%s:%s matched %s to versions %s', self._groupid, self._artifactid, expression, matches)
if not matches:
raise RuntimeError('Failed to find a version that matches %s' % expression)
return matches[-1]
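# Illustrative usage (repository URL and artifact coordinates are assumed):
#
#     resolver = ArtifactResolver('https://repo.example.com/releases',
#                                 'com.example', 'myapp')
#     version = resolver.resolve('[1.0,2.0)')  # highest version matching the range
#     config = resolver.get(version)           # fetches myapp-<version>.json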
| {
"content_hash": "e5781ce17b22e9b9ed3ff02cb0684d03",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 148,
"avg_line_length": 43.03164556962025,
"alnum_prop": 0.6331813501985586,
"repo_name": "meltwater/lighter",
"id": "d90b5c1a4a8e3dc2304c032bf8f14d140289e345",
"size": "6799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lighter/maven.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "272"
},
{
"name": "Makefile",
"bytes": "247"
},
{
"name": "Python",
"bytes": "101880"
},
{
"name": "Shell",
"bytes": "3259"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core import exceptions
import unittest
import utils
class TestExceptionHandling(unittest.TestCase):
def setUp(self):
# these bogus values should be handled before raising a 500 error
settings.FOG_API_ROOT = 'http://1234.org'
settings.FOG_EMAIL = 'fake@fake.com'
# properly formatted test data
self.cleaned_data = {'priority': u'3', 'message': u'TEST MESSAGE', 'title': u'TEST'}
def test_get_priorities(self):
self.assertRaises(utils.GadgetError, utils.get_priorities)
def test_submit_ticket(self):
self.assertRaises(utils.GadgetError, utils.submit_ticket, self.cleaned_data)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "5de1db340393c66bddcdf71d16895c39",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 92,
"avg_line_length": 33.72727272727273,
"alnum_prop": 0.6846361185983828,
"repo_name": "sandersnewmedia/django-fogbugz-gadget",
"id": "238db4e36b8970ccccddc7cef313f5739e4680d6",
"size": "742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_fogbugz_gadget/tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2038"
},
{
"name": "Python",
"bytes": "6794"
}
],
"symlink_target": ""
} |
from django.db import transaction
from django.db.transaction import TransactionManagementError
from rest_framework import viewsets
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from billing.models import TaskExpense
from .serializers import UserSerializer, TaskSerializer
from django_example.users.models import User
from task.models import Task
class ExecutorViewSet(viewsets.ModelViewSet):
queryset = User.objects.filter(user_type=User.EXECUTOR)
serializer_class = UserSerializer
class CustomerViewSet(viewsets.ModelViewSet):
queryset = User.objects.filter(user_type=User.CUSTOMER)
serializer_class = UserSerializer
class TaskViewSet(viewsets.ModelViewSet):
queryset = Task.objects.all()
serializer_class = TaskSerializer
def create(self, request, *args, **kwargs):
request.data['created_by'] = request.user.pk
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@detail_route(methods=['POST', 'GET'])
def assign(self, request, pk):
try:
task = Task.objects.get(pk=pk, assignee=None)
except Task.DoesNotExist:
# DRF serializes dicts itself; wrapping in json.dumps would double-encode.
return Response({"message": "Already taken"}, status=status.HTTP_400_BAD_REQUEST)
expense, created = TaskExpense.objects.get_or_create(
task=task,
executor_id=request.user.pk,
money=task.money)
if created:
with transaction.atomic():
request.user.update_balance(u"Took the task", task.money, task=task)
Task.objects.filter(pk=pk, assignee=None).update(assignee=request.user)
return Response({'message': "Taken"}, status=status.HTTP_200_OK)
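# Illustrative client interaction (the URL prefix depends on the router config):
#
#     POST /tasks/<pk>/assign/  -> 200 {"message": "Taken"}
#     POST /tasks/<pk>/assign/  -> 400 {"message": "Already taken"}  (a later caller)
#
# The final filter(pk=pk, assignee=None).update(...) keeps assignment
# race-safe: only one concurrent request can flip `assignee` from None.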
| {
"content_hash": "c29834d5f44fa6cc18480fa402a9dd9f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 105,
"avg_line_length": 37.2037037037037,
"alnum_prop": 0.7142857142857143,
"repo_name": "gen1us2k/django-example",
"id": "5c615bdae89b3e2c2cfa4df96297320f6a93a39d",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1775"
},
{
"name": "HTML",
"bytes": "20224"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "49578"
},
{
"name": "Shell",
"bytes": "3944"
}
],
"symlink_target": ""
} |
import logging
import socket
from functools import partial
import msgpack as packer
from tornado import gen
from tornado import ioloop
from tornado.concurrent import Future
from tornado.iostream import StreamClosedError
from tornado.tcpclient import TCPClient
from trpc.util import _IDGenerator, LoadBance
log = logging.getLogger(__name__)
class ClientConnection(object):
timeout = 5
weight = 10
EOF = b't\nr\np\nc'
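# Wire format: each message is msgpack.dumps((request_id, method, payload))
# followed by the EOF delimiter above; replies use the same framing.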
_req_id = {}
_id = _IDGenerator()
def __init__(self,
conn=None,
lb=None,
addr=None,
name=None,
af=socket.AF_UNSPEC,
ssl_options=None,
max_buffer_size=None,
retry=5):
self.lb = lb
self.stream = None
self.retry = retry
self.conn = conn
self.addr = addr
self.host, self.port = addr
self.name = name
self.af = af
self.ssl_options = ssl_options
self.max_buffer_size = max_buffer_size
self.__try_connect_task = ioloop.PeriodicCallback(self.__conn, 1000)
@gen.coroutine
def start(self):
self._conn()
def on_close(self):
self.stream = None
self.lb.del_weight(self.name, self.addr)
log.error("service {} {}:{} closed".format(self.name, self.host, self.port))
@gen.coroutine
def __conn(self):
try:
log.info("try to connect service {} {}:{}".format(self.name, self.host, self.port))
self.stream = yield self.conn(
self.host,
self.port,
af=self.af,
ssl_options=self.ssl_options,
max_buffer_size=self.max_buffer_size
)
self.stream.read_until(self.EOF, self._on_message)
self.stream.set_close_callback(self.on_close)
self.lb.set_weight(self.name, self.addr)
if self.__try_connect_task.is_running():
self.__try_connect_task.stop()
log.info("connect to service {} {}:{} ok".format(self.name, self.host, self.port))
return
except StreamClosedError, e:
self.lb.del_weight(self.name, self.addr)
log.error(e)
except Exception, e:
self.lb.decrease(self.name, self.addr)
log.error(e)
if not self.__try_connect_task.is_running():
self.__try_connect_task.start()
self.stream = None
def _conn(self):
i = self.retry
while i > 0:
if self.stream:
return
self.__conn()
i -= 1
def _on_message(self, _data):
try:
self.on_data(_data)
self.stream.read_until(self.EOF, self._on_message)
except StreamClosedError, e:
self.lb.del_weight(self.name, self.addr)
log.error(e)
except Exception as e:
self.lb.decrease(self.name, self.addr)
log.error(e)
def __write_callback(self, _id):
log.info("{} write ok".format(_id))
def __call(self, _method, _data):
_id = next(self._id)
self._req_id[_id] = Future()
_x = lambda: self.stream.write(
packer.dumps((_id, _method, _data)) + self.EOF,
partial(self.__write_callback, _id)
)
if self.stream:
_x()
self.lb.increase(self.name, self.addr)
else:
self.__conn()
if self.stream:
_x()
self.lb.increase(self.name, self.addr)
else:
self._req_id[_id].set_result(None)
return self._req_id[_id]
def __call__(self, _method, _data):
return self.__call(_method, _data)
def on_data(self, _data):
try:
data, _ = _data.split(self.EOF)
_id, _data = packer.loads(data)
self._req_id[_id].set_result(_data)
        except Exception as e:
log.error(e)
class ClientEntity(object):
def __init__(self,
name=None,
conns=None):
self.name = name
self.conns = conns
class RPCClient(TCPClient):
client_entitys = {}
lb = LoadBance()
def add_service(self,
name=None,
address=None,
af=socket.AF_UNSPEC,
ssl_options=None,
max_buffer_size=None):
self.client_entitys[name] = ClientEntity(
name=name,
conns={addr: ClientConnection(
conn=self.connect,
lb=self.lb,
addr=addr,
name=name,
af=af,
ssl_options=ssl_options,
max_buffer_size=max_buffer_size
) for addr in address}
)
@gen.coroutine
def start_service(self):
for name, client_entity in self.client_entitys.iteritems():
for addr, conn in client_entity.conns.iteritems():
self.lb.set_weight(name, addr)
conn.start()
@gen.coroutine
def __call__(self, service_name, _method, _data):
addr = self.lb.get_key(service_name)
if not addr:
log.error("can not find service {}".format(service_name))
raise gen.Return()
_conn = self.client_entitys[service_name].conns.get(addr)
_res = yield _conn(_method, _data)
if not _res:
raise gen.Return()
raise gen.Return(_res)
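# Hypothetical usage sketch: assumes a compatible msgpack-over-TCP "echo"
# service is listening at 127.0.0.1:8001; the service name, method and
# payload below are illustrative only.
if __name__ == '__main__':
    @gen.coroutine
    def demo():
        client = RPCClient()
        client.add_service(name='echo', address=[('127.0.0.1', 8001)])
        yield client.start_service()
        # __call__ picks an address via the load balancer and sends a
        # (request_id, method, data) frame terminated by EOF.
        result = yield client('echo', 'say', {'msg': 'hello'})
        log.info(result)
    ioloop.IOLoop.current().run_sync(demo)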
| {
"content_hash": "d264072cd4296a2be5fa01f5dd518d91",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 95,
"avg_line_length": 28.223350253807105,
"alnum_prop": 0.5181654676258993,
"repo_name": "augustand/trpc",
"id": "4367fd99920134767fa9f71ad2d676e79f245c0b",
"size": "5584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trpc/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13808"
}
],
"symlink_target": ""
} |
from django.db import models
import random
from django.conf import settings
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
from mptt.models import MPTTModel, TreeForeignKey
import datetime
from easy_thumbnails.fields import ThumbnailerImageField
def make_upload_path(instance, filename, prefix = False):
    # Override the uploaded file's name with a random one (the extension is forced to .jpg).
n1 = random.randint(0,10000)
n2 = random.randint(0,10000)
n3 = random.randint(0,10000)
filename = str(n1)+"_"+str(n2)+"_"+str(n3) + '.jpg'
return u"%s/%s" % (settings.IMAGE_UPLOAD_DIR, filename)
class Menu(models.Model):
name = models.CharField(max_length=200, verbose_name="Name Menu")
def __str__(self):
return self.name
class Meta:
        verbose_name_plural = "Menus"
class MenuItem(MPTTModel):
menu = models.ForeignKey(Menu,null=True, blank=True, verbose_name="Name menu")
name = models.CharField(max_length=200, verbose_name="Name")
slug = models.CharField(max_length=250, blank=True, verbose_name="URL")
    # full_text = RichTextField(blank=True, verbose_name="Full description")
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', verbose_name="Parent menu item")
published = models.BooleanField(verbose_name="Published")
ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Menu items"
class MPTTMeta:
order_insertion_by = ['name']
class Category(models.Model):
name = models.CharField(max_length=250, verbose_name="Category")
    # title = models.CharField(max_length=250, blank=True, verbose_name="Browser title")
    # metakey = models.CharField(max_length=250, blank=True, verbose_name="Meta keywords")
    # metadesc = models.CharField(max_length=250, blank=True, verbose_name="Meta description")
    # slug = models.CharField(max_length=250, blank=True, verbose_name="URL")
    # parent = TreeForeignKey('self', null=True, blank=True, related_name='children', verbose_name="Parent category")
    # published = models.BooleanField(verbose_name="Published")
    # ordering = models.IntegerField(verbose_name="Sort order", default=0, blank=True, null=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "Categories"
verbose_name = "Category"
class Meta(models.Model):
meta_description = RichTextField(blank=True, verbose_name="Meta description")
meta_keywords = models.CharField(max_length=250, blank=True, verbose_name="Meta keywords")
meta_title = models.CharField(max_length=250, blank=True, verbose_name="Meta Title")
meta_author = models.CharField(max_length=250, blank=True, verbose_name="Meta Author")
favicon = models.ImageField(upload_to=make_upload_path, blank=True, verbose_name="favicon.ico")
favicon_slug = models.CharField(max_length=250, blank=True, verbose_name="URL favicon")
    published = models.BooleanField(verbose_name="Published", blank=True, default=False)
def __str__(self):
return self.meta_title
class Meta:
verbose_name_plural = "Meta descriptions"
verbose_name = "Meta description"
def pic(self):
if self.favicon:
return u'<img src="%s" width="70"/>' % self.favicon.url
else:
return '(none)'
pic.short_description = 'favicon'
pic.allow_tags = True
def pic_slug(self):
if self.favicon_slug:
return u'<img src="%s" width="70"/>' % self.favicon_slug
else:
return '(none)'
pic_slug.short_description = 'favicon slug'
pic_slug.allow_tags = True
# class Snipet(models.Model):
#     name = models.CharField(max_length=250, verbose_name="Name")
#     text = RichTextField(blank=True, verbose_name="Snippet code")
#     published = models.BooleanField(verbose_name="Published")
#     ordering = models.IntegerField(verbose_name="Sort order", default=0, blank=True, null=True)
#     def __str__(self):
#         return self.name
#     class Meta:
#         verbose_name_plural = "Snippets"
#         verbose_name = "Snippet"
# class Top(models.Model):
#     # image_back = models.ImageField(upload_to=make_upload_path, blank=True, verbose_name="Image_1200x118")
#     slug = models.CharField(max_length=250, blank=True, verbose_name="URL")
#     text_small = models.CharField(max_length=250, blank=True, verbose_name="Promise")
#     text_big = models.CharField(max_length=250, blank=True, verbose_name="Claim to victory")
#     published = models.BooleanField(verbose_name="Published")
#     def __str__(self):
#         return self.text_small
#     class Meta:
#         verbose_name_plural = "Headers"
#         verbose_name = "Header"
#     def pic(self):
#         if self.image_back:
#             return u'<img src="%s" width="70"/>' % self.image_back.url
#         else:
#             return '(none)'
#     pic.short_description = 'Large image'
#     pic.allow_tags = True
#     def pic_slug(self):
#         if self.slug:
#             return u'<img src="%s" width="70"/>' % self.slug
#         else:
#             return '(none)'
#     pic_slug.short_description = 'Header image'
#     pic_slug.allow_tags = True
class Slide(models.Model):
    category = models.ForeignKey(Category, related_name="slides", verbose_name="Category", blank=True, null=True)
name = models.CharField(max_length=250, verbose_name="Name")
image = ThumbnailerImageField(upload_to=make_upload_path, blank=True, verbose_name="Image")
    isHorizont = models.BooleanField(verbose_name="isHorizont", default=False, blank=True)
slug = models.CharField(max_length=250, blank=True, verbose_name="Url pic")
text1 = RichTextUploadingField(blank=True, verbose_name="Text1")
text2 = RichTextUploadingField(blank=True, verbose_name="Text2")
published = models.BooleanField(verbose_name="Published", blank=True)
    published_main = models.BooleanField(verbose_name="Carousel", default=False, blank=True)
    published_all_prod = models.BooleanField(verbose_name="Products", default=False, blank=True)
    published_news = models.BooleanField(verbose_name="News", default=False, blank=True)
    header_about = models.BooleanField(verbose_name="About", default=False, blank=True)
    published_portfolio = models.BooleanField(verbose_name="Portfolio", default=False, blank=True)
ordering = models.IntegerField(verbose_name="Ordering", default=0, blank=True, null=True)
def __str__(self):
return self.name
def pic(self):
if self.image:
return u'<img src="%s" width="70"/>' % self.image.url
else:
return '(none)'
pic.short_description = 'Slide'
pic.allow_tags = True
def pic_slug(self):
if self.slug:
return u'<img src="%s" width="70"/>' % self.slug
else:
return '(none)'
pic_slug.short_description = 'Slide slug'
pic_slug.allow_tags = True
class Meta:
verbose_name_plural = "Slides"
verbose_name = "Slide"
| {
"content_hash": "b0af59367fe44deef04c9f8f7fad7161",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 125,
"avg_line_length": 36.556650246305416,
"alnum_prop": 0.6530117234874007,
"repo_name": "skylifewww/pangolin-fog",
"id": "f994964cb338e3c9a3c69b954951a39a5a5653c7",
"size": "7738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "126434"
},
{
"name": "HTML",
"bytes": "154546"
},
{
"name": "JavaScript",
"bytes": "174324"
},
{
"name": "Makefile",
"bytes": "1483"
},
{
"name": "Nginx",
"bytes": "641"
},
{
"name": "Python",
"bytes": "177394"
}
],
"symlink_target": ""
} |
"""
To contain language-related ops.
"""
from collections import OrderedDict
import babel
from babel import Locale, UnknownLocaleError
from labmanager.babel import gettext
def obtain_groups():
"""
Obtains the groups that are available for translation, as an Ordered Dictionary.
:return: Ordered dictionary with the name of the groups identified by each key.
:rtype: OrderedDict
"""
groups = OrderedDict()
groups["ALL"] = "ALL"
groups["10-13"] = gettext("Preadolescence (age 10-13)")
groups["14-18"] = gettext("Adolescence (age 14-18)")
return groups
# Taken from http://en.wikipedia.org/wiki/Languages_of_the_European_Union, April 2015
OFFICIAL_EUROPEAN_UNION_LANGUAGES = ['bg', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fi', 'fr', 'de', 'el', 'hu', 'ga', 'it', 'lv', 'lt', 'mt', 'pl', 'pt', 'ro', 'sk', 'sl', 'es', 'sv']
SEMIOFFICIAL_EUROPEAN_UNION_LANGUAGES = ['eu', 'ca', 'gl', 'gd', 'cy']
OTHER_LANGUAGES = [
# The following languages are in Graasp
'uk', # Ukranian
'tr', # Turkish
'sr', # Serbian language
'ru', # Russian language
'be', # Belarussian
# The following languages are too widely used
'ar', # Arabic
'zh', # Chinese
'hi', # Hindi
# The following were available in the Go-Lab portal
'bs', # Bosnian
'sh', # Serbo-Croatian,
'lb', # Luxembourgish,
'se', # Northern Sami
# The following have been selected to be interesting for Go-Lab
'no', # Norwegian
'id', # Indonesian
'ja', # Japanese
'my', # Burmese
'mk', # Macedonian
]
ALL_LANGUAGES = OFFICIAL_EUROPEAN_UNION_LANGUAGES + SEMIOFFICIAL_EUROPEAN_UNION_LANGUAGES + OTHER_LANGUAGES
def obtain_languages():
"""
Obtains the languages (without the groups) that are available for translation,
as a Dictionary. The format is code:language_name
TO-DO: This method can probably be optimized.
:return:
"""
babel_supported_languages = babel.core.Locale("en", "US").languages.items()
languages = []
for code, lang in babel_supported_languages:
golab_supported = False
for supported_code in ALL_LANGUAGES:
if code == supported_code:
golab_supported = True
break
if golab_supported:
languages.append( (code, lang) )
if False:
print "Babel Supported languages after filter: %s" % len(languages)
print "Go-Lab Supported languages: %s" % len(ALL_LANGUAGES)
languages.sort(key=lambda it: it[1])
# TODO: Currently, we filter languages which contain "_" in their code so as to simplify.
# Because we use _ throughout the composer as a separator character, trouble is caused otherwise.
# Eventually we should consider whether we need to support special languages with _
# on its code.
targetlangs_codes = [lang[0] + "_ALL" for lang in languages if "_" not in lang[0]]
targetlangs_list = [{"pcode": code, "repr": get_locale_english_name(
*get_locale_info_from_code(code))} for code in targetlangs_codes]
d = {lang["pcode"]: lang["repr"] for lang in targetlangs_list}
d["all_ALL"] = "DEFAULT"
return d
def get_locale_info_from_code(code):
"""
Retrieves the lang, country and group from a full or partial locale code.
@param code: Locale code. It can be a full code (ca_ES_ALL) or partial code (ca_ES).
@return: (lang, country, group) or (lang, country), depending if it's full or partial.
"""
splits = code.split("_")
# If our code is only "ca_ES" style (doesn't include group).
if len(splits) == 2:
lang, country = splits
return lang, country
# If we have 3 splits then it is probably "ca_ES_ALL" style (includes group).
elif len(splits) == 3:
lang, country, group = splits
return lang, country, group
# Unknown number of splits. Throw an exception, it is not a recognized code.
else:
raise UnrecognizedLocaleCodeException("The locale code can't be recognized: " + code)
def get_locale_english_name(lang, country):
"""
Retrieves a string representation of a Locale.
@param lang: Lang code.
@param country: Country code.
@return: String representation for the locale.
"""
try:
if country.upper() == 'ALL':
country = ""
return Locale(lang, country).english_name
except UnknownLocaleError:
return Locale("en", "US").languages.get(lang)
class UnrecognizedLocaleCodeException(Exception):
"""
Exception thrown when the format of a locale code does not seem to be
as expected.
"""
| {
"content_hash": "4cc39ff61d0dd58a619330ea5e4c134a",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 180,
"avg_line_length": 35.47692307692308,
"alnum_prop": 0.6433217692974849,
"repo_name": "labsland/labmanager",
"id": "0d1c607126e730e1b32a60a70904bc519797d1ac",
"size": "4612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labmanager/translator/languages.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1428"
},
{
"name": "HTML",
"bytes": "125702"
},
{
"name": "JavaScript",
"bytes": "68509"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "450278"
},
{
"name": "Shell",
"bytes": "1807"
}
],
"symlink_target": ""
} |
"""
Position Tracking
=================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| asset | the asset held in this position |
+-----------------+----------------------------------------------------+
| amount | whole number of shares in the position |
+-----------------+----------------------------------------------------+
| last_sale_price | price at last sale of the asset on the exchange |
+-----------------+----------------------------------------------------+
| cost_basis | the volume weighted average price paid per share |
+-----------------+----------------------------------------------------+
"""
from __future__ import division
from math import copysign
from collections import OrderedDict
import numpy as np
import logbook
from catalyst.assets import Future, Asset
from catalyst.utils.input_validation import expect_types
from catalyst.constants import LOG_LEVEL
log = logbook.Logger('Performance', level=LOG_LEVEL)
class Position(object):
@expect_types(asset=Asset)
def __init__(self, asset, amount=0, cost_basis=0.0,
last_sale_price=0.0, last_sale_date=None):
self.asset = asset
self.amount = amount
self.cost_basis = cost_basis # per share
self.last_sale_price = last_sale_price
self.last_sale_date = last_sale_date
def earn_dividend(self, dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
'amount': self.amount * dividend.amount
}
def earn_stock_dividend(self, stock_dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
'payment_asset': stock_dividend.payment_asset,
'share_count': np.floor(
self.amount * float(stock_dividend.ratio)
)
}
@expect_types(asset=Asset)
def handle_split(self, asset, ratio):
"""
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the leftover cash, which will be converted into cash
# (rounded to the nearest cent)
return return_cash
def update(self, txn):
if self.asset != txn.asset:
raise Exception('updating position with txn for a '
'different asset')
total_shares = self.amount + txn.amount
if total_shares == 0:
self.cost_basis = 0.0
else:
prev_direction = copysign(1, self.amount)
txn_direction = copysign(1, txn.amount)
if prev_direction != txn_direction:
# we're covering a short or closing a position
if abs(txn.amount) > abs(self.amount):
# we've closed the position and gone short
# or covered the short position and gone long
self.cost_basis = txn.price
else:
prev_cost = self.cost_basis * self.amount
txn_cost = txn.amount * txn.price
total_cost = prev_cost + txn_cost
self.cost_basis = total_cost / total_shares
# Update the last sale price if txn is
# best data we have so far
if self.last_sale_date is None or txn.dt > self.last_sale_date:
self.last_sale_price = txn.price
self.last_sale_date = txn.dt
        # In live mode, if the fee currency exists, deduct the commission
        # from the position if necessary.
        # Note: fee_currency is compared to the base currency; once more
        # than one quote currency is supported, this comparison will need
        # to change.
if txn.commission is not None and \
txn.fee_currency == self.asset.base_currency:
total_shares -= txn.commission
self.amount = total_shares
@expect_types(asset=Asset)
def adjust_commission_cost_basis(self, asset, cost):
"""
A note about cost-basis in catalyst: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how catalyst handles positions, catalyst will
currently spread an externally-delivered commission charge across
all shares in a position.
"""
if asset != self.asset:
raise Exception('Updating a commission for a different asset?')
if cost == 0.0:
return
# If we no longer hold this position, there is no cost basis to
# adjust.
if self.amount == 0:
return
prev_cost = self.cost_basis * self.amount
if isinstance(asset, Future):
cost_to_use = cost / asset.multiplier
else:
cost_to_use = cost
new_cost = prev_cost + cost_to_use
self.cost_basis = new_cost / self.amount
def __repr__(self):
template = "asset: {asset}, amount: {amount}, cost_basis: {cost_basis}, \
last_sale_price: {last_sale_price}"
return template.format(
asset=self.asset,
amount=self.amount,
cost_basis=self.cost_basis,
last_sale_price=self.last_sale_price
)
def to_dict(self):
"""
Creates a dictionary representing the state of this position.
Returns a dict object of the form:
"""
return {
'sid': self.asset,
'amount': self.amount,
'cost_basis': self.cost_basis,
'last_sale_price': self.last_sale_price
}
class positiondict(OrderedDict):
def __missing__(self, key):
return None
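# Hypothetical sketch of the cost-basis arithmetic applied by
# Position.update(): buying 10 units at 100 and then 10 more at 110
# yields a volume weighted cost basis of 105 (values illustrative):
#
#   prev_cost  = 100 * 10            # 1000
#   txn_cost   = 110 * 10            # 1100
#   cost_basis = (1000 + 1100) / 20  # 105.0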
| {
"content_hash": "6dcc99448e9e1c471b00044773a52c75",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 81,
"avg_line_length": 35.622549019607845,
"alnum_prop": 0.5420393559928444,
"repo_name": "enigmampc/catalyst",
"id": "c263f53c9ca127bbb11c83df2a59e17fd808fc66",
"size": "7850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalyst/finance/performance/position.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7014"
},
{
"name": "Dockerfile",
"bytes": "2510"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "229701"
},
{
"name": "PowerShell",
"bytes": "3269"
},
{
"name": "Python",
"bytes": "4279642"
},
{
"name": "Shell",
"bytes": "7469"
}
],
"symlink_target": ""
} |
import tornado.web
class Entry(tornado.web.UIModule):
def render(self, entry, show_comments=False):
        print('entry render')
return self.render_string('module-entry.html', entry=entry,
show_comments=show_comments)
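# Hypothetical wiring sketch: UI modules become available to templates
# through the Application settings; the handler list and template path
# are illustrative only.
#
# import uimodules
#
# application = tornado.web.Application(
#     handlers=[...],
#     ui_modules=uimodules,  # enables {% module Entry(entry) %}
#     template_path='templates',
# )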
| {
"content_hash": "a5a1459a78c4422500e3521461a47f73",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6217228464419475,
"repo_name": "tao12345666333/Talk-Is-Cheap",
"id": "86e813ea8a39c95fb32a3b83ef38a3c75d8dad30",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tornado/simple/uimodules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103"
},
{
"name": "CSS",
"bytes": "21381"
},
{
"name": "Dockerfile",
"bytes": "1082"
},
{
"name": "Go",
"bytes": "8982"
},
{
"name": "HTML",
"bytes": "47807"
},
{
"name": "JavaScript",
"bytes": "88596"
},
{
"name": "Lua",
"bytes": "304"
},
{
"name": "Makefile",
"bytes": "80"
},
{
"name": "PHP",
"bytes": "1858"
},
{
"name": "Perl",
"bytes": "1331"
},
{
"name": "Python",
"bytes": "253685"
},
{
"name": "Ruby",
"bytes": "1510"
},
{
"name": "Rust",
"bytes": "45"
},
{
"name": "Shell",
"bytes": "6975"
},
{
"name": "Smarty",
"bytes": "319"
},
{
"name": "Vue",
"bytes": "40435"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.db import IntegrityError
from users.models import User
from profiles.models import GigPoster
from gigs.models import Gig
class TestGig(TestCase):
def setUp(self):
self.user = User.objects.create(username='myname',
email='me@me.com',
password='mypw')
self.poster = GigPoster.objects.create(user=self.user)
def test_null_create_fails(self):
self.assertRaises(IntegrityError, Gig.objects.create)
def test_poster_required(self):
gig = Gig.objects.create(poster=self.poster)
self.assertIsNotNone(gig)
def test_has_title(self):
gig = Gig.objects.create(poster=self.poster, title='thing to do')
self.assertEqual(gig.title, 'thing to do')
def test_has_description(self):
gig = Gig.objects.create(poster=self.poster, description='info about gig')
self.assertEqual(gig.description, 'info about gig')
def test_has_timestamps(self):
gig = Gig.objects.create(poster=self.poster)
self.assertLessEqual(gig.created, gig.modified)
def test_has_status(self):
# status is 'draft' by default
gig = Gig.objects.create(poster=self.poster)
self.assertEqual(gig.status, 'draft')
self.assertGreaterEqual(gig.status_changed, gig.created)
def test_has_skills_desired(self):
gig = Gig.objects.create(poster=self.poster)
self.assertEqual(gig.skills_desired.all().count(), 0)
gig.skills_desired.add('python')
self.assertItemsEqual(gig.skills_desired.names(), ['python'])
def test_str_is_title(self):
gig = Gig.objects.create(poster=self.poster, title='thing to do')
self.assertEqual(gig.title, str(gig))
| {
"content_hash": "5de531e19724a7fc2928faeed5e5162c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 82,
"avg_line_length": 37.02040816326531,
"alnum_prop": 0.6527012127894156,
"repo_name": "aschn/goodtechgigs",
"id": "900fbab7417a8c5c4bb23115223225a449202db6",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goodtechgigs/gigs/tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1212"
},
{
"name": "HTML",
"bytes": "23214"
},
{
"name": "JavaScript",
"bytes": "2387"
},
{
"name": "Python",
"bytes": "48935"
}
],
"symlink_target": ""
} |
"""Work with ellipe-relative spaces."""
import numpy
import mel.lib.moleimaging
class Transform:
def __init__(self, ellipse):
self.ellipse = ellipse
# center, up, right, umag, rmag = ellipse_center_up_right(ellipse)
# self.center, self.up, self.right = (
# numpy.array(x) for x in (center, up, right))
# self.mag = numpy.array((rmag, umag))
# self.inv_mag = 1 / self.mag
def to_space(self, pos):
return to_ellipse_space(self.ellipse, pos)
# pos = numpy.array(pos)
# pos -= self.center
# pos = numpy.array(
# numpy.dot(pos, self.right),
# numpy.dot(pos, self.up),
# )
# return pos * self.inv_mag
def from_space(self, pos):
return from_ellipse_space(self.ellipse, pos)
# pos = numpy.array(pos)
# return (self.right * pos[0] * self.mag[0]
# + self.up * pos[1] * self.mag[1]
# + self.center)
def from_ellipse_space(ellipse, pos):
center, up, right, umag, rmag = ellipse_center_up_right(ellipse)
p = (
int(right[0] * pos[0] * rmag + up[0] * pos[1] * umag + center[0]),
int(right[1] * pos[0] * rmag + up[1] * pos[1] * umag + center[1]),
)
return numpy.array(p)
def to_ellipse_space(ellipse, pos):
center, up, right, umag, rmag = ellipse_center_up_right(ellipse)
pos = (
pos[0] - center[0],
pos[1] - center[1],
)
pos = (
pos[0] * right[0] + pos[1] * right[1],
pos[0] * up[0] + pos[1] * up[1],
)
return numpy.array(
(
pos[0] / rmag,
pos[1] / umag,
)
)
def ellipse_center_up_right(ellipse):
center = ellipse[0]
center = mel.lib.moleimaging.point_to_int_point(center)
angle_degs = ellipse[2]
# TODO: do this properly
if angle_degs > 90:
angle_degs -= 180
up = (0, -1)
up = mel.lib.moleimaging.rotate_point_around_pivot(up, (0, 0), angle_degs)
right = (1, 0)
right = mel.lib.moleimaging.rotate_point_around_pivot(
right, (0, 0), angle_degs
)
umag = ellipse[1][1] / 2
rmag = ellipse[1][0] / 2
return center, up, right, umag, rmag
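# Hypothetical usage sketch: to_space and from_space are inverses, so a
# point mapped into ellipse space and back lands (up to integer
# rounding) on the original pixel. The ellipse tuple is assumed to
# follow OpenCV's fitEllipse convention:
# ((cx, cy), (width, height), angle_degrees).
#
# ellipse = ((100.0, 100.0), (40.0, 80.0), 0.0)
# transform = Transform(ellipse)
# p = transform.to_space((120, 100))  # ~ array([1.0, 0.0])
# q = transform.from_space(p)         # back to ~ array([120, 100])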
# -----------------------------------------------------------------------------
# Copyright (C) 2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"content_hash": "9317f055c904c61cdb9dc97170d26446",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 28,
"alnum_prop": 0.5510680907877169,
"repo_name": "aevri/mel",
"id": "c390554bffb55905bb237629eaf55af48cba5b79",
"size": "2996",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mel/lib/ellipsespace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "355692"
},
{
"name": "Shell",
"bytes": "2041"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
from django import forms
from django.conf import settings as django_settings
from django.contrib.auth.models import User
from django.contrib.sites.models import get_current_site
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse, NoReverseMatch
from django.core.validators import validate_email
from avocado.models import DataContext, DataView, DataQuery
from serrano import utils
from serrano.conf import settings
log = logging.getLogger(__name__)
SHARED_QUERY_EMAIL_TITLE = '{site_name}: {query_name} has been shared with '\
'you!'
SHARED_QUERY_EMAIL_BODY = 'View the query at {query_url}'
class ContextForm(forms.ModelForm):
def __init__(self, request, *args, **kwargs):
self.request = request
super(ContextForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
instance = super(ContextForm, self).save(commit=False)
request = self.request
if getattr(request, 'user', None) and request.user.is_authenticated():
instance.user = request.user
else:
instance.session_key = request.session.session_key
if commit:
instance.save()
return instance
class Meta(object):
model = DataContext
fields = ('name', 'description', 'keywords', 'json', 'session')
class ViewForm(forms.ModelForm):
def __init__(self, request, *args, **kwargs):
self.request = request
super(ViewForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
instance = super(ViewForm, self).save(commit=False)
request = self.request
if getattr(request, 'user', None) and request.user.is_authenticated():
instance.user = request.user
else:
instance.session_key = request.session.session_key
if commit:
instance.save()
return instance
class Meta(object):
model = DataView
fields = ('name', 'description', 'keywords', 'json', 'session')
class QueryForm(forms.ModelForm):
# A list of the usernames or email addresses of the User's who the query
# should be shared with. This is a string where each email/username is
# separated by a ','.
usernames_or_emails = forms.CharField(widget=forms.Textarea,
required=False)
message = forms.CharField(widget=forms.Textarea, required=False)
def __init__(self, request, *args, **kwargs):
self.request = request
super(QueryForm, self).__init__(*args, **kwargs)
def clean_usernames_or_emails(self):
"""
Cleans and validates the list of usernames and email address. This
method returns a list of email addresses containing the valid emails
and emails of valid users in the cleaned_data value for the
usernames_or_emails field.
"""
user_labels = self.cleaned_data.get('usernames_or_emails')
emails = set()
for label in user_labels.split(','):
# Remove whitespace from the label, there should not be whitespace
# in usernames or email addresses. This use of split is somewhat
# non-obvious, see the link below:
# http://docs.python.org/2/library/stdtypes.html#str.split
label = "".join(label.split())
if not label:
continue
try:
validate_email(label)
emails.add(label)
except ValidationError:
# If this user lookup label is not an email address, try to
# find the user with the supplied username and get the
# email that way. If no user with this username is found
# then give up since we only support email and username
# lookups.
try:
user = User.objects.only('email').get(username=label)
emails.add(user.email)
except User.DoesNotExist:
log.warning("Unable to share query with '{0}'. It is not "
"a valid email or username.".format(label))
return emails
def save(self, commit=True):
instance = super(QueryForm, self).save(commit=False)
request = self.request
if getattr(request, 'user', None) and request.user.is_authenticated():
instance.user = request.user
else:
instance.session_key = request.session.session_key
if commit:
instance.save()
script_name = getattr(django_settings, 'SCRIPT_NAME', '')
# The code to update the shared_users field on the Query model
# included inside this if statement because the shared_users
# field in inaccessible until the instance is saved which is only
# done in the case of commit being True. Using commit=False when
# saving the super class was not enough. That is the reason for
# this being embedded within the commit if and for the explicit
# save_m2m call below.
all_emails = self.cleaned_data.get('usernames_or_emails')
# Get the list of existing email addresses for users this query is
# already shared with. We only want to email users the first time
# a query is shared with them so we get the existing list of email
# addresses to avoid repeated emails to users about the same query.
existing_emails = set(instance.shared_users.all().values_list(
'email', flat=True))
new_emails = all_emails - existing_emails
site = get_current_site(request)
try:
site_url = request.build_absolute_uri(script_name + '/')
except KeyError:
site_url = site.domain + script_name
# Use the site url as the default query url in case there are
# issues generating the query url.
query_url = site_url
reverse_name = settings.QUERY_REVERSE_NAME
if reverse_name:
try:
query_url = reverse(reverse_name,
kwargs={'pk': instance.pk})
# Since reverse will just return the path to the query
# we need to prepend the site url to make it a valid
# link that people can follow.
try:
query_url = request.build_absolute_uri(query_url)
except KeyError:
query_url = site.domain + script_name + query_url
except NoReverseMatch:
log.warn("Could not reverse '{0}'. ".format(reverse_name))
else:
                log.warning('SERRANO_QUERY_REVERSE_NAME not found in settings.')
title = SHARED_QUERY_EMAIL_TITLE.format(query_name=instance.name,
site_name=site.name)
body = SHARED_QUERY_EMAIL_BODY.format(query_url=query_url)
if self.cleaned_data.get('message'):
body = '{0}\n\n--\n{1}'.format(
self.cleaned_data.get('message'), body)
# Email and register all the new email addresses
utils.send_mail(new_emails, title, body)
for email in new_emails:
instance.share_with_user(email)
# Find and remove users who have had their query share revoked
removed_emails = existing_emails - all_emails
for user in User.objects.filter(email__in=removed_emails):
instance.shared_users.remove(user)
self.save_m2m()
return instance
class Meta(object):
model = DataQuery
fields = ('name', 'description', 'keywords', 'context_json',
'view_json', 'session', 'public')
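# Hypothetical usage sketch: how a resource might drive QueryForm; the
# field values below are illustrative only.
#
# form = QueryForm(request, data={
#     'name': 'My query',
#     'usernames_or_emails': 'alice, bob@example.com',
#     'message': 'Have a look at this cohort.',
# })
# if form.is_valid():
#     query = form.save()  # emails newly shared users exactly once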
| {
"content_hash": "8b11d49b689773387582bbf8266d79c3",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 38.45238095238095,
"alnum_prop": 0.5877399380804954,
"repo_name": "chop-dbhi/serrano",
"id": "0f5565f2847d929ffa628805c1acc1434bb77953",
"size": "8075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serrano/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "84"
},
{
"name": "Python",
"bytes": "351235"
},
{
"name": "Shell",
"bytes": "2355"
}
],
"symlink_target": ""
} |
from django.contrib.auth import get_user_model
from django.db.models.signals import pre_delete, post_save, post_delete
from django.dispatch.dispatcher import receiver
from squads.models import SquadMember, Squad as SquadProfile
from ..models import Profile, Squad, Tour
User = get_user_model()
@receiver(post_save, sender=User)
def user_post_save(sender, instance, created, **kwargs):
if instance.is_active and not hasattr(instance, 'profile'):
try:
profile = Profile.objects.get(nickname=instance.username)
profile.connect_with_user(user=instance)
except Profile.DoesNotExist:
pass
@receiver(post_delete, sender=SquadMember)
def user_leave_squad(sender, instance, **kwargs):
user = instance.member
    # check whether the player has a profile, i.e. whether they have flown sorties on the server
if hasattr(user, 'profile'):
user.profile.squad = None
user.profile.save()
        # find all of the player's stats profiles in unfinished tours and clear the squad on them
user.profile.players.filter(tour__is_ended=False).update(squad=None)
    # find all of the squad's stats profiles in unfinished tours and recalculate the player count
for squad in instance.squad.stats.filter(tour__is_ended=False):
squad.save()
@receiver(post_save, sender=SquadMember)
def user_join_squad(sender, instance, created, **kwargs):
user = instance.member
    # check whether the player has a profile, i.e. whether they have flown sorties on the server
if hasattr(user, 'profile'):
user.profile.squad = instance.squad
user.profile.save()
    # the actual squad assignment happens during mission processing
@receiver(post_save, sender=SquadProfile)
def new_squad(sender, instance, created, **kwargs):
if created:
tour = Tour.objects.filter(is_ended=False).order_by('-id')[0]
        Squad.objects.create(tour_id=tour.pk, profile_id=instance.pk)
@receiver(post_save, sender=Profile)
def profile_post_save(sender, instance, created, **kwargs):
if instance.user and instance.nickname != instance.user.username:
        # just in case, check whether another user already has this new name
another_user = User.objects.exclude(id=instance.user.id).filter(username=instance.nickname)
        # if one exists, rename that user
if another_user:
another_user[0].username = 'renamed_user_{id}'.format(id=another_user[0].id)
another_user[0].save()
instance.user.username = instance.nickname
instance.user.save()
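# Hypothetical wiring sketch: module-level @receiver handlers only fire
# once this module is imported, typically from the app's AppConfig
# (class and module names illustrative):
#
# from django.apps import AppConfig
#
# class StatsConfig(AppConfig):
#     name = 'stats'
#
#     def ready(self):
#         from .signals import handlers  # noqa -- registers receivers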
| {
"content_hash": "6f4c9a0435d9876f4b7eb899d571479d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 99,
"avg_line_length": 39.65625,
"alnum_prop": 0.6985815602836879,
"repo_name": "Flyingfox646/flyingfox",
"id": "b7f9438f2b86ced1a1673c6849243c051c724516",
"size": "2911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/stats/signals/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1515"
},
{
"name": "CSS",
"bytes": "103959"
},
{
"name": "HTML",
"bytes": "317380"
},
{
"name": "JavaScript",
"bytes": "17458"
},
{
"name": "Python",
"bytes": "415174"
},
{
"name": "Shell",
"bytes": "1759"
}
],
"symlink_target": ""
} |
import click
import os
import os.path
import sys
import shutil
import yaml
import random
import string
import time
import re
from pkg_resources import get_distribution
from subprocess import call, check_call
from .hichipperHelp import *
from .hichipperProjectClass import *
from .hicproHelper import *
@click.command()
@click.version_option()
# User output / mode
@click.argument('mode')
@click.option('--out', "-o", default="hichipper_out", required=True, help='Output directory name; must not be already existing [Required]')
@click.option('--keep-temp-files', "-z", is_flag=True, help='Keep temporary files?')
# User input
@click.option('--input-vi', '-ii', default = "", help='Comma-separted list of interactions files for loop calling; option valid only in `call` mode')
@click.option('--restriction-frags', '-rf', default = "", help='Filepath to restriction fragment files; will overwrite specification of this file when a .yaml is supplied for mode')
@click.option('--peaks', '-p', default = "", help='Bed (or similary formatted) file; defers to what is in the .yaml')
# Essential options
@click.option('--keep-samples', "-k", default="ALL", help='Comma separated list of sample names to keep; ALL (special string) by default')
@click.option('--ignore-samples', "-x", default="NONE", help='Comma separated list of sample names to ignore; NONE (special string) by default')
@click.option('--read-length', "-l", default="75", help='Length of reads from sequencing runs; default = 75')
# Loop Distance options
@click.option('--min-dist', "-mi", default="5000", help='Minimum distance for loop calls; default = 5000')
@click.option('--max-dist', "-ma", default="2000000", help='Maximum distance for loop calls; default = 2000000')
# MACS2 Configurations
@click.option('--macs2-string', default="-q 0.01 --extsize 147 --nomodel", help='String of arguments to pass to MACS2; only is called when peaks are set to be called; default = "-q 0.01 --extsize 147 --nomodel"')
@click.option('--macs2-genome', default="hs", help='Argument to pass to the -g variable in MACS2 (mm for mouse genome; hs for human genome); default = "hs"')
# Loop anchor options
@click.option('--peak-pad', "-pp", default="500", help='Peak padding width (applied on both left and right); default = 500')
@click.option('--merge-gap', "-mg", default="500", help='Merge nearby peaks (after all padding is complete; default = 500')
@click.option('--no-merge', "-nm", is_flag=True, help='Completely skip anchor merging; will affect summary statistics. Not recommended unless understood what is happening.')
@click.option('--skip-resfrag-pad', is_flag=True, help='Skip restriction fragment aware padding')
@click.option('--skip-background-correction', is_flag=True, help='Skip restriction fragment aware background correction?')
# background options (IgG)
@click.option('--background-hicpro-dir', "-bh", default="", help='Path to the HiC-Pro directory containing the background sample')
@click.option('--background-sample', "-bs", default="", help='Sample name of the background HiC-Pro sample in the background directory.')
# External Dependencies
@click.option('--make-ucsc', "-mu", is_flag=True, help='Make additional output files that can support viewing in UCSC genome browser; requires tabix and bgzip; does the same thing as --make-washu.')
@click.option('--make-washu', "-mw", is_flag=True, help='Make additional output files that can support viewing in WashU genome browser; requires tabix and bgzip; does the same thing as --make-ucsc.')
@click.option('--basic-qc', is_flag=True, help='Create a simple QC report without Pandoc')
@click.option('--skip-diffloop', is_flag=True, help='Skip analyses in diffloop (e.g. Mango loop calling; .rds generation)')
# Software options
@click.option('--bedtools-path', default = "", help='Path to bedtools; by default, assumes that bedtools is in PATH')
@click.option('--macs2-path', default = "", help='Path to macs2; by default, assumes that macs2 is in PATH')
@click.option('--tabix-path', default = "", help='Path to samtools; by default, assumes that tabix is in PATH')
@click.option('--bgzip-path', default = "", help='Path to macs2; by default, assumes that bgzip is in PATH')
@click.option('--r-path', default = "", help='Path to R; by default, assumes that R is in PATH')
def main(mode, out, keep_temp_files,
input_vi, restriction_frags, peaks,
keep_samples, ignore_samples, read_length,
min_dist, max_dist,
macs2_string, macs2_genome,
peak_pad, merge_gap, no_merge, skip_resfrag_pad, skip_background_correction,
background_hicpro_dir, background_sample,
basic_qc, skip_diffloop, make_ucsc, make_washu,
bedtools_path, macs2_path, tabix_path, bgzip_path, r_path):
"""
hichipper: a preprocessing and QC pipeline for HiChIP data. \n
(c) Aryee Lab, 2019 \n
See https://hichipper.readthedocs.io for more details.\n
hichipper mode: [call, *.yaml]
^ either specify the word `call` and feed in a valid interactions file
OR specify the .yaml format for options to be parsed from a manifest file (see documentation)
"""
# Staples
__version__ = get_distribution('hichipper').version
#-------------------------------------------
# Initial verification for external software
#-------------------------------------------
if(make_ucsc or make_washu):
tabix = get_software_path("tabix", tabix_path)
bgzip = get_software_path("bgzip", bgzip_path)
else:
tabix = ""
bgzip = ""
bedtools = get_software_path("bedtools", bedtools_path)
# See if R is necessary
if( not (skip_background_correction and skip_resfrag_pad and basic_qc and skip_diffloop)):
Rscript = get_software_path("R", r_path) + "script"
else:
Rscript = ""
halfLength = int(float(read_length)/2)
    if not halfLength > 0:
sys.exit('ERROR: Specify appropriate read length > 1; QUITTING')
#------------------------------
# Handle initial QC reporting
#------------------------------
if os.path.exists(out):
sys.exit("ERROR: Output path (%s) already exists; remove it or specify a new location." % out)
os.mkdir(out)
logf = open(out + "/" + out + ".hichipper.log", 'w')
click.echo(gettime() + "Starting hichipper pipeline v%s" % __version__, file = logf)
click.echo(gettime() + "Starting hichipper pipeline v%s" % __version__)
# Handle directories
script_dir = os.path.dirname(os.path.realpath(__file__))
outfolder = os.path.abspath(out)
cwd = os.getcwd()
click.echo(gettime() + "Executed from: %s" % cwd, logf)
click.echo(gettime() + "Output folder: %s" % outfolder, logf)
click.echo(gettime() + "Parsing user parameters")
# Check for UCSC/WASHU Specification
if make_ucsc or make_washu:
ucscoutput = "true"
else:
ucscoutput = "false"
# Check no merge specification
if no_merge:
no_merge_str = "true"
else:
no_merge_str = "false"
#------------------------------
# If it is a manifest file, handle it as such; otherwise check for the loop call mode
#------------------------------
if mode.endswith(('.yaml', '.yml')):
m = parse_manifest(mode)
click.echo(gettime() + ".yaml file detected")
click.echo(gettime() + "Parsed manifest as follows: ", logf)
click.echo(m, logf)
elif(mode == "call"):
click.echo(gettime() + "Direct loop call option detected.")
        if not os.path.exists(peaks):
            sys.exit("ERROR: Peaks file (%s) cannot be found; with the `call` option, one must supply peaks" % peaks)
else:
sys.exit("ERROR: Mode option (%s) is invalid. Choose either 'call' or specify a valid path to a .yaml file." % mode)
# Project
p = hichipperProject(script_dir, mode, out, peaks, restriction_frags,
skip_resfrag_pad, skip_background_correction)
peaks = p.peaks
peakopts = ["COMBINED,ALL", "EACH,ALL", "COMBINED,SELF", "EACH,SELF"]
if(p.peaks in peakopts):
if(p.go == "call"):
            sys.exit('ERROR: `call` mode is only compatible with pre-defined peaks')
macs2 = get_software_path("macs2", macs2_path)
elif not os.path.isfile(peaks):
sys.exit('ERROR: Could not identify the ' + peaks + ' file; correctly specify file location or use special variable {COMBINED,EACH},{ALL,SELF} for peaks')
if(p.go == "yaml"):
# Get samples
samples = samplesHelper(keep_samples, ignore_samples, p.hicprooutput, logf)
click.echo(gettime() + "Determined that the following samples are good to go: ", logf)
click.echo(samples, logf)
# Set up peaks files if necessary; moreover, specify vector of peaks / sample
click.echo(gettime() + "User defined peaks specification: " + peaks, logf)
peakfilespersample = peakHelper(p.peaks, p.hicprooutput, p.resfrags, halfLength, peak_pad, out, samples,
Rscript, skip_resfrag_pad, skip_background_correction,
logf, macs2_string, macs2_genome, script_dir, no_merge_str)
logf.close()
is_background_sample = "false"
which_background_sample = ""
# Call putative interactions
for i in range(len(samples)):
hichipperRun = os.path.join(script_dir, 'interactionsCall.sh')
cmd = ['bash', hichipperRun, cwd, out, p.hicprooutput, samples[i], peakfilespersample[i], min_dist, max_dist, merge_gap, str(halfLength), ucscoutput, no_merge_str, is_background_sample, which_background_sample]
call(cmd)
if not os.path.isfile(out + "/" + samples[i] + ".stat"):
sys.exit('ERROR: something failed at the individual sample level; check the .log file for more info')
# Examine control samples
if(background_hicpro_dir != "" and background_sample != ""):
click.echo(gettime() + "NEW: Attempting to process additional background samples: " + background_sample)
# Call putative interactions
for i in range(len(samples)):
hichipperRun = os.path.join(script_dir, 'interactionsCall.sh')
is_background_sample = "true"
cmd = ['bash', hichipperRun, cwd, out, background_hicpro_dir, background_sample, peakfilespersample[i], min_dist, max_dist, merge_gap, str(halfLength), ucscoutput, no_merge_str, is_background_sample, samples[i]]
call(cmd)
if not os.path.isfile(out + "/background_" + samples[i] + ".stat"):
sys.exit('ERROR: something failed at the individual sample level; check the .log file for more info')
# Merge the background and foreground samples
cmdRmerge = [Rscript, os.path.join(script_dir, 'background_table_assemble.R'), cwd, out, samples[i]]
call(cmdRmerge)
else:
# do the new implementation for `call`
if(os.path.isfile(input_vi)):
click.echo(gettime() + "Verified valid interations file: %s" % input_vi, logf)
else:
sys.exit('ERROR: in `call` mode, specify `--input-vi` for valid interactions file.')
samples = ["one"]
hicprooutput = ""
# Most of this isn't needed but we'll call it for simplicity with the other parameters
peakfilespersample = peakHelper(p.peaks, hicprooutput, p.resfrags, halfLength, peak_pad, out, samples,
Rscript, skip_resfrag_pad, skip_background_correction,
logf, macs2_string, macs2_genome, script_dir, no_merge_str)
click.echo(gettime() + "Pulling interaction PETs from valid interactions file (rather than full HiC-pro output): " + peaks, logf)
logf.close()
hichipperRunFrags = os.path.join(script_dir, 'interactionsCall_inputFrags.sh')
i = 0
cmd = ['bash', hichipperRunFrags, cwd, out, input_vi, samples[i], peakfilespersample[i], min_dist, max_dist, merge_gap, str(halfLength), ucscoutput, no_merge_str]
call(cmd)
if not os.path.isfile(out + "/" + samples[i] + ".stat"):
sys.exit('ERROR: something failed at the individual sample level; check the .log file for more info')
# Back to Python
logf = open(out + "/" + out + ".hichipper.log", 'a')
# QC Report
if basic_qc:
click.echo(gettime() + "Skipping QC report generation since --skip-qc was specified", logf)
else:
click.echo(gettime() + "Creating QC report", logf)
cftg = ' '.join(samples)
call(['cp', os.path.join(script_dir, 'qcReport_make.Rmd'), out + '/qcReport_make.Rmd'])
call(['cp', os.path.join(script_dir, 'qcReport.R'), out + '/qcReport.R'])
cmd = [Rscript, out + '/qcReport.R', script_dir, str(out), cwd, __version__ , cftg]
click.echo(gettime())
click.echo(cmd)
call(cmd)
# diffloop work
if skip_diffloop:
click.echo(gettime() + "Skipping diffloop analyses since --skip-diffloop was specified", logf)
else:
click.echo(gettime() + "Creating .rds and .mango files", logf)
cftg = ' '.join(samples)
cmd = [Rscript, os.path.join(script_dir, 'diffloop_work.R'), cwd, out, cftg]
call(cmd)
# Temporary File Management
if keep_temp_files:
click.echo(gettime() + "Temporary files not deleted since --keep-temp-files was specified", logf)
else:
click.echo(gettime() + "Deleting temporary files", logf)
files = os.listdir(outfolder)
for file in files:
if file.endswith(".tmp"):
os.remove(os.path.join(outfolder,file))
elif "_temporary_" in file:
os.remove(os.path.join(outfolder,file))
elif "qcReport" in file:
os.remove(os.path.join(outfolder,file))
click.echo(gettime() + "Done", logf)
logf.close()
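# Hypothetical invocation sketch using click's test runner; the manifest
# path and flags are illustrative only.
#
# from click.testing import CliRunner
#
# runner = CliRunner()
# result = runner.invoke(main, ['config.yaml', '--out', 'hichipper_out',
#                               '--basic-qc', '--skip-diffloop'])
# assert result.exit_code == 0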
| {
"content_hash": "6151356b1aef0dae503e17249c518f95",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 223,
"avg_line_length": 46.173758865248224,
"alnum_prop": 0.6872744028876431,
"repo_name": "aryeelab/hichipper",
"id": "842c96824c05f1c8acb53cba3bd3394eda09a882",
"size": "13021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hichipper/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7849384"
},
{
"name": "Python",
"bytes": "37767"
},
{
"name": "R",
"bytes": "11108"
},
{
"name": "Shell",
"bytes": "19541"
}
],
"symlink_target": ""
} |
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import io
import os
import re
import unittest
import tempfile
from os.path import join, dirname, isfile
from pygments.util import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
import support
TESTFILE, TESTDIR = support.location(__file__)
with io.open(TESTFILE, encoding='utf-8') as fp:
tokensource = list(PythonLexer().get_tokens(fp.read()))
class HtmlFormatterTest(unittest.TestCase):
def test_correct_output(self):
hfmt = HtmlFormatter(nowrap=True)
houtfile = StringIO()
hfmt.format(tokensource, houtfile)
nfmt = NullFormatter()
noutfile = StringIO()
nfmt.format(tokensource, noutfile)
stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
escaped_text = escape_html(noutfile.getvalue())
self.assertEqual(stripped_html, escaped_text)
def test_external_css(self):
# test correct behavior
# CSS should be in /tmp directory
fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
# CSS should be in TESTDIR (TESTDIR is absolute)
fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
outencoding='utf-8')
tfile = tempfile.NamedTemporaryFile(suffix='.html')
fmt1.format(tokensource, tfile)
try:
fmt2.format(tokensource, tfile)
self.assertTrue(isfile(join(TESTDIR, 'fmt2.css')))
except IOError:
# test directory not writable
pass
tfile.close()
self.assertTrue(isfile(join(dirname(tfile.name), 'fmt1.css')))
os.unlink(join(dirname(tfile.name), 'fmt1.css'))
try:
os.unlink(join(TESTDIR, 'fmt2.css'))
except OSError:
pass
def test_all_options(self):
def check(optdict):
outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
for optdict in [
dict(nowrap=True),
dict(linenos=True, full=True),
dict(linenos=True, linespans='L'),
dict(hl_lines=[1, 5, 10, 'xxx']),
dict(hl_lines=[1, 5, 10], noclasses=True),
]:
check(optdict)
for linenos in [False, 'table', 'inline']:
for noclasses in [False, True]:
for linenospecial in [0, 5]:
for anchorlinenos in [False, True]:
optdict = dict(
linenos=linenos,
noclasses=noclasses,
linenospecial=linenospecial,
anchorlinenos=anchorlinenos,
)
check(optdict)
def test_linenos(self):
optdict = dict(linenos=True)
outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre>\s+1\s+2\s+3", html))
def test_linenos_with_startnum(self):
optdict = dict(linenos=True, linenostart=5)
outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre>\s+5\s+6\s+7", html))
def test_lineanchors(self):
optdict = dict(lineanchors="foo")
outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre><a name=\"foo-1\">", html))
def test_lineanchors_with_startnum(self):
optdict = dict(lineanchors="foo", linenostart=5)
outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<pre><a name=\"foo-5\">", html))
def test_valid_output(self):
# test all available wrappers
fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
outencoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
try:
import subprocess
po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
stdout=subprocess.PIPE)
ret = po.wait()
output = po.stdout.read()
po.stdout.close()
except OSError:
# nsgmls not available
pass
else:
if ret:
print(output)
self.assertFalse(ret, 'nsgmls run reported errors')
os.unlink(pathname)
def test_get_style_defs(self):
fmt = HtmlFormatter()
sd = fmt.get_style_defs()
self.assertTrue(sd.startswith('.'))
fmt = HtmlFormatter(cssclass='foo')
sd = fmt.get_style_defs()
self.assertTrue(sd.startswith('.foo'))
sd = fmt.get_style_defs('.bar')
self.assertTrue(sd.startswith('.bar'))
sd = fmt.get_style_defs(['.bar', '.baz'])
fl = sd.splitlines()[0]
self.assertTrue('.bar' in fl and '.baz' in fl)
def test_unicode_options(self):
fmt = HtmlFormatter(title=u'Föö',
cssclass=u'bär',
cssstyles=u'div:before { content: \'bäz\' }',
encoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
def test_ctags(self):
try:
import ctags
except ImportError:
# we can't check without the ctags module, but at least check the exception
self.assertRaises(RuntimeError, HtmlFormatter, tagsfile='support/tags')
else:
# this tagfile says that test_ctags() is on line 165, even if it isn't
# anymore in the actual source
fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
tagurlformat='%(fname)s%(fext)s')
outfile = StringIO()
fmt.format(tokensource, outfile)
self.assertTrue('<a href="test_html_formatter.py#L-165">test_ctags</a>'
in outfile.getvalue())
def test_filename(self):
optdict = dict(filename="test.py")
outfile = StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
self.assertTrue(re.search("<span class=\"filename\">test.py</span><pre>", html))
| {
"content_hash": "08ec3506e3bbfa4d20ed61ca43547570",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 88,
"avg_line_length": 35.4228855721393,
"alnum_prop": 0.5655898876404495,
"repo_name": "hacksterio/pygments.rb",
"id": "567de51f2cfa2ab6bb56f9474e1977b3b602c9d3",
"size": "7148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/pygments-main/tests/test_html_formatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "99523"
},
{
"name": "Python",
"bytes": "26555"
},
{
"name": "Ruby",
"bytes": "30390"
}
],
"symlink_target": ""
} |
import pytest
from scripttest import TestFileEnvironment as FileEnvironment
@pytest.fixture
def env():
return FileEnvironment()
def test_no_arguments(env):
res = env.run("gig", expect_error=True)
assert res.returncode == 1
assert "Usage:" in res.stderr
def test_single_language(env):
res = env.run("gig", "Python")
assert res.returncode == 0
assert "### Python ###" in res.stdout
def test_multiple_languages(env):
res = env.run("gig", "Python", "Ruby")
assert "*.py[cod]" in res.stdout
assert "*.gem" in res.stdout
assert "### Python ###" in res.stdout
assert "### Ruby ###" in res.stdout
def test_generate_global_gitignore(env):
res = env.run("gig", "Windows", "macOS", "--global")
assert "Thumbs.db" in res.stdout
assert ".DS_Store" in res.stdout
assert "### Windows ###" in res.stdout
assert "### macOS ###" in res.stdout
def test_no_header(env):
res = env.run("gig", "Python", "--no-header")
assert "Generated by gig" not in res.stdout
def test_error(env):
res = env.run("gig", "notfound", expect_error=True)
assert res.returncode == 1
| {
"content_hash": "1056891d95368be3c329cf83cbc644f6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 61,
"avg_line_length": 25.288888888888888,
"alnum_prop": 0.6362038664323374,
"repo_name": "sloria/gig",
"id": "903c3d864ec560c50b7a4fc0b6b4da78b8474024",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_gig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4429"
}
],
"symlink_target": ""
} |
from typing import Iterable
__all__ = [
'chunkify',
'chunkify_iterable',
]
def chunkify(rg, n_max=1000):
"""
generates sublists of at most n_max
elements from the provided array
:param rg: list
:param n_max: the maximum sublist length
:return:
"""
m, n_len = 0, len(rg)
while m < n_len:
l, m = m, m + n_max
yield rg[l:m]
def chunkify_iterable(what: Iterable, n_max: int = 1000):
"""
generates sublists of at most n_max
elements from the provided iterable
"""
n_max = n_max or 1000
chunk = []
for x in what:
chunk.append(x)
if len(chunk) == n_max:
yield chunk
chunk = []
if chunk:
yield chunk
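# Minimal usage sketch (illustrative only, not part of the module's API):
if __name__ == '__main__':
    # chunkify slices a concrete sequence into pieces of at most n_max:
    assert list(chunkify(list(range(7)), n_max=3)) == [[0, 1, 2], [3, 4, 5], [6]]
    # chunkify_iterable accepts any iterable, including one-shot generators:
    assert list(chunkify_iterable(iter(range(7)), n_max=3)) == [[0, 1, 2], [3, 4, 5], [6]]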
| {
"content_hash": "0059c4824cca7ba8202fcb7402f7e37f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 57,
"avg_line_length": 19.44736842105263,
"alnum_prop": 0.5480378890392422,
"repo_name": "2ps/djenga",
"id": "ff3574b1707ee7ac85cea068dbe04e1bbd83130f",
"size": "739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djenga/utils/list_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "314"
},
{
"name": "Python",
"bytes": "141564"
}
],
"symlink_target": ""
} |
"""
AllinkBasePluginLoadMoreView is going to be tested in test_plugins.py
"""
# from __future__ import unicode_literals
#
# from django.core.urlresolvers import reverse
# from django.http import Http404
# from django.test.client import RequestFactory
#
# from cms.utils.i18n import force_language
#
#
# from . import DefaultApphookMixin, BaseWorkTest, CMSRequestBasedTest
#
#
# class TestMainListView(BaseWorkTest, CMSRequestBasedTest):
#
# def test_list_view_with_only_en_apphook(self):
# page = self.create_apphook_page(multilang=False)
# # give some time for apphook reload middleware
# self.client.get(page.get_absolute_url())
#
# self.set_default_work_objects_current_language('en')
# with force_language('en'):
# url = page.get_absolute_url()
# work1_url = self.work1.get_absolute_url()
# response = self.client.get(url)
# self.assertEqual(response.status_code, 200)
# self.assertContains(response, self.work1.title)
# self.assertContains(response, work1_url)
# # should not contain person 2 since page for 'de' language is
# # not published
# self.assertNotContains(response, self.work2.title)
# self.assertNotContains(response, self.work2.slug)
#
# def test_list_view_with_en_and_de_apphook(self):
# page = self.create_apphook_page(multilang=True)
# # give some time for apphook reload middleware
# self.client.get(page.get_absolute_url())
# self.set_default_work_objects_current_language('en')
# with force_language('en'):
# url = page.get_absolute_url()
# work1_url = self.work1.get_absolute_url()
# work2_url = self.work2.get_absolute_url()
# response = self.client.get(url)
# self.assertEqual(response.status_code, 200)
# self.assertContains(response, self.work1.title)
# self.assertContains(response, self.work2.title)
# self.assertContains(response, work1_url)
# self.assertContains(response, work2_url)
| {
"content_hash": "12f8d3ae6bf127537d39cb98952222fe",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 71,
"avg_line_length": 40.78431372549019,
"alnum_prop": 0.6615384615384615,
"repo_name": "allink/allink-apps",
"id": "9347da4d63a80431285671f23d79bdc6c76a2b30",
"size": "2109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "work/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "994"
},
{
"name": "HTML",
"bytes": "47533"
},
{
"name": "Python",
"bytes": "183917"
}
],
"symlink_target": ""
} |
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
DEBUG=True,
USE_TZ=True,
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"account",
"account.tests",
],
MIDDLEWARE_CLASSES=[
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="account.tests.urls",
SECRET_KEY="notasecret",
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
)
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
try:
from django.test.runner import DiscoverRunner
runner_class = DiscoverRunner
test_args = ["account.tests"]
except ImportError:
from django.test.simple import DjangoTestSuiteRunner
runner_class = DjangoTestSuiteRunner
test_args = ["tests"]
failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == "__main__":
runtests(*sys.argv[1:])
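# Note: as written, the DiscoverRunner branch replaces any command-line
# arguments with ["account.tests"], so ``python runtests.py`` always runs
# the full suite; forwarding ``sys.argv[1:]`` only takes effect on the
# legacy DjangoTestSuiteRunner fallback.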
| {
"content_hash": "4398156cd091dc86864ab979434b75a0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 95,
"avg_line_length": 29.988095238095237,
"alnum_prop": 0.5843588725684795,
"repo_name": "jpotterm/django-user-accounts",
"id": "103f3e2417c8ac532755b57ccd7dac6bfe87c2ce",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "112"
},
{
"name": "Python",
"bytes": "188954"
}
],
"symlink_target": ""
} |
import unittest
import io
from ppci.binutils.archive import archive, get_archive
from ppci.binutils.linker import link
from ppci.binutils.objectfile import ObjectFile
from ppci.api import get_arch
class ArchiveFileTestCase(unittest.TestCase):
def test_save_load(self):
""" Test some simple code paths of save and load. """
lib = archive([])
f = io.StringIO()
lib.save(f)
f2 = io.StringIO(f.getvalue())
lib2 = get_archive(f2)
self.assertTrue(lib2)
def test_linking(self):
""" Test pull in of undefined symbols from libraries. """
arch = get_arch('msp430')
obj1 = ObjectFile(arch)
obj1.create_section('foo')
obj1.add_symbol(0, 'printf', 'global', None, None, 'func', 0) # undefined
obj2 = ObjectFile(arch)
obj3 = ObjectFile(arch)
obj3.create_section('foo')
obj3.add_symbol(0, 'syscall', 'global', 0, 'foo', 'func', 0) # defined
lib1 = archive([obj2, obj3])
obj4 = ObjectFile(arch)
obj4.create_section('foo')
obj4.add_symbol(0, 'putc', 'global', 0, 'foo', 'func', 0) # defined
obj4.add_symbol(1, 'syscall', 'global', None, None, 'func', 0) # undefined
obj5 = ObjectFile(arch)
obj5.create_section('foo')
obj5.add_symbol(0, 'printf', 'global', 0, 'foo', 'func', 0) # defined
obj5.add_symbol(1, 'putc', 'global', None, None, 'func', 0) # undefined
lib2 = archive([obj4, obj5])
obj = link([obj1], libraries=[lib1, lib2])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "450c516a7cfa3090b4c04302f576d9e9",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 83,
"avg_line_length": 34.04255319148936,
"alnum_prop": 0.591875,
"repo_name": "windelbouwman/ppci-mirror",
"id": "3ecddc8485ef28c87f9544c15622c868413af33c",
"size": "1600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/binutils/test_archive.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "94"
},
{
"name": "Brainfuck",
"bytes": "5867"
},
{
"name": "C",
"bytes": "229265"
},
{
"name": "C++",
"bytes": "1257"
},
{
"name": "Coq",
"bytes": "98028"
},
{
"name": "HTML",
"bytes": "363"
},
{
"name": "JavaScript",
"bytes": "2165"
},
{
"name": "LLVM",
"bytes": "11206"
},
{
"name": "Python",
"bytes": "2991165"
},
{
"name": "Shell",
"bytes": "960"
},
{
"name": "Verilog",
"bytes": "9363"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.test.testcases import TestCase, SimpleTestCase
from germanium.signals import set_up, set_up_class, tear_down, tear_down_class
from germanium.config import TEST_ALL_DATABASES
class GermaniumSimpleTestCaseMixin:
if TEST_ALL_DATABASES:
databases = list(settings.DATABASES.keys())
@classmethod
def setUpClass(cls):
set_up_class.send(sender=cls)
super().setUpClass()
cls.set_up_class()
@classmethod
def set_up_class(cls):
pass
@classmethod
def tearDownClass(cls):
tear_down_class.send(sender=cls)
super().tearDownClass()
cls.tear_down_class()
@classmethod
def tear_down_class(cls):
pass
def setUp(self):
set_up.send(sender=self.__class__)
super().setUp()
self.set_up()
def set_up(self):
pass
def tearDown(self):
tear_down.send(sender=self.__class__)
super().tearDown()
self.tear_down()
def tear_down(self):
pass
class GermaniumTestCaseMixin(GermaniumSimpleTestCaseMixin):
if getattr(settings, 'GERMANIUM_FIXTURES', None):
fixtures = getattr(settings, 'GERMANIUM_FIXTURES', None)
class GermaniumTestCase(GermaniumTestCaseMixin, TestCase):
pass
class GermaniumSimpleTestCase(GermaniumSimpleTestCaseMixin, SimpleTestCase):
pass
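# Minimal usage sketch (``Article`` is hypothetical, not part of germanium):
# subclasses override the snake_case hooks instead of setUp/tearDown, and the
# set_up/tear_down signals fire around every test automatically.
#
# class ArticleTestCase(GermaniumTestCase):
#
#     def set_up(self):
#         self.article = Article.objects.create(title='x')
#
#     def test_title(self):
#         self.assertEqual(self.article.title, 'x')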
| {
"content_hash": "92539b6efbf2479d97b7fa124f1f1cab",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 22.317460317460316,
"alnum_prop": 0.6650071123755334,
"repo_name": "druids/germanium",
"id": "f1041061b12bef9af8e85d054a9e3e4c83c3dbfb",
"size": "1406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "germanium/test_cases/default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60890"
}
],
"symlink_target": ""
} |
import os
import subprocess
from os.path import isfile, join
binary_path = "/home/kivan/source/cv-stereo/build/sgm_cnn/release/sgm_cnn"
#binary_path = "/home/kivan/source/cv-stereo/build/sgm_cnn_new/release/sgm"
#data_folder = "/home/kivan/source/deep-metric-learning/output/results/ThuJun1122:16:492015/results/"
#out_folder = "/home/kivan/source/deep-metric-learning/output/results/ThuJun1122:16:492015/results/depth/"
#data_dir = "/home/kivan/source/deep-metric-learning/output/results/ThuJun1800:22:462015/results/"
#data_dir = "/home/kivan/source/deep-metric-learning/output/results/ThuJun1822:33:402015/results/"
#data_dir = '/home/kivan/source/deep-metric-learning/output/results/FriJun1900:57:092015/results/'
#data_dir = '/home/kivan/source/deep-metric-learning/output/results/ThuSep316:10:232015/results/test/'
#data_dir = '/home/kivan/source/deep-metric-learning/output/learned_models/WedSep913:32:102015/results/test/'
#data_dir = '/home/kivan/source/deep-metric-learning/output/learned_models/FriSep1100:25:162015/results/train/'
#data_dir = '/home/kivan/source/deep-metric-learning/output/learned_models/Fri11Sep201503:21:51PMCEST/results/train'
#data_dir = '/home/kivan/source/deep-metric-learning/output/results/Wed23Sep201510:35:12PMCEST/'
#data_dir = '/home/kivan/source/deep-metric-learning/output/learned_models/Wed23Sep201510:35:12PMCEST/results/train/'
data_dir = '/home/kivan/source/deep-metric-learning/output/learned_models/ThuSep2414:54:062015/results/train/'
data_folder = data_dir + "/representation/"
out_folder = data_dir + "/depth/"
if not os.path.exists(out_folder):
os.makedirs(out_folder)
os.makedirs(out_folder + "/disparities/")
os.makedirs(out_folder + "/norm_hist/")
os.makedirs(out_folder + "/interpolation/")
else:
print("WARNING: path exists - ", out_folder)
filelist = [f for f in os.listdir(data_folder) if isfile(join(data_folder,f)) and "left" in f]
for filename in filelist:
print(filename)
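    # Naming convention assumed by the slicing below (inferred, not
    # documented): files pair up as '<9-char prefix>_left.bin' and
    # '<9-char prefix>_right.bin'.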
prefix = filename[:9]
#num_str = "%06d" % (i)
#num_str = "%010d" % (i)
img_left = data_folder + filename
img_right = data_folder + prefix + "_right.bin"
subprocess.call([binary_path, img_left, img_right, out_folder, prefix, "3", "60"])
#subprocess.call([binary_path, img_left, img_right, out_folder, prefix, "1", "32"])
#subprocess.call([binary_path, img_left, img_right, out_folder, "3", "60", "1"])
| {
"content_hash": "9e8e4910520c961467b4554d7fcc81c0",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 117,
"avg_line_length": 55.81395348837209,
"alnum_prop": 0.7325,
"repo_name": "ivankreso/stereo-vision",
"id": "7d11793efe207e8ff8db747e7d54d857320e329f",
"size": "2418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/run_sgm_batch_cnn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "210223"
},
{
"name": "C++",
"bytes": "3004180"
},
{
"name": "CMake",
"bytes": "92212"
},
{
"name": "Makefile",
"bytes": "2249"
},
{
"name": "Matlab",
"bytes": "10414"
},
{
"name": "Python",
"bytes": "237661"
},
{
"name": "Shell",
"bytes": "123307"
}
],
"symlink_target": ""
} |
import argparse
import stevedore
exts = stevedore.ExtensionManager(namespace='greeting_apps')
funs = exts.names()
parser = argparse.ArgumentParser()
parser.add_argument('command', metavar='C')
args = parser.parse_args()
try:
exts[args.command].plugin()
except KeyError:
    # ExtensionManager raises KeyError for names that were not loaded.
    print("Command %s not found, valid commands are: %s" % (args.command, funs))
| {
"content_hash": "065be112c73282411653739b33f9e287",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7257142857142858,
"repo_name": "michaelgugino/openstack-example",
"id": "501072cab528e4ea27de2ae85d4f0a69304e4f6c",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2950"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.shortcuts import resolve_url
from django.utils.http import is_safe_url
from .models import PhoneDevice
from .utils import monkeypatch_method
class AdminSiteOTPRequiredMixin(object):
"""
Mixin for enforcing OTP verified staff users.
Custom admin views should either be wrapped using :meth:`admin_view` or
use :meth:`has_permission` in order to secure those views.
"""
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
if not super(AdminSiteOTPRequiredMixin, self).has_permission(request):
return False
return request.user.is_verified()
def login(self, request, extra_context=None):
"""
Redirects to the site login page for the given HttpRequest.
"""
redirect_to = request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME))
if not redirect_to or not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to_login(redirect_to)
class AdminSiteOTPRequired(AdminSiteOTPRequiredMixin, AdminSite):
"""
AdminSite enforcing OTP verified staff users.
"""
pass
def patch_admin():
@monkeypatch_method(AdminSite)
def login(self, request, extra_context=None):
"""
Redirects to the site login page for the given HttpRequest.
"""
redirect_to = request.POST.get(REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME))
if not redirect_to or not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to_login(redirect_to)
def unpatch_admin():
setattr(AdminSite, 'login', original_login)
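# Captured at import time, i.e. before patch_admin() can replace the view: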
original_login = AdminSite.login
class PhoneDeviceAdmin(admin.ModelAdmin):
"""
:class:`~django.contrib.admin.ModelAdmin` for
:class:`~two_factor.models.PhoneDevice`.
"""
raw_id_fields = ('user',)
admin.site.register(PhoneDevice, PhoneDeviceAdmin)
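# Usage sketch (a sketch only; consult the package docs for the supported
# way): a project can enforce OTP either by swapping the admin site class
# or by patching the stock login view in place, e.g. in a project urls.py:
#
# from two_factor.admin import AdminSiteOTPRequired, patch_admin
# admin.site.__class__ = AdminSiteOTPRequired   # verified users only
# # ...or keep the default site and only redirect its login view:
# patch_admin()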
| {
"content_hash": "f4a3152745b16ac20c6337f8414c22c5",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 97,
"avg_line_length": 30.487179487179485,
"alnum_prop": 0.6938603868797308,
"repo_name": "koleror/django-two-factor-auth",
"id": "60a46f7143119e2990c6a9b8691237617f09027c",
"size": "2378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "two_factor/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20913"
},
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Python",
"bytes": "140230"
}
],
"symlink_target": ""
} |
from eventlet import greenthread
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as os_db_exception
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.api.rpc.handlers import metadata_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as const
from neutron.common import exceptions as exc
from neutron.common import ipv6_utils
from neutron.common import log as neutron_log
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import dvr_mac_db
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import models_v2
from neutron.db import netmtu_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.db import vlantransparent_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import portsecurity as psec
from neutron.extensions import providernet as provider
from neutron.extensions import vlantransparent
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config # noqa
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import rpc
LOG = log.getLogger(__name__)
MAX_BIND_TRIES = 10
class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
dvr_mac_db.DVRDbMixin,
external_net_db.External_net_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
addr_pair_db.AllowedAddressPairsMixin,
vlantransparent_db.Vlantransparent_db_mixin,
extradhcpopt_db.ExtraDhcpOptMixin,
netmtu_db.Netmtu_db_mixin):
"""Implement the Neutron L2 abstractions using modules.
Ml2Plugin is a Neutron plugin based on separately extensible sets
of network types and mechanisms for connecting to networks of
those types. The network types and mechanisms are implemented as
drivers loaded via Python entry points. Networks can be made up of
multiple segments (not yet fully implemented).
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
# List of supported extensions
_supported_extension_aliases = ["provider", "external-net", "binding",
"quotas", "security-group", "agent",
"dhcp_agent_scheduler",
"multi-provider", "allowed-address-pairs",
"extra_dhcp_opt", "subnet_allocation",
"net-mtu", "vlan-transparent"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
aliases += self.extension_manager.extension_aliases()
sg_rpc.disable_security_group_extension_by_config(aliases)
vlantransparent.disable_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
# First load drivers, then initialize DB, then initialize drivers
self.type_manager = managers.TypeManager()
self.extension_manager = managers.ExtensionManager()
self.mechanism_manager = managers.MechanismManager()
super(Ml2Plugin, self).__init__()
self.type_manager.initialize()
self.extension_manager.initialize()
self.mechanism_manager.initialize()
self._setup_rpc()
self._setup_dhcp()
LOG.info(_LI("Modular L2 Plugin initialization complete"))
def _setup_rpc(self):
"""Initialize components to support agent communication."""
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.endpoints = [
rpc.RpcCallbacks(self.notifier, self.type_manager),
securitygroups_rpc.SecurityGroupServerRpcCallback(),
dvr_rpc.DVRServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback(),
metadata_rpc.MetadataRpcCallback()
]
def _setup_dhcp(self):
"""Initialize components to support DHCP."""
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.start_periodic_dhcp_agent_status_check()
@neutron_log.log
def start_rpc_listeners(self):
"""Start the RPC loop to let the plugin communicate with agents."""
self.topic = topics.PLUGIN
self.conn = n_rpc.create_connection(new=True)
self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
return self.conn.consume_in_threads()
def _filter_nets_provider(self, context, networks, filters):
return [network
for network in networks
if self.type_manager.network_matches_filters(network, filters)
]
def _get_host_port_if_changed(self, mech_context, attrs):
binding = mech_context._binding
host = attrs and attrs.get(portbindings.HOST_ID)
if (attributes.is_attr_set(host) and binding.host != host):
return mech_context.current
def _check_mac_update_allowed(self, orig_port, port, binding):
unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED,
portbindings.VIF_TYPE_UNBOUND)
new_mac = port.get('mac_address')
mac_change = (new_mac is not None and
orig_port['mac_address'] != new_mac)
if (mac_change and binding.vif_type not in unplugged_types):
raise exc.PortBound(port_id=orig_port['id'],
vif_type=binding.vif_type,
old_mac=orig_port['mac_address'],
new_mac=port['mac_address'])
return mac_change
def _process_port_binding(self, mech_context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
changes = False
host = attributes.ATTR_NOT_SPECIFIED
if attrs and portbindings.HOST_ID in attrs:
host = attrs.get(portbindings.HOST_ID) or ''
original_host = binding.host
if (attributes.is_attr_set(host) and
original_host != host):
binding.host = host
changes = True
vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
if (attributes.is_attr_set(vnic_type) and
binding.vnic_type != vnic_type):
binding.vnic_type = vnic_type
changes = True
# treat None as clear of profile.
profile = None
if attrs and portbindings.PROFILE in attrs:
profile = attrs.get(portbindings.PROFILE) or {}
if profile not in (None, attributes.ATTR_NOT_SPECIFIED,
self._get_profile(binding)):
binding.profile = jsonutils.dumps(profile)
if len(binding.profile) > models.BINDING_PROFILE_LEN:
msg = _("binding:profile value too large")
raise exc.InvalidInput(error_message=msg)
changes = True
# Unbind the port if needed.
if changes:
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
mech_context._clear_binding_levels()
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
mech_context._clear_binding_levels()
binding.host = ''
self._update_port_dict_binding(port, binding)
return changes
def _bind_port_if_needed(self, context, allow_notify=False,
need_notify=False):
plugin_context = context._plugin_context
port_id = context.current['id']
# Since the mechanism driver bind_port() calls must be made
# outside a DB transaction locking the port state, it is
# possible (but unlikely) that the port's state could change
# concurrently while these calls are being made. If another
# thread or process succeeds in binding the port before this
# thread commits its results, the already committed results are
# used. If attributes such as binding:host_id,
# binding:profile, or binding:vnic_type are updated
# concurrently, this loop retries binding using the new
# values.
count = 0
while True:
# First, determine whether it is necessary and possible to
# bind the port.
binding = context._binding
if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND
or not binding.host):
# We either don't need to bind the port, or can't, so
# notify if needed and return.
if allow_notify and need_notify:
self._notify_port_updated(context)
return context
# Limit binding attempts to avoid any possibility of
# infinite looping and to ensure an error is logged
# instead. This does not need to be tunable because no
# more than a couple attempts should ever be required in
# normal operation. Log at info level if not 1st attempt.
count += 1
if count > MAX_BIND_TRIES:
LOG.error(_LE("Failed to commit binding results for %(port)s "
"after %(max)s tries"),
{'port': port_id, 'max': MAX_BIND_TRIES})
return context
if count > 1:
greenthread.sleep(0) # yield
LOG.info(_LI("Attempt %(count)s to bind port %(port)s"),
{'count': count, 'port': port_id})
# The port isn't already bound and the necessary
# information is available, so attempt to bind the port.
bind_context = self._bind_port(context)
# Now try to commit result of attempting to bind the port.
new_context, did_commit = self._commit_port_binding(
plugin_context, port_id, binding, bind_context)
if not new_context:
# The port has been deleted concurrently, so just
# return the unbound result from the initial
# transaction that completed before the deletion.
LOG.debug("Port %s has been deleted concurrently",
port_id)
return context
# Need to notify if we succeed and our results were
# committed.
if did_commit and (new_context._binding.vif_type !=
portbindings.VIF_TYPE_BINDING_FAILED):
need_notify = True
context = new_context
def _bind_port(self, orig_context):
# Construct a new PortContext from the one from the previous
# transaction.
port = orig_context.current
orig_binding = orig_context._binding
new_binding = models.PortBinding(
host=orig_binding.host,
vnic_type=orig_binding.vnic_type,
profile=orig_binding.profile,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vif_details=''
)
self._update_port_dict_binding(port, new_binding)
new_context = driver_context.PortContext(
self, orig_context._plugin_context, port,
orig_context.network.current, new_binding, None)
# Attempt to bind the port and return the context with the
# result.
self.mechanism_manager.bind_port(new_context)
return new_context
def _commit_port_binding(self, plugin_context, port_id, orig_binding,
new_context):
session = plugin_context.session
new_binding = new_context._binding
# After we've attempted to bind the port, we begin a
# transaction, get the current port state, and decide whether
# to commit the binding results.
#
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with lockutils.lock('db-access'),\
session.begin(subtransactions=True):
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
port_db, cur_binding = db.get_locked_port_and_binding(session,
port_id)
if not port_db:
# The port has been deleted concurrently.
return (None, None)
oport = self._make_port_dict(port_db)
port = self._make_port_dict(port_db)
network = new_context.network.current
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
# REVISIT(rkukura): The PortBinding instance from the
# ml2_port_bindings table, returned as cur_binding
# from db.get_locked_port_and_binding() above, is
# currently not used for DVR distributed ports, and is
# replaced here with the DVRPortBinding instance from
# the ml2_dvr_port_bindings table specific to the host
# on which the distributed port is being bound. It
# would be possible to optimize this code to avoid
# fetching the PortBinding instance in the DVR case,
# and even to avoid creating the unused entry in the
# ml2_port_bindings table. But the upcoming resolution
# for bug 1367391 will eliminate the
# ml2_dvr_port_bindings table, use the
# ml2_port_bindings table to store non-host-specific
# fields for both distributed and non-distributed
# ports, and introduce a new ml2_port_binding_hosts
# table for the fields that need to be host-specific
# in the distributed case. Since the PortBinding
# instance will then be needed, it does not make sense
# to optimize this code to avoid fetching it.
cur_binding = db.get_dvr_port_binding_by_host(
session, port_id, orig_binding.host)
cur_context = driver_context.PortContext(
self, plugin_context, port, network, cur_binding, None,
original_port=oport)
# Commit our binding results only if port has not been
# successfully bound concurrently by another thread or
# process and no binding inputs have been changed.
commit = ((cur_binding.vif_type in
[portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]) and
orig_binding.host == cur_binding.host and
orig_binding.vnic_type == cur_binding.vnic_type and
orig_binding.profile == cur_binding.profile)
if commit:
# Update the port's binding state with our binding
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
db.clear_binding_levels(session, port_id, cur_binding.host)
db.set_binding_levels(session, new_context._binding_levels)
cur_context._binding_levels = new_context._binding_levels
# Update PortContext's port dictionary to reflect the
# updated binding state.
self._update_port_dict_binding(port, cur_binding)
# Update the port status if requested by the bound driver.
if (new_context._binding_levels and
new_context._new_port_status):
port_db.status = new_context._new_port_status
port['status'] = new_context._new_port_status
# Call the mechanism driver precommit methods, commit
# the results, and call the postcommit methods.
self.mechanism_manager.update_port_precommit(cur_context)
if commit:
self.mechanism_manager.update_port_postcommit(cur_context)
# Continue, using the port state as of the transaction that
# just finished, whether that transaction committed new
# results or discovered concurrent port state changes.
return (cur_context, commit)
def _update_port_dict_binding(self, port, binding):
port[portbindings.VNIC_TYPE] = binding.vnic_type
port[portbindings.PROFILE] = self._get_profile(binding)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
port[portbindings.HOST_ID] = ''
port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_DISTRIBUTED
port[portbindings.VIF_DETAILS] = {}
else:
port[portbindings.HOST_ID] = binding.host
port[portbindings.VIF_TYPE] = binding.vif_type
port[portbindings.VIF_DETAILS] = self._get_vif_details(binding)
def _get_vif_details(self, binding):
if binding.vif_details:
try:
return jsonutils.loads(binding.vif_details)
except Exception:
LOG.error(_LE("Serialized vif_details DB value '%(value)s' "
"for port %(port)s is invalid"),
{'value': binding.vif_details,
'port': binding.port_id})
return {}
def _get_profile(self, binding):
if binding.profile:
try:
return jsonutils.loads(binding.profile)
except Exception:
LOG.error(_LE("Serialized profile DB value '%(value)s' for "
"port %(port)s is invalid"),
{'value': binding.profile,
'port': binding.port_id})
return {}
def _ml2_extend_port_dict_binding(self, port_res, port_db):
# None when called during unit tests for other plugins.
if port_db.port_binding:
self._update_port_dict_binding(port_res, port_db.port_binding)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_extend_port_dict_binding'])
# Register extend dict methods for network and port resources.
# Each mechanism driver that supports extend attribute for the resources
# can add those attribute to the result.
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_ml2_md_extend_network_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_md_extend_port_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETS, ['_ml2_md_extend_subnet_dict'])
def _ml2_md_extend_network_dict(self, result, netdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_network_dict(session, netdb, result)
def _ml2_md_extend_port_dict(self, result, portdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_port_dict(session, portdb, result)
def _ml2_md_extend_subnet_dict(self, result, subnetdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_subnet_dict(
session, subnetdb, result)
# Note - The following hook methods have "ml2" in their names so
# that they are not called twice during unit tests due to global
# registration of hooks in portbindings_db.py used by other
# plugins.
def _ml2_port_model_hook(self, context, original_model, query):
query = query.outerjoin(models.PortBinding,
(original_model.id ==
models.PortBinding.port_id))
return query
def _ml2_port_result_filter_hook(self, query, filters):
values = filters and filters.get(portbindings.HOST_ID, [])
if not values:
return query
return query.filter(models.PortBinding.host.in_(values))
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port,
"ml2_port_bindings",
'_ml2_port_model_hook',
None,
'_ml2_port_result_filter_hook')
def _notify_port_updated(self, mech_context):
port = mech_context.current
segment = mech_context.bottom_bound_segment
if not segment:
# REVISIT(rkukura): This should notify agent to unplug port
network = mech_context.network.current
LOG.warning(_LW("In _notify_port_updated(), no bound segment for "
"port %(port_id)s on network %(network_id)s"),
{'port_id': port['id'],
'network_id': network['id']})
return
self.notifier.port_update(mech_context._plugin_context, port,
segment[api.NETWORK_TYPE],
segment[api.SEGMENTATION_ID],
segment[api.PHYSICAL_NETWORK])
def _delete_objects(self, context, resource, objects):
delete_op = getattr(self, 'delete_%s' % resource)
for obj in objects:
try:
delete_op(context, obj['result']['id'])
except KeyError:
LOG.exception(_LE("Could not find %s to delete."),
resource)
except Exception:
LOG.exception(_LE("Could not delete %(res)s %(id)s."),
{'res': resource,
'id': obj['result']['id']})
def _create_bulk_ml2(self, resource, context, request_items):
objects = []
collection = "%ss" % resource
items = request_items[collection]
try:
with context.session.begin(subtransactions=True):
obj_creator = getattr(self, '_create_%s_db' % resource)
for item in items:
attrs = item[resource]
result, mech_context = obj_creator(context, item)
objects.append({'mech_context': mech_context,
'result': result,
'attributes': attrs})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("An exception occurred while creating "
"the %(resource)s:%(item)s"),
{'resource': resource, 'item': item})
try:
postcommit_op = getattr(self.mechanism_manager,
'create_%s_postcommit' % resource)
for obj in objects:
postcommit_op(obj['mech_context'])
return objects
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
resource_ids = [res['result']['id'] for res in objects]
LOG.exception(_LE("mechanism_manager.create_%(res)s"
"_postcommit failed for %(res)s: "
"'%(failed_id)s'. Deleting "
"%(res)ss %(resource_ids)s"),
{'res': resource,
'failed_id': obj['result']['id'],
'resource_ids': ', '.join(resource_ids)})
self._delete_objects(context, resource, objects)
def _create_network_db(self, context, network):
net_data = network[attributes.NETWORK]
tenant_id = self._get_tenant_id_for_create(context, net_data)
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group(context, tenant_id)
result = super(Ml2Plugin, self).create_network(context, network)
self.extension_manager.process_create_network(context, net_data,
result)
self._process_l3_create(context, result, net_data)
net_data['id'] = result['id']
self.type_manager.create_network_segments(context, net_data,
tenant_id)
self.type_manager.extend_network_dict_provider(context, result)
mech_context = driver_context.NetworkContext(self, context,
result)
self.mechanism_manager.create_network_precommit(mech_context)
if net_data.get(api.MTU, 0) > 0:
res = super(Ml2Plugin, self).update_network(context,
result['id'], {'network': {api.MTU: net_data[api.MTU]}})
result[api.MTU] = res.get(api.MTU, 0)
return result, mech_context
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_request=True)
def _create_network_with_retries(self, context, network):
return self._create_network_db(context, network)
def create_network(self, context, network):
result, mech_context = self._create_network_with_retries(context,
network)
try:
self.mechanism_manager.create_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_network_postcommit "
"failed, deleting network '%s'"), result['id'])
self.delete_network(context, result['id'])
return result
def create_network_bulk(self, context, networks):
objects = self._create_bulk_ml2(attributes.NETWORK, context, networks)
return [obj['result'] for obj in objects]
def update_network(self, context, id, network):
net_data = network[attributes.NETWORK]
provider._raise_if_updates_provider_attributes(net_data)
session = context.session
with session.begin(subtransactions=True):
original_network = super(Ml2Plugin, self).get_network(context, id)
updated_network = super(Ml2Plugin, self).update_network(context,
id,
network)
self.extension_manager.process_update_network(context, net_data,
updated_network)
self._process_l3_update(context, updated_network, net_data)
self.type_manager.extend_network_dict_provider(context,
updated_network)
mech_context = driver_context.NetworkContext(
self, context, updated_network,
original_network=original_network)
self.mechanism_manager.update_network_precommit(mech_context)
# TODO(apech) - handle errors raised by update_network, potentially
# by re-calling update_network with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_network_postcommit(mech_context)
return updated_network
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).get_network(context, id, None)
self.type_manager.extend_network_dict_provider(context, result)
return self._fields(result, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(Ml2Plugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self.type_manager.extend_network_dict_provider(context, net)
nets = self._filter_nets_provider(context, nets, filters)
return [self._fields(net, fields) for net in nets]
def _delete_ports(self, context, ports):
for port in ports:
try:
self.delete_port(context, port.id)
except (exc.PortNotFound, sa_exc.ObjectDeletedError):
context.session.expunge(port)
# concurrent port deletion can be performed by
# release_dhcp_port caused by concurrent subnet_delete
LOG.info(_LI("Port %s was deleted concurrently"), port.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception auto-deleting port %s"),
port.id)
def _delete_subnets(self, context, subnets):
for subnet in subnets:
try:
self.delete_subnet(context, subnet.id)
except (exc.SubnetNotFound, sa_exc.ObjectDeletedError):
context.session.expunge(subnet)
LOG.info(_LI("Subnet %s was deleted concurrently"),
subnet.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception auto-deleting subnet %s"),
subnet.id)
def delete_network(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
# function is not used because it auto-deletes ports and
# subnets from the DB without invoking the derived class's
# delete_port() or delete_subnet(), preventing mechanism
# drivers from being called. This approach should be revisited
# when the API layer is reworked during icehouse.
LOG.debug("Deleting network %s", id)
session = context.session
while True:
try:
# REVISIT: Serialize this operation with a semaphore
# to prevent deadlock waiting to acquire a DB lock
# held by another thread in the same process, leading
# to 'lock wait timeout' errors.
#
# Process L3 first, since, depending on the L3 plugin, it may
# involve locking the db-access semaphore, sending RPC
# notifications, and/or calling delete_port on this plugin.
# Additionally, a rollback may not be enough to undo the
# deletion of a floating IP with certain L3 backends.
self._process_l3_delete(context, id)
# Using query().with_lockmode isn't necessary. Foreign-key
# constraints prevent deletion if concurrent creation happens.
with lockutils.lock('db-access'),\
session.begin(subtransactions=True):
# Get ports to auto-delete.
ports = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Ports to auto-delete: %s", ports)
only_auto_del = all(p.device_owner
in db_base_plugin_v2.
AUTO_DELETE_PORT_OWNERS
for p in ports)
if not only_auto_del:
LOG.debug("Tenant-owned ports exist")
raise exc.NetworkInUse(net_id=id)
# Get subnets to auto-delete.
subnets = (session.query(models_v2.Subnet).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Subnets to auto-delete: %s", subnets)
if not (ports or subnets):
network = self.get_network(context, id)
mech_context = driver_context.NetworkContext(self,
context,
network)
self.mechanism_manager.delete_network_precommit(
mech_context)
self.type_manager.release_network_segments(session, id)
record = self._get_network(context, id)
LOG.debug("Deleting network record %s", record)
session.delete(record)
# The segment records are deleted via cascade from the
# network record, so explicit removal is not necessary.
LOG.debug("Committing transaction")
break
except os_db_exception.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
LOG.warning(_LW("A concurrent port creation has "
"occurred"))
continue
self._delete_ports(context, ports)
self._delete_subnets(context, subnets)
try:
self.mechanism_manager.delete_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the network. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_network_postcommit"
" failed"))
self.notifier.network_delete(context, id)
def _create_subnet_db(self, context, subnet):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).create_subnet(context, subnet)
self.extension_manager.process_create_subnet(
context, subnet[attributes.SUBNET], result)
mech_context = driver_context.SubnetContext(self, context, result)
self.mechanism_manager.create_subnet_precommit(mech_context)
return result, mech_context
def create_subnet(self, context, subnet):
result, mech_context = self._create_subnet_db(context, subnet)
try:
self.mechanism_manager.create_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_subnet_postcommit "
"failed, deleting subnet '%s'"), result['id'])
self.delete_subnet(context, result['id'])
return result
def create_subnet_bulk(self, context, subnets):
objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets)
return [obj['result'] for obj in objects]
def update_subnet(self, context, id, subnet):
session = context.session
with session.begin(subtransactions=True):
original_subnet = super(Ml2Plugin, self).get_subnet(context, id)
updated_subnet = super(Ml2Plugin, self).update_subnet(
context, id, subnet)
self.extension_manager.process_update_subnet(
context, subnet[attributes.SUBNET], updated_subnet)
mech_context = driver_context.SubnetContext(
self, context, updated_subnet, original_subnet=original_subnet)
self.mechanism_manager.update_subnet_precommit(mech_context)
# TODO(apech) - handle errors raised by update_subnet, potentially
# by re-calling update_subnet with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_subnet_postcommit(mech_context)
return updated_subnet
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_request=True)
def delete_subnet(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
# function is not used because it deallocates the subnet's addresses
# from ports in the DB without invoking the derived class's
# update_port(), preventing mechanism drivers from being called.
# This approach should be revisited when the API layer is reworked
# during icehouse.
LOG.debug("Deleting subnet %s", id)
session = context.session
while True:
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock
# wait timeout' errors.
with lockutils.lock('db-access'),\
session.begin(subtransactions=True):
record = self._get_subnet(context, id)
subnet = self._make_subnet_dict(record, None)
qry_allocated = (session.query(models_v2.IPAllocation).
filter_by(subnet_id=id).
join(models_v2.Port))
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
# Remove network owned ports, and delete IP allocations
# for IPv6 addresses which were automatically generated
# via SLAAC
if is_auto_addr_subnet:
self._subnet_check_ip_allocations_internal_router_ports(
context, id)
else:
qry_allocated = (
qry_allocated.filter(models_v2.Port.device_owner.
in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)))
allocated = qry_allocated.all()
# Delete all the IPAllocation that can be auto-deleted
if allocated:
map(session.delete, allocated)
LOG.debug("Ports to auto-deallocate: %s", allocated)
# Check if there are more IP allocations, unless
# is_auto_address_subnet is True. In that case the check is
# unnecessary. This additional check not only would be wasteful
# for this class of subnet, but is also error-prone since when
# the isolation level is set to READ COMMITTED allocations made
# concurrently will be returned by this query
if not is_auto_addr_subnet:
alloc = self._subnet_check_ip_allocations(context, id)
if alloc:
user_alloc = self._subnet_get_user_allocation(
context, id)
if user_alloc:
LOG.info(_LI("Found port (%(port_id)s, %(ip)s) "
"having IP allocation on subnet "
"%(subnet)s, cannot delete"),
{'ip': user_alloc.ip_address,
'port_id': user_alloc.port_id,
'subnet': id})
raise exc.SubnetInUse(subnet_id=id)
else:
# allocation found and it was DHCP port
# that appeared after autodelete ports were
# removed - need to restart whole operation
raise os_db_exception.RetryRequest(
exc.SubnetInUse(subnet_id=id))
db_base_plugin_v2._check_subnet_not_used(context, id)
# If allocated is None, then all the IPAllocation were
# correctly deleted during the previous pass.
if not allocated:
mech_context = driver_context.SubnetContext(self, context,
subnet)
self.mechanism_manager.delete_subnet_precommit(
mech_context)
LOG.debug("Deleting subnet record")
session.delete(record)
LOG.debug("Committing transaction")
break
for a in allocated:
if a.port_id:
# calling update_port() for each allocation to remove the
# IP from the port and call the MechanismDrivers
data = {attributes.PORT:
{'fixed_ips': [{'subnet_id': ip.subnet_id,
'ip_address': ip.ip_address}
for ip in a.port.fixed_ips
if ip.subnet_id != id]}}
try:
self.update_port(context, a.port_id, data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception deleting fixed_ip "
"from port %s"), a.port_id)
try:
self.mechanism_manager.delete_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the subnet. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed"))
# TODO(yalei) - will be simplified after security group and address pair be
# converted to ext driver too.
def _portsec_ext_port_create_processing(self, context, port_data, port):
attrs = port[attributes.PORT]
port_security = ((port_data.get(psec.PORTSECURITY) is None) or
port_data[psec.PORTSECURITY])
# allowed address pair checks
if self._check_update_has_allowed_address_pairs(port):
if not port_security:
raise addr_pair.AddressPairAndPortSecurityRequired()
else:
# remove ATTR_NOT_SPECIFIED
attrs[addr_pair.ADDRESS_PAIRS] = []
if port_security:
self._ensure_default_security_group_on_port(context, port)
elif self._check_update_has_security_groups(port):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
def _create_port_db(self, context, port):
attrs = port[attributes.PORT]
if not attrs.get('status'):
attrs['status'] = const.PORT_STATUS_DOWN
session = context.session
with session.begin(subtransactions=True):
dhcp_opts = attrs.get(edo_ext.EXTRADHCPOPTS, [])
result = super(Ml2Plugin, self).create_port(context, port)
self.extension_manager.process_create_port(context, attrs, result)
self._portsec_ext_port_create_processing(context, result, port)
# sgids must be got after portsec checked with security group
sgids = self._get_security_groups_on_port(context, port)
self._process_port_create_security_group(context, result, sgids)
network = self.get_network(context, result['network_id'])
binding = db.add_port_binding(session, result['id'])
mech_context = driver_context.PortContext(self, context, result,
network, binding, None)
self._process_port_binding(mech_context, attrs)
result[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, result,
attrs.get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, result,
dhcp_opts)
self.mechanism_manager.create_port_precommit(mech_context)
return result, mech_context
def create_port(self, context, port):
attrs = port[attributes.PORT]
result, mech_context = self._create_port_db(context, port)
new_host_port = self._get_host_port_if_changed(mech_context, attrs)
# notify any plugin that is interested in port create events
kwargs = {'context': context, 'port': new_host_port}
registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs)
try:
self.mechanism_manager.create_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_port_postcommit "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
# REVISIT(rkukura): Is there any point in calling this before
# a binding has been successfully established?
self.notify_security_groups_member_updated(context, result)
try:
bound_context = self._bind_port_if_needed(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("_bind_port_if_needed "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
return bound_context.current
def create_port_bulk(self, context, ports):
objects = self._create_bulk_ml2(attributes.PORT, context, ports)
# REVISIT(rkukura): Is there any point in calling this before
# a binding has been successfully established?
results = [obj['result'] for obj in objects]
self.notify_security_groups_member_updated_bulk(context, results)
for obj in objects:
attrs = obj['attributes']
if attrs and attrs.get(portbindings.HOST_ID):
new_host_port = self._get_host_port_if_changed(
obj['mech_context'], attrs)
kwargs = {'context': context, 'port': new_host_port}
registry.notify(
resources.PORT, events.AFTER_CREATE, self, **kwargs)
try:
for obj in objects:
obj['bound_context'] = self._bind_port_if_needed(
obj['mech_context'])
return [obj['bound_context'].current for obj in objects]
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
resource_ids = [res['result']['id'] for res in objects]
LOG.error(_LE("_bind_port_if_needed failed. "
"Deleting all ports from create bulk '%s'"),
resource_ids)
self._delete_objects(context, attributes.PORT, objects)
# TODO(yalei) - will be simplified after security group and address pair be
# converted to ext driver too.
def _portsec_ext_port_update_processing(self, updated_port, context, port,
id):
port_security = ((updated_port.get(psec.PORTSECURITY) is None) or
updated_port[psec.PORTSECURITY])
if port_security:
return
# check the address-pairs
if self._check_update_has_allowed_address_pairs(port):
# has address pairs in request
raise addr_pair.AddressPairAndPortSecurityRequired()
elif (not
self._check_update_deletes_allowed_address_pairs(port)):
# not a request for deleting the address-pairs
updated_port[addr_pair.ADDRESS_PAIRS] = (
self.get_allowed_address_pairs(context, id))
# check if address pairs has been in db, if address pairs could
# be put in extension driver, we can refine here.
if updated_port[addr_pair.ADDRESS_PAIRS]:
raise addr_pair.AddressPairAndPortSecurityRequired()
# checks if security groups were updated adding/modifying
# security groups, port security is set
if self._check_update_has_security_groups(port):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
elif (not
self._check_update_deletes_security_groups(port)):
# Update did not have security groups passed in. Check
# that port does not have any security groups already on it.
filters = {'port_id': [id]}
security_groups = (
super(Ml2Plugin, self)._get_port_security_group_bindings(
context, filters)
)
if security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
def update_port(self, context, id, port):
attrs = port[attributes.PORT]
need_port_update_notify = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with lockutils.lock('db-access'),\
session.begin(subtransactions=True):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
raise exc.PortNotFound(port_id=id)
mac_address_updated = self._check_mac_update_allowed(
port_db, attrs, binding)
need_port_update_notify |= mac_address_updated
original_port = self._make_port_dict(port_db)
updated_port = super(Ml2Plugin, self).update_port(context, id,
port)
self.extension_manager.process_update_port(context, attrs,
updated_port)
self._portsec_ext_port_update_processing(updated_port, context,
port, id)
if (psec.PORTSECURITY in attrs) and (
original_port[psec.PORTSECURITY] !=
updated_port[psec.PORTSECURITY]):
need_port_update_notify = True
if addr_pair.ADDRESS_PAIRS in attrs:
need_port_update_notify |= (
self.update_address_pairs_on_port(context, id, port,
original_port,
updated_port))
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
network = self.get_network(context, original_port['network_id'])
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
levels = db.get_binding_levels(session, id, binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, binding, levels,
original_port=original_port)
new_host_port = self._get_host_port_if_changed(mech_context, attrs)
need_port_update_notify |= self._process_port_binding(
mech_context, attrs)
self.mechanism_manager.update_port_precommit(mech_context)
# Notifications must be sent after the above transaction is complete
kwargs = {
'context': context,
'port': new_host_port,
'mac_address_updated': mac_address_updated,
}
registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs)
# TODO(apech) - handle errors raised by update_port, potentially
# by re-calling update_port with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_port_postcommit(mech_context)
self.check_and_notify_security_group_member_changed(
context, original_port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
bound_context = self._bind_port_if_needed(
mech_context,
allow_notify=True,
need_notify=need_port_update_notify)
return bound_context.current
def _process_dvr_port_binding(self, mech_context, context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
binding.vif_details = ''
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
if binding.host:
db.clear_binding_levels(session, port_id, binding.host)
binding.host = ''
self._update_port_dict_binding(port, binding)
binding.host = attrs and attrs.get(portbindings.HOST_ID)
binding.router_id = attrs and attrs.get('device_id')
def update_dvr_port_binding(self, context, id, port):
attrs = port[attributes.PORT]
host = attrs and attrs.get(portbindings.HOST_ID)
host_set = attributes.is_attr_set(host)
if not host_set:
LOG.error(_LE("No Host supplied to bind DVR Port %s"), id)
return
session = context.session
binding = db.get_dvr_port_binding_by_host(session, id, host)
device_id = attrs and attrs.get('device_id')
router_id = binding and binding.get('router_id')
update_required = (not binding or
binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or
router_id != device_id)
if update_required:
with session.begin(subtransactions=True):
try:
orig_port = super(Ml2Plugin, self).get_port(context, id)
except exc.PortNotFound:
LOG.debug("DVR Port %s has been deleted concurrently", id)
return
if not binding:
binding = db.ensure_dvr_port_binding(
session, id, host, router_id=device_id)
network = self.get_network(context, orig_port['network_id'])
levels = db.get_binding_levels(session, id, host)
mech_context = driver_context.PortContext(self,
context, orig_port, network,
binding, levels, original_port=orig_port)
self._process_dvr_port_binding(mech_context, context, attrs)
self._bind_port_if_needed(mech_context)
def _pre_delete_port(self, context, port_id, port_check):
"""Do some preliminary operations before deleting the port."""
LOG.debug("Deleting port %s", port_id)
try:
# notify interested parties of imminent port deletion;
# a failure here prevents the operation from happening
kwargs = {
'context': context,
'port_id': port_id,
'port_check': port_check
}
registry.notify(
resources.PORT, events.BEFORE_DELETE, self, **kwargs)
except exceptions.CallbackFailure as e:
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise exc.ServicePortInUse(port_id=port_id, reason=e)
@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
retry_on_deadlock=True)
def delete_port(self, context, id, l3_port_check=True):
self._pre_delete_port(context, id, l3_port_check)
# TODO(armax): get rid of the l3 dependency in the with block
removed_routers = []
router_ids = []
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
is_dvr_enabled = utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with lockutils.lock('db-access'),\
session.begin(subtransactions=True):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
LOG.debug("The port '%s' was deleted", id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
bound_mech_contexts = []
device_owner = port['device_owner']
if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, id)
for bind in bindings:
levels = db.get_binding_levels(context.session, id,
bind.host)
mech_context = driver_context.PortContext(
self, context, port, network, bind, levels)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
else:
levels = db.get_binding_levels(context.session, id,
binding.host)
mech_context = driver_context.PortContext(
self, context, port, network, binding, levels)
if is_dvr_enabled and utils.is_dvr_serviced(device_owner):
removed_routers = l3plugin.dvr_deletens_if_no_port(
context, id)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s",
{"port_id": id, "owner": device_owner})
super(Ml2Plugin, self).delete_port(context, id)
self._post_delete_port(
context, port, router_ids, removed_routers, bound_mech_contexts)
def _post_delete_port(
self, context, port, router_ids, removed_routers, bound_mech_contexts):
kwargs = {
'context': context,
'port': port,
'router_ids': router_ids,
'removed_routers': removed_routers
}
registry.notify(resources.PORT, events.AFTER_DELETE, self, **kwargs)
try:
# Note that DVR Interface ports will have bindings on
# multiple hosts, and so will have multiple mech_contexts,
# while other ports typically have just one.
for mech_context in bound_mech_contexts:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for"
" port %s"), port['id'])
self.notifier.port_delete(context, port['id'])
self.notify_security_groups_member_updated(context, port)
def get_bound_port_context(self, plugin_context, port_id, host=None,
cached_networks=None):
session = plugin_context.session
with session.begin(subtransactions=True):
try:
port_db = (session.query(models_v2.Port).
enable_eagerloads(False).
filter(models_v2.Port.id.startswith(port_id)).
one())
except sa_exc.NoResultFound:
LOG.debug("No ports have port_id starting with %s",
port_id)
return
except sa_exc.MultipleResultsFound:
LOG.error(_LE("Multiple ports have port_id starting with %s"),
port_id)
return
port = self._make_port_dict(port_db)
network = (cached_networks or {}).get(port['network_id'])
if not network:
network = self.get_network(plugin_context, port['network_id'])
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
LOG.error(_LE("Binding info for DVR port %s not found"),
port_id)
return None
levels = db.get_binding_levels(session, port_db.id, host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding, levels)
else:
# since eager loads are disabled in port_db query
# related attribute port_binding could disappear in
# concurrent port deletion.
# It's not an error condition.
binding = port_db.port_binding
if not binding:
LOG.info(_LI("Binding info for port %s was not found, "
"it might have been deleted already."),
port_id)
return
levels = db.get_binding_levels(session, port_db.id,
port_db.port_binding.host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding, levels)
return self._bind_port_if_needed(port_context)
def update_port_status(self, context, port_id, status, host=None):
"""
Returns port_id (non-truncated uuid) if the port exists.
Otherwise returns None.
"""
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with lockutils.lock('db-access'),\
session.begin(subtransactions=True):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_LW("Port %(port)s updated up by agent not found"),
{'port': port_id})
return None
if (port.status != status and
port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
levels = db.get_binding_levels(session, port.id,
port.port_binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, port.port_binding,
levels, original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
return
binding['status'] = status
binding.update(binding)
updated = True
if (updated and
port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
with lockutils.lock('db-access'),\
session.begin(subtransactions=True):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_LW("Port %s not found during update"),
port_id)
return
original_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
port.status = db.generate_dvr_port_status(session, port['id'])
updated_port = self._make_port_dict(port)
levels = db.get_binding_levels(session, port_id, host)
mech_context = (driver_context.PortContext(
self, context, updated_port, network,
binding, levels, original_port=original_port))
self.mechanism_manager.update_port_precommit(mech_context)
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
db.delete_dvr_port_binding_if_stale(session, binding)
return port['id']
def port_bound_to_host(self, context, port_id, host):
port = db.get_port(context.session, port_id)
if not port:
LOG.debug("No Port match for: %s", port_id)
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, port_id)
for b in bindings:
if b.host == host:
return True
LOG.debug("No binding found for DVR port %s", port['id'])
return False
else:
port_host = db.get_port_binding_host(context.session, port_id)
return (port_host == host)
def get_ports_from_devices(self, context, devices):
port_ids_to_devices = dict(
(self._device_to_port_id(context, device), device)
for device in devices)
port_ids = port_ids_to_devices.keys()
ports = db.get_ports_and_sgs(context, port_ids)
for port in ports:
# map back to original requested id
port_id = next((port_id for port_id in port_ids
if port['id'].startswith(port_id)), None)
port['device'] = port_ids_to_devices.get(port_id)
return ports
@staticmethod
def _device_to_port_id(context, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
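        # Illustrative sketch (hypothetical values): with 'tap' among
        # INTERFACE_PREFIXES, a device name 'tap3f8c9a10-2b' maps to
        # '3f8c9a10-2b', a truncated port UUID that get_ports_from_devices
        # above matches with startswith(); a bare MAC address instead falls
        # through to the DB lookup below.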
for prefix in const.INTERFACE_PREFIXES:
if device.startswith(prefix):
return device[len(prefix):]
# REVISIT(irenab): Consider calling into bound MD to
# handle the get_device_details RPC
if not uuidutils.is_uuid_like(device):
port = db.get_port_from_device_mac(context, device)
if port:
return port.id
return device
| {
"content_hash": "cde69caa5b5023e41fd83417acbc8ac8",
"timestamp": "",
"source": "github",
"line_count": 1476,
"max_line_length": 79,
"avg_line_length": 47.989837398373986,
"alnum_prop": 0.5719226914009007,
"repo_name": "JioCloud/neutron",
"id": "df831fdc81f52ba33db29186eab00cacfc2a72c9",
"size": "71473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7396915"
},
{
"name": "Shell",
"bytes": "12807"
}
],
"symlink_target": ""
} |
""" Code to scrap Lyon-Bron weather from a specific date """
from lxml import html
from datetime import datetime, timedelta
from cleaner import cleaner
import requests
import pandas as pd
# Defines the date range you want to get data from (January = 1, December = 12)
startingDate = datetime(2016,8,1)
endingDate = datetime(2016,8,31)
deltaDate = endingDate - startingDate
# Defines the weather station code (for Lyon-Bron it is 7480)
cityCode = 7480
# Initialisation
body = []
header = []
i = 0
for d in range(0, deltaDate.days+1):
sourcingDate = startingDate + timedelta(days=d)
    # Get webpage source code to scrape data from, using our parameters
    # (the site's mois2 parameter appears to be zero-based, hence month-1)
rawSource = requests.get("http://www.meteociel.fr/temps-reel/obs_villes.php?jour2=%s&mois2=%s&annee2=%s&code2=%s" % (sourcingDate.day, sourcingDate.month-1, sourcingDate.year, cityCode))
# Cleaning raw source code
source = cleaner(rawSource.text)
# Transforming into a tree
tree = html.fromstring(source)
# Browsing tree, seeking for tr
tables = tree.xpath('//tr')
# Initializing counter
k = 0
# Get header
header = tables[0].xpath('td/text()')
    # Getting values and reformatting date/time
for td in tables:
if k > 0:
body.append(tables[k].xpath('td/text()'))
body[i][0] = body[i][0].lower()
addHour = body[i][0]
newDate = sourcingDate + timedelta(hours=int(addHour))
# Take care of summer/winter UTC time for 2015/2016
# Winter 2016
if newDate > datetime(2016,10,30,2):
addUTC = "+01"
# Summer 2016
elif newDate > datetime(2016,3,27,2):
addUTC = "+02"
# Winter 2015
elif newDate > datetime(2015,10,25,2):
addUTC = "+01"
            # Fallback: no known offset, default to Zulu/UTC
else:
addUTC = "Z"
newDate = str(newDate).replace(" ", "")
newDate = newDate[:10] + "T" + newDate[10:] + addUTC
body[i][0] = newDate
i+=1
k+=1
header[0] = "Datetime"
data = pd.DataFrame(body, columns=header)
data = data.drop('Temps', 1)
print(data)
data.to_json("test.json", orient="records")
| {
"content_hash": "0f6f152bc30fa5fb91371dd30e5db43a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 220,
"avg_line_length": 30.8,
"alnum_prop": 0.5796536796536796,
"repo_name": "DataScience-appliquee/Plateforme-prediction-velov",
"id": "074fdf19967b70da12812fe2c760148d77367ad1",
"size": "2354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Meteo/meteociel/scrapit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "16945"
},
{
"name": "Jupyter Notebook",
"bytes": "16256"
},
{
"name": "Python",
"bytes": "26294"
}
],
"symlink_target": ""
} |
from tests.tests import run
run()
| {
"content_hash": "dcae0d08b02969f6dd36ad5684ce2ac6",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 27,
"avg_line_length": 11.666666666666666,
"alnum_prop": 0.7428571428571429,
"repo_name": "lightd22/smartDraft",
"id": "ece8e6af1de1c678765b00da0cff6ed7682246fd",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/run_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93322"
}
],
"symlink_target": ""
} |
print("STARTING WEBSERVER\nImporting models...")
from models import *
print("Starting...")
monkey.patch_all()
# SOCKETIO DEFINITIONS
@sio.on('getEvents')
def getEvents(sid):
data = []
#print("loading events...")
events = db.session.query(Opps).filter(Opps.date > datetime.now()).filter(Opps.locked == False).all()
alldates = [-1]
i = 1
j = 0
for event in events:
alldates.append(event.date.day)
evdata = {"name":event.name,
"date":event.date.strftime("%B %d"),
"id":event.id,
"time":event.date.strftime("%-I:%M") + " - " + event.enddate.strftime("%-I:%M %p"),
"info":event.info,
"attendees":[]}
for dude in event.users:
evdata['attendees'].append(dude.fname + ' ' + dude.lname)
if event.date.day == alldates[i-1]:
# append to events
#print("appended to old!")
data[j-1]['events'].append(evdata)
else:
# make new weekday
#print("new weekday!")
data.append({"weekday":event.date.strftime("%a"),"date":event.date.strftime("%b %d"),"events":[evdata]})
j += 1
i += 1
sio.emit('sendEvents', data)
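# The emitted payload groups same-day events under one weekday entry, e.g.
# (illustrative shape): [{"weekday": "Mon", "date": "Aug 01", "events": [...]}]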
@app.route('/')
def index():
return render_template('signup.html')
# RUN APP #
if __name__ == '__main__':
socketApp = socketio.Middleware(sio, app)
eventlet.wsgi.server(eventlet.listen(('', 8080)), socketApp)
| {
"content_hash": "442aa0aa2e906476836d68cc36d7a13e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 116,
"avg_line_length": 31.1875,
"alnum_prop": 0.5377421509686039,
"repo_name": "theapricot/oppapp2",
"id": "e843fa8c7a2698eedd0109177aebcee01df87a9a",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1848"
},
{
"name": "HTML",
"bytes": "50589"
},
{
"name": "JavaScript",
"bytes": "203785"
},
{
"name": "Python",
"bytes": "45716"
}
],
"symlink_target": ""
} |
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidto.me/embed-%s.html' % url
result = client.request(url)
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
return url
except:
return
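# Usage sketch (the embed id below is hypothetical): resolve() unpacks the
# packed JavaScript on the embed page and returns a direct stream URL.
#   stream = resolve('http://vidto.me/embed-abc123defgh.html')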
def check(url):
try:
result = client.request(url)
if result == None: return False
if 'File Deleted.' in result: return False
return True
except:
return False | {
"content_hash": "d795cdea24ccaf32a98f57d9745e1566",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 31.5,
"alnum_prop": 0.6239316239316239,
"repo_name": "rysson/filmkodi",
"id": "b720e5d7eee3bd59901a1f9c6996af5f1a44b995",
"size": "1663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugin.video.fanfilm/resources/lib/resolvers/vidto.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
} |
"""Checks import position rule"""
# pylint: disable=unused-import,ungrouped-imports,import-error,no-name-in-module,relative-beyond-top-level
import os
| {
"content_hash": "18ae6b51c90284c5c414ef236e281102",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 106,
"avg_line_length": 50.333333333333336,
"alnum_prop": 0.7947019867549668,
"repo_name": "ruchee/vimrc",
"id": "a1808a8fd7554d4acb1b263825fa802723905f82",
"size": "151",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/tests/functional/w/wrong_import_position3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
} |
from django.http import HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponseNotFound, HttpResponse, HttpResponseForbidden
from SCAuthExample import settings
def user_request(request, username):
    request_method = request.META['REQUEST_METHOD']
    if request_method == 'POST':
        return register_user(request)
    elif request_method == 'GET' or request_method == 'HEAD':
        return get_user(username)
    elif request_method == 'PUT':
        return update_user(request)
    return HttpResponseNotAllowed(['POST', 'GET', 'PUT'])
def register_user(request):
return HttpResponseBadRequest('', mimetype = 'application/json', status = 409)
def get_user(username):
if username == 'demo':
return HttpResponse('', mimetype='application/json')
return HttpResponseNotFound('', mimetype = 'application/json')
def update_user(request):
if settings.CURRENT_SESSION:
return HttpResponse('', mimetype='application/json')
return HttpResponseBadRequest('', mimetype = 'application/json', status = 401)
| {
"content_hash": "59aa659598195dedcf4c7b162a241332",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 129,
"avg_line_length": 42.24,
"alnum_prop": 0.709280303030303,
"repo_name": "saikat/SCAuthExample",
"id": "9ebc730b57407895ecd9077353bc1d0889eaaa58",
"size": "1289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-J",
"bytes": "4477"
},
{
"name": "Python",
"bytes": "8105"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import json
from django import forms
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from wagtail.tests.testapp.models import FormField, FormPage
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page
from wagtail.wagtailforms.forms import FormBuilder
from wagtail.wagtailforms.models import FormSubmission
def make_form_page(**kwargs):
kwargs.setdefault('title', "Contact us")
kwargs.setdefault('slug', "contact-us")
kwargs.setdefault('to_address', "to@email.com")
kwargs.setdefault('from_address', "from@email.com")
kwargs.setdefault('subject', "The subject")
home_page = Page.objects.get(url_path='/home/')
form_page = home_page.add_child(instance=FormPage(**kwargs))
FormField.objects.create(
page=form_page,
sort_order=1,
label="Your email",
field_type='email',
required=True,
)
FormField.objects.create(
page=form_page,
sort_order=2,
label="Your message",
field_type='multiline',
required=True,
)
FormField.objects.create(
page=form_page,
sort_order=3,
label="Your choices",
field_type='checkboxes',
required=False,
choices='foo,bar,baz',
)
return form_page
class TestFormSubmission(TestCase):
def setUp(self):
# Create a form page
self.form_page = make_form_page()
def test_get_form(self):
response = self.client.get('/contact-us/')
# Check response
self.assertContains(response, """<label for="id_your-email">Your email</label>""")
self.assertTemplateUsed(response, 'tests/form_page.html')
self.assertTemplateNotUsed(response, 'tests/form_page_landing.html')
# check that variables defined in get_context are passed through to the template (#1429)
self.assertContains(response, "<p>hello world</p>")
def test_post_invalid_form(self):
response = self.client.post('/contact-us/', {
'your-email': 'bob',
'your-message': 'hello world',
'your-choices': ''
})
# Check response
self.assertContains(response, "Enter a valid email address.")
self.assertTemplateUsed(response, 'tests/form_page.html')
self.assertTemplateNotUsed(response, 'tests/form_page_landing.html')
def test_post_valid_form(self):
response = self.client.post('/contact-us/', {
'your-email': 'bob@example.com',
'your-message': 'hello world',
'your-choices': {'foo': '', 'bar': '', 'baz': ''}
})
# Check response
self.assertContains(response, "Thank you for your feedback.")
self.assertTemplateNotUsed(response, 'tests/form_page.html')
self.assertTemplateUsed(response, 'tests/form_page_landing.html')
# check that variables defined in get_context are passed through to the template (#1429)
self.assertContains(response, "<p>hello world</p>")
# Check that an email was sent
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "The subject")
self.assertIn("Your message: hello world", mail.outbox[0].body)
self.assertEqual(mail.outbox[0].to, ['to@email.com'])
self.assertEqual(mail.outbox[0].from_email, 'from@email.com')
# Check that form submission was saved correctly
form_page = Page.objects.get(url_path='/home/contact-us/')
self.assertTrue(FormSubmission.objects.filter(page=form_page, form_data__contains='hello world').exists())
def test_post_unicode_characters(self):
self.client.post('/contact-us/', {
'your-email': 'bob@example.com',
'your-message': 'こんにちは、世界',
'your-choices': {'foo': '', 'bar': '', 'baz': ''}
})
# Check the email
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Your message: こんにちは、世界", mail.outbox[0].body)
# Check the form submission
submission = FormSubmission.objects.get()
submission_data = json.loads(submission.form_data)
self.assertEqual(submission_data['your-message'], 'こんにちは、世界')
def test_post_multiple_values(self):
response = self.client.post('/contact-us/', {
'your-email': 'bob@example.com',
'your-message': 'hello world',
'your-choices': {'foo': 'on', 'bar': 'on', 'baz': 'on'}
})
# Check response
self.assertContains(response, "Thank you for your feedback.")
self.assertTemplateNotUsed(response, 'tests/form_page.html')
self.assertTemplateUsed(response, 'tests/form_page_landing.html')
# Check that the three checkbox values were saved correctly
form_page = Page.objects.get(url_path='/home/contact-us/')
submission = FormSubmission.objects.filter(
page=form_page, form_data__contains='hello world'
)
self.assertIn("foo", submission[0].form_data)
self.assertIn("bar", submission[0].form_data)
self.assertIn("baz", submission[0].form_data)
def test_post_blank_checkbox(self):
response = self.client.post('/contact-us/', {
'your-email': 'bob@example.com',
'your-message': 'hello world',
'your-choices': {},
})
# Check response
self.assertContains(response, "Thank you for your feedback.")
self.assertTemplateNotUsed(response, 'tests/form_page.html')
self.assertTemplateUsed(response, 'tests/form_page_landing.html')
# Check that the checkbox was serialised in the email correctly
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Your choices: None", mail.outbox[0].body)
class TestFormBuilder(TestCase):
def setUp(self):
# Create a form page
self.form_page = make_form_page()
# Create a form builder
self.fb = FormBuilder(self.form_page.form_fields.all())
def test_fields(self):
"""
This tests that all fields were added to the form with the correct types
"""
form_class = self.fb.get_form_class()
self.assertIn('your-email', form_class.base_fields.keys())
self.assertIn('your-message', form_class.base_fields.keys())
self.assertIsInstance(form_class.base_fields['your-email'], forms.EmailField)
self.assertIsInstance(form_class.base_fields['your-message'], forms.CharField)
class TestFormsIndex(TestCase):
fixtures = ['test.json']
def setUp(self):
self.assertTrue(self.client.login(username='siteeditor', password='password'))
self.form_page = Page.objects.get(url_path='/home/contact-us/')
def make_form_pages(self):
"""
This makes 100 form pages and adds them as children to 'contact-us'
This is used to test pagination on the forms index
"""
for i in range(100):
self.form_page.add_child(instance=FormPage(
title="Form " + str(i),
slug='form-' + str(i),
live=True
))
def test_forms_index(self):
response = self.client.get(reverse('wagtailforms:index'))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index.html')
def test_forms_index_pagination(self):
# Create some more form pages to make pagination kick in
self.make_form_pages()
# Get page two
response = self.client.get(reverse('wagtailforms:index'), {'p': 2})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index.html')
# Check that we got the correct page
self.assertEqual(response.context['form_pages'].number, 2)
def test_forms_index_pagination_invalid(self):
# Create some more form pages to make pagination kick in
self.make_form_pages()
# Get page two
response = self.client.get(reverse('wagtailforms:index'), {'p': 'Hello world!'})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index.html')
# Check that it got page one
self.assertEqual(response.context['form_pages'].number, 1)
def test_forms_index_pagination_out_of_range(self):
# Create some more form pages to make pagination kick in
self.make_form_pages()
# Get page two
response = self.client.get(reverse('wagtailforms:index'), {'p': 99999})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index.html')
# Check that it got the last page
self.assertEqual(response.context['form_pages'].number, response.context['form_pages'].paginator.num_pages)
def test_cannot_see_forms_without_permission(self):
        # Log in as a user without permission to see forms
self.assertTrue(self.client.login(username='eventeditor', password='password'))
response = self.client.get(reverse('wagtailforms:index'))
# Check that the user cannot see the form page
self.assertFalse(self.form_page in response.context['form_pages'])
def test_can_see_forms_with_permission(self):
response = self.client.get(reverse('wagtailforms:index'))
# Check that the user can see the form page
self.assertIn(self.form_page, response.context['form_pages'])
class TestFormsSubmissions(TestCase, WagtailTestUtils):
def setUp(self):
# Create a form page
self.form_page = make_form_page()
# Add a couple of form submissions
old_form_submission = FormSubmission.objects.create(
page=self.form_page,
form_data=json.dumps({
'your-email': "old@example.com",
'your-message': "this is a really old message",
}),
)
old_form_submission.submit_time = '2013-01-01T12:00:00.000Z'
old_form_submission.save()
new_form_submission = FormSubmission.objects.create(
page=self.form_page,
form_data=json.dumps({
'your-email': "new@example.com",
'your-message': "this is a fairly new message",
}),
)
new_form_submission.submit_time = '2014-01-01T12:00:00.000Z'
new_form_submission.save()
# Login
self.login()
def make_list_submissions(self):
"""
This makes 100 submissions to test pagination on the forms submissions page
"""
for i in range(100):
submission = FormSubmission(
page=self.form_page,
form_data=json.dumps({
'hello': 'world'
})
)
submission.save()
def test_list_submissions(self):
response = self.client.get(reverse('wagtailforms:list_submissions', args=(self.form_page.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index_submissions.html')
self.assertEqual(len(response.context['data_rows']), 2)
def test_list_submissions_filtering(self):
response = self.client.get(
reverse('wagtailforms:list_submissions', args=(self.form_page.id, )), {'date_from': '01/01/2014'}
)
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index_submissions.html')
self.assertEqual(len(response.context['data_rows']), 1)
def test_list_submissions_pagination(self):
self.make_list_submissions()
response = self.client.get(reverse('wagtailforms:list_submissions', args=(self.form_page.id, )), {'p': 2})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index_submissions.html')
# Check that we got the correct page
self.assertEqual(response.context['submissions'].number, 2)
def test_list_submissions_pagination_invalid(self):
self.make_list_submissions()
response = self.client.get(
reverse('wagtailforms:list_submissions', args=(self.form_page.id, )), {'p': 'Hello World!'}
)
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index_submissions.html')
# Check that we got page one
self.assertEqual(response.context['submissions'].number, 1)
def test_list_submissions_pagination_out_of_range(self):
self.make_list_submissions()
response = self.client.get(reverse('wagtailforms:list_submissions', args=(self.form_page.id, )), {'p': 99999})
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailforms/index_submissions.html')
# Check that we got the last page
self.assertEqual(response.context['submissions'].number, response.context['submissions'].paginator.num_pages)
def test_list_submissions_csv_export(self):
response = self.client.get(
reverse('wagtailforms:list_submissions', args=(self.form_page.id, )),
{'date_from': '01/01/2014', 'action': 'CSV'}
)
# Check response
self.assertEqual(response.status_code, 200)
data_line = response.content.decode().split("\n")[1]
self.assertIn('new@example.com', data_line)
def test_list_submissions_csv_export_with_unicode(self):
unicode_form_submission = FormSubmission.objects.create(
page=self.form_page,
form_data=json.dumps({
'your-email': "unicode@example.com",
'your-message': 'こんにちは、世界',
}),
)
unicode_form_submission.submit_time = '2014-01-02T12:00:00.000Z'
unicode_form_submission.save()
response = self.client.get(
reverse('wagtailforms:list_submissions', args=(self.form_page.id, )),
{'date_from': '01/02/2014', 'action': 'CSV'}
)
# Check response
self.assertEqual(response.status_code, 200)
data_line = response.content.decode('utf-8').split("\n")[1]
self.assertIn('こんにちは、世界', data_line)
class TestDeleteFormSubmission(TestCase):
fixtures = ['test.json']
def setUp(self):
self.assertTrue(self.client.login(username='siteeditor', password='password'))
self.form_page = Page.objects.get(url_path='/home/contact-us/')
    def test_delete_submission_show_confirmation(self):
response = self.client.get(reverse(
'wagtailforms:delete_submission',
args=(self.form_page.id, FormSubmission.objects.first().id)
))
        # Check that the confirmation page is shown when the HTTP method is GET
self.assertTemplateUsed(response, 'wagtailforms/confirm_delete.html')
# Check that the deletion has not happened with GET request
self.assertEqual(FormSubmission.objects.count(), 2)
def test_delete_submission_with_permissions(self):
response = self.client.post(reverse(
'wagtailforms:delete_submission',
args=(self.form_page.id, FormSubmission.objects.first().id)
))
# Check that the submission is gone
self.assertEqual(FormSubmission.objects.count(), 1)
# Should be redirected to list of submissions
self.assertRedirects(response, reverse("wagtailforms:list_submissions", args=(self.form_page.id, )))
def test_delete_submission_bad_permissions(self):
self.assertTrue(self.client.login(username="eventeditor", password="password"))
response = self.client.post(reverse(
'wagtailforms:delete_submission',
args=(self.form_page.id, FormSubmission.objects.first().id)
))
        # Check that the user received a 403 response
self.assertEqual(response.status_code, 403)
# Check that the deletion has not happened
self.assertEqual(FormSubmission.objects.count(), 2)
class TestIssue798(TestCase):
fixtures = ['test.json']
def setUp(self):
self.assertTrue(self.client.login(username='siteeditor', password='password'))
self.form_page = Page.objects.get(url_path='/home/contact-us/').specific
# Add a number field to the page
FormField.objects.create(
page=self.form_page,
label="Your favourite number",
field_type='number',
)
def test_post(self):
response = self.client.post('/contact-us/', {
'your-email': 'bob@example.com',
'your-message': 'hello world',
'your-choices': {'foo': '', 'bar': '', 'baz': ''},
'your-favourite-number': '7.3',
})
# Check response
self.assertTemplateUsed(response, 'tests/form_page_landing.html')
# Check that form submission was saved correctly
self.assertTrue(FormSubmission.objects.filter(page=self.form_page, form_data__contains='7.3').exists())
| {
"content_hash": "dd5ae9d802cb96563598283f7d3295c7",
"timestamp": "",
"source": "github",
"line_count": 467,
"max_line_length": 118,
"avg_line_length": 37.33190578158458,
"alnum_prop": 0.6292875989445911,
"repo_name": "hamsterbacke23/wagtail",
"id": "02fd3147a7b8ace26b6aa6e1bf9e35703ec6aaad",
"size": "17538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtailforms/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "172736"
},
{
"name": "HTML",
"bytes": "291553"
},
{
"name": "JavaScript",
"bytes": "116387"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "2243460"
},
{
"name": "Shell",
"bytes": "7387"
}
],
"symlink_target": ""
} |
"""
EauDouce.plugins.ssoclient
~~~~~~~~~~~~~~
SSO Client
:copyright: (c) 2017 by staugur.
:license: MIT, see LICENSE for more details.
"""
#: Importing these two modules is the first and must be done.
#: 首先导入这两个必须模块
from __future__ import absolute_import
from libs.base import PluginBase
#: Import the other modules here; if it's your own package, use a relative import, e.g.: from .lib import Lib
import requests, json
from config import SSO
from utils.web import login_required, anonymous_required, set_ssoparam, set_sessionId, get_redirect_url, get_referrer_url
from utils.tool import url_check, logger, hmac_sha256
from flask import Blueprint, request, jsonify, g, redirect, url_for, make_response
#: Your plugin name
__plugin_name__ = "ssoclient"
#: Plugin description
__description__ = "SSO Client"
#: Plugin Author
__author__ = "Mr.tao <staugur@saintic.com>"
#: Plugin Version
__version__ = "0.1.0"
#: Plugin Url
__url__ = "https://www.saintic.com"
#: Plugin License
__license__ = "MIT"
#: Plugin License File
__license_file__= "LICENSE"
#: Plugin Readme File
__readme_file__ = "README"
#: Plugin state, enabled or disabled, default: enabled
__state__ = "enabled"
# Define the SSO server address, stripping the trailing slash from the SSO config
sso_server = SSO.get("sso_server").strip("/")
# Define the request helper
def sso_request(url, params=None, data=None, timeout=5, num_retries=1):
"""
    @params dict: query-string parameters for the request
    @data dict: form data to submit
    @timeout int: timeout in seconds
    @num_retries int: number of retries on timeout
"""
headers = {"User-Agent": "Mozilla/5.0 (X11; CentOS; Linux i686; rv:7.0.1406) Gecko/20100101 PassportClient/{}".format(__version__)}
    try:
        resp = requests.post(url, params=params, headers=headers, timeout=timeout, data=data).json()
    except requests.exceptions.Timeout:
        if num_retries > 0:
            return sso_request(url, params=params, data=data, timeout=timeout, num_retries=num_retries-1)
        # Retries exhausted and no response was received.
        return None
    return resp
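# Usage sketch (illustrative values): with the default num_retries=1, a call
# like the one below makes at most two attempts before returning None:
#   resp = sso_request("{}/sso/validate".format(sso_server),
#                      params=dict(Action="validate_ticket"),
#                      data=dict(ticket="<ticket>", app_name=SSO["app_name"]))
#   if resp and resp.get("success") is True:
#       pass  # ticket accepted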
# Define the blueprint
sso_blueprint = Blueprint("sso", "sso")
@sso_blueprint.route("/Login")
@anonymous_required
def Login():
""" Client登录地址,需要跳转到SSO Server上 """
ReturnUrl = request.args.get("ReturnUrl") or get_referrer_url() or url_for("front.index", _external=True)
if url_check(sso_server):
NextUrl = "{}/sso/?sso={}".format(sso_server, set_ssoparam(ReturnUrl))
return redirect(NextUrl)
else:
return "Invalid Configuration"
@sso_blueprint.route("/Logout")
@login_required
def Logout():
""" Client注销地址,需要跳转到SSO Server上 """
ReturnUrl = request.args.get("ReturnUrl") or get_referrer_url() or url_for("front.index", _external=True)
NextUrl = "{}/signOut?ReturnUrl={}".format(sso_server, ReturnUrl)
return redirect(NextUrl)
@sso_blueprint.route("/authorized", methods=["GET", "POST"])
def authorized():
""" Client SSO 单点登录、注销入口, 根据`Action`参数判断是`ssoLogin`还是`ssoLogout` """
Action = request.args.get("Action")
if Action == "ssoLogin":
        # Single sign-on
ticket = request.args.get("ticket")
if request.method == "GET" and ticket and g.signin == False:
resp = sso_request("{}/sso/validate".format(sso_server), dict(Action="validate_ticket"), dict(ticket=ticket, app_name=SSO["app_name"], get_userinfo=True, get_userbind=False))
logger.sys.debug("SSO check ticket resp: {}".format(resp))
if resp and isinstance(resp, dict) and "success" in resp and "uid" in resp:
if resp["success"] is True:
uid = resp["uid"]
sid = resp["sid"]
expire = int(resp["expire"])
g.userinfo = resp["userinfo"].get("data") or dict()
logger.sys.debug(g.userinfo)
                    # Cache the user info
g.api.sso_set_userinfo(uid, g.userinfo, expire)
                    # Ticket validated; set the local session and allow login
sessionId = set_sessionId(uid=uid, seconds=expire, sid=sid)
response = make_response(redirect(get_redirect_url("front.index")))
response.set_cookie(key="sessionId", value=sessionId, max_age=expire, httponly=True, secure=False if request.url_root.split("://")[0] == "http" else True)
return response
elif Action == "ssoLogout":
        # Single sign-out
ReturnUrl = request.args.get("ReturnUrl") or get_referrer_url() or url_for("front.index", _external=True)
NextUrl = "{}/signOut?ReturnUrl={}".format(sso_server, ReturnUrl)
app_name = request.args.get("app_name")
if request.method == "GET" and NextUrl and app_name and g.signin == True and app_name == SSO["app_name"]:
response = make_response(redirect(NextUrl))
response.set_cookie(key="sessionId", value="", expires=0)
return response
elif Action == "ssoConSync":
        # Data sync: the request must carry a signature equal to the
        # uppercase hmac_sha256(app_name:app_id:app_secret) value.
        # This could instead require login; the passport sessionId can be
        # parsed into the required sid and uid.
signature = request.args.get("signature")
if request.method == "POST" and signature and signature == hmac_sha256("{}:{}:{}".format(SSO["app_name"], SSO["app_id"], SSO["app_secret"])).upper():
try:
data = json.loads(request.form.get("data"))
ct = data["CallbackType"]
cd = data["CallbackData"]
uid = data["uid"]
token = data["token"]
except Exception,e:
logger.plugin.warning(e)
else:
logger.plugin.info("ssoConSync with uid: {} -> {}: {}".format(uid, ct, cd))
resp = sso_request("{}/sso/validate".format(sso_server), dict(Action="validate_sync"), dict(token=token, uid=uid))
logger.plugin.debug("ssoCronSync resp: {}".format(resp))
if resp and isinstance(resp, dict) and resp.get("success") is True:
                # Then process the callback data (cd) according to the callback type (ct)
logger.plugin.debug("ssoConSync is ok")
if ct == "user_profile":
# like {u'nick_name': u'.\u5f18\u5f08', u'gender': u'1', u'domain_name': u'taochengwei', u'birthday': u'1995-04-22', u'location': u'\u5317\u4eac \u671d\u9633', u'signature': u'\u5c81\u6708\u5982\u5200\u65a9\u5929\u9a84'}
logger.plugin.debug("sync user_profile before: {}".format(g.userinfo))
g.userinfo.update(cd)
logger.plugin.debug("sync user_profile after: {}".format(g.userinfo))
elif ct == "user_avatar":
logger.plugin.debug("sync user_avatar before: {}".format(g.userinfo["avatar"]))
g.userinfo["avatar"] = cd
logger.plugin.debug("sync user_avatar after: {}".format(g.userinfo["avatar"]))
return jsonify(msg="Synchronization completed", success=g.api.sso_set_userinfo(uid, g.userinfo), app_name=SSO["app_name"])
return "Invalid Authorized"
#: Return the plugin's main class
def getPluginClass():
return SSOClientMain
#: Plugin main class; its name need not match the plugin name, as long as getPluginClass returns this class
class SSOClientMain(PluginBase):
def register_bep(self):
"""注册蓝图入口, 返回蓝图路由前缀及蓝图名称"""
bep = {"prefix": "/sso", "blueprint": sso_blueprint}
return bep
| {
"content_hash": "f8ad0c6f6eb380b61ce2e9ba2a99f80a",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 244,
"avg_line_length": 44.40119760479042,
"alnum_prop": 0.6089008766014835,
"repo_name": "staugur/EauDouce",
"id": "6f7f65c2d86f79196494fcbe1f22bb95502c6269",
"size": "8053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/plugins/ssoclient/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622187"
},
{
"name": "Dockerfile",
"bytes": "270"
},
{
"name": "HTML",
"bytes": "161302"
},
{
"name": "JavaScript",
"bytes": "489048"
},
{
"name": "Python",
"bytes": "162345"
},
{
"name": "Shell",
"bytes": "6215"
},
{
"name": "TSQL",
"bytes": "6403"
}
],
"symlink_target": ""
} |
from hs_core.hydroshare.utils import get_resource_file_name_and_extension
from hs_file_types.models import GeoRasterLogicalFile, GeoRasterFileMetaData, GenericLogicalFile
def assert_raster_file_type_metadata(self):
# test the resource now has 2 files (vrt file added as part of metadata extraction)
self.assertEqual(self.composite_resource.files.all().count(), 2)
# check that the 2 resource files are now associated with GeoRasterLogicalFile
for res_file in self.composite_resource.files.all():
self.assertEqual(res_file.logical_file_type_name, "GeoRasterLogicalFile")
self.assertEqual(res_file.has_logical_file, True)
self.assertTrue(isinstance(res_file.logical_file, GeoRasterLogicalFile))
# check that we put the 2 files in a new folder (small_logan)
for res_file in self.composite_resource.files.all():
file_path, base_file_name, _ = get_resource_file_name_and_extension(res_file)
expected_file_path = "{}/data/contents/small_logan/{}"
expected_file_path = expected_file_path.format(self.composite_resource.root_path,
base_file_name)
self.assertEqual(file_path, expected_file_path)
# check that there is no GenericLogicalFile object
self.assertEqual(GenericLogicalFile.objects.count(), 0)
# check that there is one GeoRasterLogicalFile object
self.assertEqual(GeoRasterLogicalFile.objects.count(), 1)
res_file = self.composite_resource.files.first()
# check that the logicalfile is associated with 2 files
logical_file = res_file.logical_file
self.assertEqual(logical_file.dataset_name, 'small_logan')
self.assertEqual(logical_file.has_metadata, True)
self.assertEqual(logical_file.files.all().count(), 2)
self.assertEqual(set(self.composite_resource.files.all()),
set(logical_file.files.all()))
    # test that the size property of the logical file is equal to the sum of
    # the sizes of all files that are part of the logical file
self.assertEqual(logical_file.size, sum([f.size for f in logical_file.files.all()]))
# test that there should be 1 object of type GeoRasterFileMetaData
self.assertEqual(GeoRasterFileMetaData.objects.count(), 1)
# test that the metadata associated with logical file id of type GeoRasterFileMetaData
self.assertTrue(isinstance(logical_file.metadata, GeoRasterFileMetaData))
# there should be 2 format elements associated with resource
self.assertEqual(self.composite_resource.metadata.formats.all().count(), 2)
self.assertEqual(
self.composite_resource.metadata.formats.all().filter(value='application/vrt').count(),
1)
self.assertEqual(self.composite_resource.metadata.formats.all().filter(
value='image/tiff').count(), 1)
# test extracted metadata for the file type
# geo raster file type should have all the metadata elements
self.assertEqual(logical_file.metadata.has_all_required_elements(), True)
# there should be 1 coverage element - box type
self.assertNotEqual(logical_file.metadata.spatial_coverage, None)
self.assertEqual(logical_file.metadata.spatial_coverage.type, 'box')
box_coverage = logical_file.metadata.spatial_coverage
self.assertEqual(box_coverage.value['projection'], 'WGS 84 EPSG:4326')
self.assertEqual(box_coverage.value['units'], 'Decimal degrees')
self.assertEqual(box_coverage.value['northlimit'], 42.049364058252266)
self.assertEqual(box_coverage.value['eastlimit'], -111.57773718106195)
self.assertEqual(box_coverage.value['southlimit'], 41.987884327209976)
self.assertEqual(box_coverage.value['westlimit'], -111.69756293084055)
# testing extended metadata element: original coverage
ori_coverage = logical_file.metadata.originalCoverage
self.assertNotEqual(ori_coverage, None)
self.assertEqual(ori_coverage.value['northlimit'], 4655492.446916306)
self.assertEqual(ori_coverage.value['eastlimit'], 452144.01909127034)
self.assertEqual(ori_coverage.value['southlimit'], 4648592.446916306)
self.assertEqual(ori_coverage.value['westlimit'], 442274.01909127034)
self.assertEqual(ori_coverage.value['units'], 'meter')
self.assertEqual(ori_coverage.value['projection'],
'NAD83 / UTM zone 12N')
# testing extended metadata element: cell information
cell_info = logical_file.metadata.cellInformation
self.assertEqual(cell_info.rows, 230)
self.assertEqual(cell_info.columns, 329)
self.assertEqual(cell_info.cellSizeXValue, 30.0)
self.assertEqual(cell_info.cellSizeYValue, 30.0)
self.assertEqual(cell_info.cellDataType, 'Float32')
# testing extended metadata element: band information
self.assertEqual(logical_file.metadata.bandInformations.count(), 1)
band_info = logical_file.metadata.bandInformations.first()
self.assertEqual(band_info.noDataValue, '-3.40282346639e+38')
self.assertEqual(band_info.maximumValue, '2880.00708008')
self.assertEqual(band_info.minimumValue, '1870.63659668')
| {
"content_hash": "f9dfdd4a32710efdc4141d8f3f5e788e",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 96,
"avg_line_length": 52.864583333333336,
"alnum_prop": 0.7316256157635468,
"repo_name": "FescueFungiShare/hydroshare",
"id": "4ffcb544d402f356ae80377fcf77e85ce963439d",
"size": "5075",
"binary": false,
"copies": "1",
"ref": "refs/heads/FescueFungiShare-develop",
"path": "hs_file_types/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "374952"
},
{
"name": "HTML",
"bytes": "1107800"
},
{
"name": "JavaScript",
"bytes": "1822132"
},
{
"name": "Python",
"bytes": "3599347"
},
{
"name": "R",
"bytes": "4475"
},
{
"name": "Shell",
"bytes": "49970"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-locksmith',
version='0.1.1',
packages=['locksmith',
'locksmith.management',
'locksmith.management.commands',
'locksmith.templatetags'],
include_package_data=True,
license='MIT License',
description='Simple access control system.',
long_description=README,
url='https://github.com/MichalMazurek/django-locksmith',
author='Michal Mazurek',
zip_safe=False,
author_email='me@michalmazurek.eu',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
) | {
"content_hash": "9230623714960fde0c7471940c0e5e45",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 34.24324324324324,
"alnum_prop": 0.611681136543015,
"repo_name": "MichalMazurek/django-locksmith",
"id": "f24c8e76c82130d47935cd4b54c39282060784a2",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9777"
}
],
"symlink_target": ""
} |
"""Top-level package for SQLAlchemy Dremio."""
__author__ = """Ajish George"""
__email__ = 'yell@aji.sh'
__version__ = '0.1.0'
# Dremio supported data types: https://docs.dremio.com/sql-reference/data-types.html
# BOOLEAN # VARBINARY # DATE # FLOAT
# DECIMAL # DOUBLE # INTERVAL # INT
# BIGINT # TIME # TIMESTAMP # VARCHAR
# MAP # LIST
# TODO: figure out how to handle DOUBLE properly
# TODO: figure out how to deal with LIST and MAP types
from sqlalchemy.dialects import registry
registry.register("dremio", "sqlalchemy_dremio.pyodbc", "DremioDialect_pyodbc")
registry.register("dremio.pyodbc", "sqlalchemy_dremio.pyodbc", "DremioDialect_pyodbc")
| {
"content_hash": "9492d37b79a9188727d81c87ee85af47",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 86,
"avg_line_length": 36.73684210526316,
"alnum_prop": 0.6747851002865329,
"repo_name": "sqggles/sqlalchemy_dremio",
"id": "6604e4a417e87d5fed3bb3cc03cc042dfad1b365",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlalchemy_dremio/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2309"
},
{
"name": "Python",
"bytes": "34167"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="namelengthsrc", parent_name="violin.hoverlabel", **kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "eb116f35baf8d57711dc08d4131b8dab",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 84,
"avg_line_length": 34.285714285714285,
"alnum_prop": 0.6083333333333333,
"repo_name": "plotly/python-api",
"id": "dd0d0e007091d85d8c2b3a429d17382a5168cbc8",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/violin/hoverlabel/_namelengthsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 1 16:46:25 2014
@author: micha
"""
# model header, 2 substitutions
# $dt: timestep in MS
# $timestamp: a time stamp
model_header = """
/* GeNN model generated by PyNN */
/* $timestamp */
#define DT $dt
#include "modelSpec.h"
#include "modelSpec.cc"
"""
# ... in between here we later insert the parameter definitions
# model_definition_header substitutes $modelname
model_definition_header = """
void modelDefinition(NNmodel &model)
{
initGeNN();
model.setName("$modelname");
"""
# ... then come the NeuronPopulations
# ... then the SynapsePopulations
# ... then anything else we'd like to add (e.g. model.setSynapseG() or so)
model_GPU_selection = """
model.setGPUDevice($nGPU);
"""
model_definition_footer = """
model.setSeed($model_seed);
model.setPrecision($C_TYPE);
}
"""
| {
"content_hash": "7c7aedee726508c24cda32bb690955ec",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 18.434782608695652,
"alnum_prop": 0.6721698113207547,
"repo_name": "Huitzilo/pynn-genn",
"id": "3d9183deb0b745b72bef1371537b3fc2dda746cf",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "genn/templates/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39298"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import datetime
import socket
from pprint import pprint
import time
import yaml
import six
from fabric.api import (
env,
require,
runs_once,
settings,
)
from burlap import common
from burlap.common import (
run_or_dryrun,
local_or_dryrun,
get_dryrun,
)
from burlap import constants as c
from burlap.decorators import task_or_dryrun
try:
import boto
import boto.ec2
except ImportError:
boto = None
EC2 = 'ec2'
KVM = 'kvm'
#env.vm_type = None
#env.vm_group = None
if 'vm_name_tag' not in env:
env.vm_name_tag = 'Name'
env.vm_group_tag = 'Group'
env.vm_release_tag = 'Release'
env.vm_type = None
# If a name is not given, one will be auto-generated based on this pattern.
env.vm_name_template = 'web{index}'
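# e.g. with the default template above, if two instances already exist in the
# group the next one is named 'web3'.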
# A release tag given to the instance when created to distinguish it from
# future upgrades to the same instance name.
env.vm_release = None
env.vm_ec2_account_id = None
# https://help.ubuntu.com/community/EC2StartersGuide#Official_Ubuntu_Cloud_Guest_Amazon_Machine_Images_.28AMIs.29
env.vm_ec2_ami = None # e.g. 'ami-a29943cb'
env.vm_ec2_instance_type = None # e.g. 'm1.small'
env.vm_ec2_ebs = None
env.vm_ec2_region = None # e.g. 'us-east-1'
env.vm_ec2_zone = None # e.g. 'us-east-1b'
env.vm_ec2_available_security_groups = {} # {name: [(protocol, from_port, to_port, ip_range)]}
env.vm_ec2_selected_security_groups = []
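# e.g. (illustrative values only, not defaults shipped with burlap):
# env.vm_ec2_available_security_groups = {
#     'web': [('tcp', 80, 80, '0.0.0.0/0'), ('tcp', 22, 22, '0.0.0.0/0')]}
# env.vm_ec2_selected_security_groups = ['web']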
env.vm_ec2_aws_access_key_id = None
env.vm_ec2_aws_secret_access_key = None
env.vm_ec2_volume = '/dev/sdh1'
env.vm_ec2_keypair_name = None
env.vm_ec2_use_elastic_ip = False
env.vm_ec2_subnet_id = None
env.vm_ec2_allocate_address_domain = None
# If true, we will attempt to add or delete group rules.
env.vm_ec2_security_group_owner = False
# Stores dynamically allocated EIP for each host, {hostname: ip}.
# Usually stored in a shelf file.
env.vm_elastic_ip_mappings = None
def retrieve_ec2_hosts(extended=0, site=None):
verbose = common.get_verbose()
extended = int(extended)
if verbose:
print('site:', site)
for host_name, data in list_instances(show=0, verbose=verbose).items():
if verbose:
print('host_name:', host_name)
pprint(data, indent=4)
# Ignore hosts that are disabled for the given site.
if site not in (None, c.ALL) and env.available_sites_by_host and host_name in env.available_sites_by_host:
if site not in env.available_sites_by_host[host_name]:
if verbose:
print('skipping because site %s is not set for this host' % site)
continue
if extended:
yield (host_name, data)
elif data.public_dns_name:
yield data.public_dns_name
else:
yield data.ip
env.hosts_retrievers[EC2] = retrieve_ec2_hosts
def translate_ec2_hostname(hostname):
verbose = common.get_verbose()
for name, data in list_instances(show=0, verbose=verbose).items():
if name == hostname:
return data.public_dns_name
env.hostname_translators[EC2] = translate_ec2_hostname
def get_ec2_connection():
conn = boto.ec2.connect_to_region(
#env.vm_ec2_zone,
env.vm_ec2_region,
aws_access_key_id=env.vm_ec2_aws_access_key_id,
aws_secret_access_key=env.vm_ec2_aws_secret_access_key,
)
assert conn, 'Unable to create EC2 connection with region %s and access key %s.' % (env.vm_ec2_region, env.vm_ec2_aws_access_key_id)
return conn
def get_all_ec2_instances(instance_ids=None):
conn = get_ec2_connection()
#return sum(map(lambda r: r.instances, conn.get_all_instances(instance_ids=instance_ids)), [])
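    # e.g. reservations holding [i1] and [i2, i3] flatten to [i1, i2, i3]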
return sum([r.instances for r in conn.get_all_instances(instance_ids=instance_ids)], [])
def get_all_running_ec2_instances():
#instances = filter(lambda i: i.state == 'running', get_all_ec2_instances())
instances = [i for i in get_all_ec2_instances() if i.state == 'running']
instances.reverse()
return instances
@task_or_dryrun
#@runs_once #breaks get_or_create()
def list_instances(show=1, name=None, group=None, release=None, except_release=None):
"""
Retrieves all virtual machines instances in the current environment.
"""
from burlap.common import shelf, OrderedDict, get_verbose
verbose = get_verbose()
require('vm_type', 'vm_group')
assert env.vm_type, 'No VM type specified.'
env.vm_type = (env.vm_type or '').lower()
_name = name
_group = group
_release = release
if verbose:
print('name=%s, group=%s, release=%s' % (_name, _group, _release))
env.vm_elastic_ip_mappings = shelf.get('vm_elastic_ip_mappings')
data = type(env)()
if env.vm_type == EC2:
if verbose:
print('Checking EC2...')
for instance in get_all_running_ec2_instances():
name = instance.tags.get(env.vm_name_tag)
group = instance.tags.get(env.vm_group_tag)
release = instance.tags.get(env.vm_release_tag)
if env.vm_group and env.vm_group != group:
if verbose:
print(('Skipping instance %s because its group "%s" '
'does not match env.vm_group "%s".') \
% (instance.public_dns_name, group, env.vm_group))
continue
if _group and group != _group:
if verbose:
print(('Skipping instance %s because its group "%s" '
'does not match local group "%s".') \
% (instance.public_dns_name, group, _group))
continue
if _name and name != _name:
if verbose:
print(('Skipping instance %s because its name "%s" '
'does not match name "%s".') \
% (instance.public_dns_name, name, _name))
continue
if _release and release != _release:
if verbose:
print(('Skipping instance %s because its release "%s" '
'does not match release "%s".') \
% (instance.public_dns_name, release, _release))
continue
if except_release and release == except_release:
continue
if verbose:
print('Adding instance %s (%s).' % (name, instance.public_dns_name or instance.ip_address))
data.setdefault(name, type(env)())
data[name]['id'] = instance.id
data[name]['public_dns_name'] = instance.public_dns_name or instance.ip_address
if verbose:
print('Public DNS: %s' % instance.public_dns_name)
if env.vm_elastic_ip_mappings and name in env.vm_elastic_ip_mappings:
data[name]['ip'] = env.vm_elastic_ip_mappings[name]
else:
data[name]['ip'] = socket.gethostbyname(instance.public_dns_name)
if int(show):
pprint(data, indent=4)
return data
if env.vm_type == KVM:
#virsh list
pass
else:
raise NotImplementedError
#@task_or_dryrun
#@runs_once
#def list(*args, **kwargs):
# #execute(list_instances, *args, **kwargs)
# list_instances(*args, **kwargs)
def set_ec2_security_group_id(name, id): # pylint: disable=redefined-builtin
from burlap.common import shelf, OrderedDict
v = shelf.get('vm_ec2_security_group_ids', OrderedDict())
v[name] = str(id)
shelf.set('vm_ec2_security_group_ids', v)
@task_or_dryrun
def get_ec2_security_group_id(name=None, verbose=0):
from burlap.common import shelf, OrderedDict
verbose = int(verbose)
group_id = None
conn = get_ec2_connection()
groups = conn.get_all_security_groups()
for group in groups:
if verbose:
print('group:', group.name, group.id)
if group.name == name:
group_id = group.id
# Otherwise try the local cache.
if not group_id:
v = shelf.get('vm_ec2_security_group_ids', OrderedDict())
group_id = v.get(name)
if verbose:
print(group_id)
return group_id
@task_or_dryrun
def get_or_create_ec2_security_groups(names=None, verbose=1):
"""
    Gets or creates the named EC2 security groups and syncs their rules.
"""
verbose = int(verbose)
if verbose:
print('Creating EC2 security groups...')
conn = get_ec2_connection()
if isinstance(names, six.string_types):
names = names.split(',')
names = names or env.vm_ec2_selected_security_groups
if verbose:
print('Group names:', names)
ret = []
for name in names:
try:
group_id = get_ec2_security_group_id(name)
if verbose:
print('group_id:', group_id)
#group = conn.get_all_security_groups(groupnames=[name])[0]
# Note, groups in a VPC can't be referred to by name?
group = conn.get_all_security_groups(group_ids=[group_id])[0]
except boto.exception.EC2ResponseError as e:
if verbose:
print(e)
group = get_ec2_connection().create_security_group(
name,
name,
vpc_id=env.vm_ec2_vpc_id,
)
print('group_id:', group.id)
set_ec2_security_group_id(name, group.id)
ret.append(group)
# Find existing rules.
actual_sets = set()
for rule in list(group.rules):
ip_protocol = rule.ip_protocol
from_port = rule.from_port
to_port = rule.to_port
for cidr_ip in rule.grants:
#print('Revoking:', ip_protocol, from_port, to_port, cidr_ip)
#group.revoke(ip_protocol, from_port, to_port, cidr_ip)
rule_groups = ((rule.groups and rule.groups.split(',')) or [None])
for src_group in rule_groups:
src_group = (src_group or '').strip()
if src_group:
actual_sets.add((ip_protocol, from_port, to_port, str(cidr_ip), src_group))
else:
actual_sets.add((ip_protocol, from_port, to_port, str(cidr_ip)))
        # Find expected rules.
expected_sets = set()
for authorization in env.vm_ec2_available_security_groups.get(name, []):
if verbose:
print('authorization:', authorization)
if len(authorization) == 4 or (len(authorization) == 5 and not (authorization[-1] or '').strip()):
src_group = None
ip_protocol, from_port, to_port, cidr_ip = authorization[:4]
if cidr_ip:
expected_sets.add((ip_protocol, str(from_port), str(to_port), cidr_ip))
else:
ip_protocol, from_port, to_port, cidr_ip, src_group = authorization
if cidr_ip:
expected_sets.add((ip_protocol, str(from_port), str(to_port), cidr_ip, src_group))
# Calculate differences and update rules if we own the group.
if env.vm_ec2_security_group_owner:
if verbose:
print('expected_sets:')
print(expected_sets)
print('actual_sets:')
print(actual_sets)
del_sets = actual_sets.difference(expected_sets)
if verbose:
print('del_sets:')
print(del_sets)
add_sets = expected_sets.difference(actual_sets)
if verbose:
print('add_sets:')
print(add_sets)
# Revoke deleted.
for auth in del_sets:
print(len(auth))
print('revoking:', auth)
group.revoke(*auth)
# Create fresh rules.
for auth in add_sets:
print('authorizing:', auth)
group.authorize(*auth)
return ret
@task_or_dryrun
def get_or_create_ec2_key_pair(name=None, verbose=1):
"""
Creates and saves an EC2 key pair to a local PEM file.
"""
verbose = int(verbose)
name = name or env.vm_ec2_keypair_name
pem_path = 'roles/%s/%s.pem' % (env.ROLE, name)
conn = get_ec2_connection()
kp = conn.get_key_pair(name)
if kp:
print('Key pair %s already exists.' % name)
else:
# Note, we only get the private key during creation.
# If we don't save it here, it's lost forever.
kp = conn.create_key_pair(name)
open(pem_path, 'wb').write(kp.material)
os.system('chmod 600 %s' % pem_path)
print('Key pair %s created.' % name)
#return kp
return pem_path
@task_or_dryrun
def list_security_groups():
conn = get_ec2_connection()
sgs = conn.get_all_security_groups()
print('Id,Name,Number of Instances')
for sg in sorted(sgs, key=lambda o: o.name):
print('%s,%s,%s' % (sg.id, sg.name, len(sg.instances())))
def get_or_create_ec2_instance(name=None, group=None, release=None, verbose=0, backend_opts=None):
"""
Creates a new EC2 instance.
You should normally run get_or_create() instead of directly calling this.
"""
from burlap.common import shelf, OrderedDict
from boto.exception import EC2ResponseError
assert name, "A name must be specified."
backend_opts = backend_opts or {}
verbose = int(verbose)
conn = get_ec2_connection()
security_groups = get_or_create_ec2_security_groups()
security_group_ids = [_.id for _ in security_groups]
if verbose:
print('security_groups:', security_group_ids)
pem_path = get_or_create_ec2_key_pair()
assert env.vm_ec2_ami, 'No AMI specified.'
print('Creating EC2 instance from %s...' % (env.vm_ec2_ami,))
print(env.vm_ec2_zone)
opts = backend_opts.get('run_instances', {})
reservation = conn.run_instances(
env.vm_ec2_ami,
key_name=env.vm_ec2_keypair_name,
#security_groups=env.vm_ec2_selected_security_groups,#conflicts with subnet_id?!
security_group_ids=security_group_ids,
placement=env.vm_ec2_zone,
instance_type=env.vm_ec2_instance_type,
subnet_id=env.vm_ec2_subnet_id,
**opts
)
instance = reservation.instances[0]
# Name new instance.
    # Note, creation is not instantaneous, so we may have to wait for a moment
    # before we can access it.
while 1:
try:
if name:
instance.add_tag(env.vm_name_tag, name)
if group:
instance.add_tag(env.vm_group_tag, group)
if release:
instance.add_tag(env.vm_release_tag, release)
break
except EC2ResponseError as e:
#print('Unable to set tag: %s' % e)
print('Waiting for the instance to be created...')
if verbose:
print(e)
time.sleep(3)
# Assign IP.
allocation_id = None
if env.vm_ec2_use_elastic_ip:
# Initialize name/ip mapping since we can't tag elastic IPs.
shelf.setdefault('vm_elastic_ip_mappings', OrderedDict())
vm_elastic_ip_mappings = shelf.get('vm_elastic_ip_mappings')
elastic_ip = vm_elastic_ip_mappings.get(name)
if not elastic_ip:
print('Allocating new elastic IP address...')
addr = conn.allocate_address(domain=env.vm_ec2_allocate_address_domain)
#allocation_id = addr.allocation_id
#print('allocation_id:',allocation_id)
elastic_ip = addr.public_ip
print('Allocated address %s.' % elastic_ip)
vm_elastic_ip_mappings[name] = str(elastic_ip)
shelf.set('vm_elastic_ip_mappings', vm_elastic_ip_mappings)
#conn.get_all_addresses()
# Lookup allocation_id.
all_eips = conn.get_all_addresses()
for eip in all_eips:
if elastic_ip == eip.public_ip:
allocation_id = eip.allocation_id
break
print('allocation_id:', allocation_id)
while 1:
try:
conn.associate_address(
instance_id=instance.id,
#public_ip=elastic_ip,
allocation_id=allocation_id, # needed for VPC instances
)
print('IP address associated!')
break
except EC2ResponseError as e:
#print('Unable to assign IP: %s' % e)
print('Waiting to associate IP address...')
if verbose:
print(e)
time.sleep(3)
# Confirm public DNS name was assigned.
while 1:
try:
instance = get_all_ec2_instances(instance_ids=[instance.id])[0]
#assert instance.public_dns_name, 'No public DNS name found!'
if instance.public_dns_name:
break
except Exception as e:
print('error:', e)
except SystemExit as e:
print('systemexit:', e)
print('Waiting for public DNS name to be assigned...')
time.sleep(3)
# Confirm we can SSH into the server.
#TODO:better handle timeouts? try/except doesn't really work?
env.connection_attempts = 10
while 1:
try:
with settings(warn_only=True):
env.host_string = instance.public_dns_name
ret = run_or_dryrun('who -b')
#print 'ret.return_code:',ret.return_code
if not ret.return_code:
break
except Exception as e:
print('error:', e)
except SystemExit as e:
print('systemexit:', e)
print('Waiting for sshd to accept connections...')
time.sleep(3)
print("")
print("Login with: ssh -o StrictHostKeyChecking=no -i %s %s@%s" \
% (pem_path, env.user, instance.public_dns_name))
print("OR")
print("fab %(ROLE)s:hostname=%(name)s shell" % dict(name=name, ROLE=env.ROLE))
ip = socket.gethostbyname(instance.public_dns_name)
print("")
print("""Example hosts entry:)
%(ip)s www.mydomain.com # %(name)s""" % dict(ip=ip, name=name))
return instance
@task_or_dryrun
def exists(name=None, group=None, release=None, except_release=None, verbose=1):
"""
Determines if a virtual machine instance exists.
"""
verbose = int(verbose)
instances = list_instances(
name=name,
group=group,
release=release,
except_release=except_release,
verbose=verbose,
show=verbose)
ret = bool(instances)
if verbose:
print('\ninstance %s exist' % ('DOES' if ret else 'does NOT'))
#return ret
return instances
@task_or_dryrun
def get_or_create(name=None, group=None, config=None, extra=0, verbose=0, backend_opts=None):
"""
Creates a virtual machine instance.
"""
require('vm_type', 'vm_group')
backend_opts = backend_opts or {}
verbose = int(verbose)
extra = int(extra)
if config:
config_fn = common.find_template(config)
config = yaml.load(open(config_fn), Loader=yaml.SafeLoader)
env.update(config)
env.vm_type = (env.vm_type or '').lower()
assert env.vm_type, 'No VM type specified.'
group = group or env.vm_group
assert group, 'No VM group specified.'
ret = exists(name=name, group=group)
if not extra and ret:
if verbose:
print('VM %s:%s exists.' % (name, group))
return ret
today = datetime.date.today()
release = int('%i%02i%02i' % (today.year, today.month, today.day))
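    # e.g. datetime.date(2014, 12, 1) yields release 20141201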
if not name:
existing_instances = list_instances(
group=group,
release=release,
verbose=verbose)
name = env.vm_name_template.format(index=len(existing_instances)+1)
if env.vm_type == EC2:
return get_or_create_ec2_instance(
name=name,
group=group,
release=release,
verbose=verbose,
backend_opts=backend_opts)
raise NotImplementedError
@task_or_dryrun
def delete(name=None, group=None, release=None, except_release=None,
dryrun=1, verbose=1):
"""
Permanently erase one or more VM instances from existence.
"""
verbose = int(verbose)
if env.vm_type == EC2:
conn = get_ec2_connection()
instances = list_instances(
name=name,
group=group,
release=release,
except_release=except_release,
)
for instance_name, instance_data in instances.items():
public_dns_name = instance_data['public_dns_name']
print('\nDeleting %s (%s)...' \
% (instance_name, instance_data['id']))
if not get_dryrun():
conn.terminate_instances(instance_ids=[instance_data['id']])
# Clear host key on localhost.
known_hosts = os.path.expanduser('~/.ssh/known_hosts')
cmd = 'ssh-keygen -f "%s" -R %s' % (known_hosts, public_dns_name)
local_or_dryrun(cmd)
else:
raise NotImplementedError
@task_or_dryrun
def get_name():
"""
Retrieves the instance name associated with the current host string.
"""
if env.vm_type == EC2:
for instance in get_all_running_ec2_instances():
if env.host_string == instance.public_dns_name:
name = instance.tags.get(env.vm_name_tag)
return name
else:
raise NotImplementedError
@task_or_dryrun
def respawn(name=None, group=None):
"""
Deletes and recreates one or more VM instances.
"""
if name is None:
name = get_name()
delete(name=name, group=group)
instance = get_or_create(name=name, group=group)
env.host_string = instance.public_dns_name
@task_or_dryrun
def shutdown(force=False):
#virsh shutdown <name>
#virsh destroy <name> #to force
raise NotImplementedError
@task_or_dryrun
def reboot():
#virsh reboot <name>
raise NotImplementedError
@task_or_dryrun
@runs_once
def list_ips():
data = list_instances(show=0, verbose=0)
for key, attrs in data.items():
print(attrs.get('ip'), key)
| {
"content_hash": "0671701ec679fe70d40040cd66acceb7",
"timestamp": "",
"source": "github",
"line_count": 671,
"max_line_length": 136,
"avg_line_length": 33.41132637853949,
"alnum_prop": 0.5809804183951113,
"repo_name": "chrisspen/burlap",
"id": "8f698b71a7e0fbb98febe3aed1a7cc36eec55854",
"size": "22419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "burlap/vm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "722479"
},
{
"name": "Shell",
"bytes": "11659"
}
],
"symlink_target": ""
} |
import numpy as np
from acoustics import Signal
import pytest
import tempfile
import itertools as it
from acoustics.signal import EqualBand
#def test_operator():
#n = 10000
#fs = 5000
def test_wav():
"""Test writing to and reading from wav file."""
duration = 5.0
fs = 10025
samples = int(fs*duration)
channels = 3
signal = Signal(np.random.randn(channels, samples), fs)
    with tempfile.TemporaryFile() as file:
        signal.to_wav(file)
        file.seek(0)  # rewind so from_wav reads from the start of the file
        signal = Signal.from_wav(file)
assert signal.samples == samples
assert signal.fs == fs
assert signal.channels == channels
class TestSignal():
# (channels, samples, sample rate)
@pytest.fixture(params=[(1, 88200, 22050), (3, 88200, 22050), (3, 88200, 44100)])
def signal(self, request):
return Signal(np.random.randn(request.param[0], request.param[1]), request.param[2])
def test_samples(self, signal):
x = signal.samples
def test_calibrate_to_scalar(self, signal):
# Scalar decibel
signal.calibrate_to(100.0)
signal.copy().calibrate_to(100.0, inplace=True)
def test_calibrate_to_channels(self, signal):
# Value per channel. Note that [...,None] is required!
signal.calibrate_to((np.ones(signal.channels)*100.0)[...,None])
signal.copy().calibrate_to((np.ones(signal.channels)*100.0)[...,None], inplace=True)
def test_calibrate_to_samples(self, signal):
# Value per samples
signal.calibrate_to(np.ones(signal.samples))
signal.copy().calibrate_to(np.ones(signal.samples), inplace=True)
def test_calibrate_to_samples_channels(self, signal):
# Value per sample per channel
signal.calibrate_to(np.ones(signal.shape))
signal.copy().calibrate_to(np.ones(signal.shape), inplace=True)
def test_calibrate_with(self, signal):
calibration_signal_level = 50.0
decibel = 94.0
calibration_signal = Signal(np.random.randn(signal.samples), signal.fs).calibrate_to(calibration_signal_level)
out = signal.calibrate_with(calibration_signal, decibel)
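        # Calibrating against a 94 dB reference that was recorded at 50 dB
        # should raise the signal's level by roughly 44 dB.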
assert ( (out.leq() - signal.leq()).mean() - (decibel - calibration_signal_level) ) < 0.01
def test_channels(self, signal):
x = signal.channels
def test_duration(self, signal):
x = signal.duration
def test_decimate(self, signal):
factor = 4
decimated = signal.decimate(factor)
assert (signal.fs / factor == decimated.fs)
def test_upsample(self, signal):
factor = 2
assert (signal.upsample(factor).fs / signal.fs) == factor
def test_gain_scalar(self, signal):
gain = +20.0
# `.all()` because of multichannel signals
assert ( np.abs( signal.gain(gain).leq() - (signal.leq() + gain) ) < 0.01 ).all()
assert ( np.abs( signal.copy().gain(gain, inplace=True).leq() - (signal.leq()+gain) ) < 0.01 ).all()
def test_pick(self, signal):
x = signal.pick(signal.duration*0.1, signal.duration*0.6)
def test_times(self, signal):
times = signal.times()
def test_energy(self, signal):
energy = signal.energy()
def test_power(self, signal):
power = signal.power()
def test_ms(self, signal):
ms = signal.ms()
def test_rms(self, signal):
rms = signal.rms()
def test_correlate(self, signal):
signal = signal[..., 0:100]
if signal.channels > 1: # Multichannel is not supported
with pytest.raises(ValueError):
assert((signal.correlate()==signal.correlate(signal)).all())
else:
assert((signal.correlate()==signal.correlate(signal)).all())
def test_amplitude_envelope(self, signal):
x = signal.amplitude_envelope()
def test_instantaneous_frequency(self, signal):
x = signal.instantaneous_frequency()
def test_instantaneous_phase(self, signal):
x = signal.instantaneous_phase()
def test_detrend(self, signal):
x = signal.detrend()
def test_unwrap(self, signal):
x = signal.unwrap()
def test_complex_cepstrum(self, signal):
t, c, d = signal.complex_cepstrum()
def test_real_cepstrum(self, signal):
t, c = signal.real_cepstrum()
def test_power_spectrum(self, signal):
freq, power = signal.power_spectrum()
def test_phase_spectrum(self, signal):
freq, phase = signal.phase_spectrum()
def test_peak(self, signal):
value = signal.peak()
assert len(value) == signal.channels
def test_peak_level(self, signal):
value = signal.peak_level()
assert len(value) == signal.channels
def test_sound_exposure(self, signal):
value = signal.sound_exposure()
assert len(value) == signal.channels
def test_sound_exposure_level(self, signal):
value = signal.sound_exposure_level()
assert len(value) == signal.channels
def test_octaves(self, signal):
freq, octaves = signal.octaves()
def test_levels(self, signal):
times, levels = signal.levels()
def test_leq(self, signal):
#s = Signal(np.random.randn(10000), 22050)
leq = signal.leq()
assert(type(leq) is np.ndarray)
def test_bandpass(self, signal):
x = signal.bandpass(1000.0, 2000.0)
def test_bandstop(self, signal):
x = signal.bandstop(1000.0, 2000.0)
def test_highpass(self, signal):
x = signal.highpass(1000.0)
def test_lowpass(self, signal):
x = signal.lowpass(1000.0)
def test_octavepass(self, signal):
x = signal.octavepass(1000.0, fraction=6)
def test_bandpass_frequencies(self, signal):
f = EqualBand(center=[100.,200.,300.], bandwidth=20.)
f, x = signal.bandpass_frequencies(f)
def test_bandpass_octaves(self, signal):
f, x = signal.octaves()
def test_bandpass_third_octaves(self, signal):
f, x = signal.third_octaves()
def test_bandpass_fractional_octaves(self, signal):
f, x = signal.fractional_octaves()
def test_weigh(self, signal):
s = signal.weigh()
s = signal.weigh('C')
s = signal.weigh('A', zero_phase=True)
## Plot methods with arguments to test.
#plot_methods = {'plot' : None,
#'plot_levels' : {
#'time' : [None, 0.125, 1.0],
#'method' : ['average', 'weighting'],
#},
#'plot_octaves' : None,
#'plot_third_octaves' : None,
#'plot_fractional_octaves' : {
#'fraction' : [3, 6]
#},
#'plot_spectrum' : {
#'N' : [None, 8000]
#},
#}
#@pytest.yield_fixture
#def plot_function_with_argument(self):
## This won't work with pytest. Apparently they do teardown after the yield
## statement and therefore don't support multiple yield statements.
## Using a closure doesn't help either.
#for func, arguments in self.plot_methods.items():
#if arguments is not None:
#for prod in it.product(*arguments.values()):
#yield (func, dict(zip(arguments.keys(), prod)))
#else:
#yield (func, None)
#def test_plot_functions(self, signal, plot_function_with_argument):
#func, arguments = plot_function_with_argument
#if arguments is None:
#getattr(signal, func)()
#else:
#getattr(signal, func)(**arguments)
def test_plot(self, signal):
signal.plot()
def test_plot_levels(self, signal):
signal.plot_levels()
signal.plot_levels(method='average', time=1.0)
signal.plot_levels(method='weighting', time=1.0)
def test_plot_octaves(self, signal):
signal.plot_octaves()
def test_plot_third_octaves(self, signal):
signal.plot_third_octaves()
def test_plot_fractional_octaves(self, signal):
signal.plot_fractional_octaves(3)
signal.plot_fractional_octaves(6)
signal.plot_fractional_octaves(9)
def test_plot_power_spectrum(self, signal):
signal.plot_power_spectrum()
def test_plot_phase_spectrum(self, signal):
signal.plot_phase_spectrum()
def test_spectrogram(self, signal):
if signal.channels > 1:
with pytest.raises(ValueError):
signal.spectrogram()
else:
try:
signal.spectrogram()
except NotImplementedError: # easy way to skip mpl 1.3.1 specgram mode issue
pass
def test_pickling(self, signal):
import pickle
p = pickle.dumps(signal)
obj = pickle.loads(p)
assert((obj==signal).all())
assert(obj.fs==signal.fs)
assert(type(obj) is type(signal))
| {
"content_hash": "abf3deab88c2f9bc88558edca1285848",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 118,
"avg_line_length": 30.636942675159236,
"alnum_prop": 0.5562370062370062,
"repo_name": "FRidh/python-acoustics",
"id": "0716279da52e8426167ba86ceaa7c55038194b66",
"size": "9620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test__signal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "300162"
}
],
"symlink_target": ""
} |
"""\
This breaks the files in INPUT_DIR into chunks of CHUNK_SIZE, windowed by
OFFSET.
NB: OUTPUT_DIR will be destroyed and recreated."""
import argparse
import os
import shutil
import sys
def tokenize(text):
return text.split()
def explode_chunks(chunk_size, offset, input_file):
with open(input_file) as f:
tokens = tokenize(f.read())
indexes = range(len(tokens))
for start in indexes[::offset]:
end = start + chunk_size
yield ' '.join(tokens[start:end])
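# Worked example (hypothetical input): with chunk_size=4 and offset=2, a
# 7-token file yields the overlapping windows tokens[0:4], tokens[2:6],
# tokens[4:8] and tokens[6:10], so each token appears in roughly
# chunk_size/offset chunks.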
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'input_dir', type=str, default='dialogs', metavar='INPUT_DIR',
help='The directory to read for input files.',
)
parser.add_argument(
'output_dir', type=str, default='chunks', metavar='OUTPUT_DIR',
help='The directory to put the output chunks into.',
)
parser.add_argument(
'chunk_size', type=int, default=500, metavar='CHUNK_SIZE',
help='The size of the chunks to create, in rough tokens '
'(default=500).',
)
parser.add_argument(
'offset', type=int, default=250, metavar='OFFSET',
help='The offset for each windowed chunk (default=250).',
)
args = parser.parse_args()
return args
def main(argv=None):
opts = parse_args(argv or sys.argv[1:])
input_dir = os.path.abspath(opts.input_dir)
output_dir = os.path.abspath(opts.output_dir)
if os.path.exists(output_dir):
print('deleting {0}...'.format(output_dir))
shutil.rmtree(opts.output_dir)
for (root, _, files) in os.walk(input_dir):
for fn in files:
fullfn = os.path.join(root, fn)
shared = os.path.commonprefix([input_dir, fullfn])
rest = fullfn[len(shared)+1:]
(base, _) = os.path.splitext(rest)
base = os.path.join(output_dir, base)
chunks = explode_chunks(
opts.chunk_size,
opts.offset,
fullfn,
)
print(fullfn)
os.makedirs(base)
for (i, chunk) in enumerate(chunks):
outfile = os.path.join(base, 'chunk-%04d.txt' % (i,))
print("\t=> {0}".format(outfile))
with open(outfile, 'w') as f:
f.write(chunk)
print('done.')
if __name__ == '__main__':
main()
| {
"content_hash": "c47df1884d42832760b26ad29890d0e0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 30.178571428571427,
"alnum_prop": 0.5495069033530572,
"repo_name": "erochest/greek",
"id": "1b11ca3cd8edfdf54d80d3c7e076bf921e081ac7",
"size": "2559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/chunk_dir.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Haskell",
"bytes": "22992"
},
{
"name": "Python",
"bytes": "4412"
}
],
"symlink_target": ""
} |
from sleeps import *
from sleepergroups import *
from static import *
from django.template import RequestContext
from django.template.loader import render_to_string
from django.http import *
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, render_to_response
from django.core import serializers
from django.db.models import Q
from django.core.exceptions import *
from django.utils.timezone import now
from django.utils.decorators import method_decorator
from django.views.generic import CreateView
from django.core.cache import cache
from sleep.models import *
from sleep.forms import *
import datetime
import pytz
import csv
MAX_LEADERBOARD_SIZE = 10
@login_required
def graph(request):
return render_to_response('graph.html', {"user": request.user, "sleeps": request.user.sleep_set.all().order_by('-end_time')}, context_instance=RequestContext(request))
class CreateGroup(CreateView):
model = SleeperGroup
template_name = 'create_group.html'
fields = ['name', 'privacy', 'description']
def form_valid(self, form):
response = super(CreateGroup, self).form_valid(form)
Membership(
user=self.request.user,
group=form.instance,
privacy=self.request.user.sleeperprofile.privacyLoggedIn,
role=Membership.ADMIN).save()
return response
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(CreateView, self).dispatch(*args, **kwargs)
@login_required
def acceptInvite(request):
if 'id' in request.POST and 'accepted' in request.POST:
invites = GroupInvite.objects.filter(id=request.POST['id'],accepted=None)
if len(invites)!=1:
raise Http404
invite = invites[0]
if request.user.id is not invite.user_id:
raise PermissionDenied
if request.POST['accepted']=="True":
invite.accept()
else:
invite.reject()
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def inviteMember(request):
if 'group' in request.POST and 'user' in request.POST:
gid = request.POST['group']
uid = request.POST['user']
gs = SleeperGroup.objects.filter(id=gid)
if len(gs)!=1 or request.user not in gs[0].members.all():
raise Http404
us = Sleeper.objects.filter(id=uid)
if len(us)!=1:
raise Http404
g=gs[0]
u=us[0]
rs = GroupRequest.objects.filter(user = u, group = g, accepted=None)
if rs.count() >= 1: #the user has made a request to join, accept them.
rs[0].accept()
else:
g.invite(u,request.user)
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def manageMember(request):
if 'group' in request.POST and 'user' in request.POST:
gid = request.POST['group']
uid = request.POST['user']
gs = SleeperGroup.objects.filter(id=gid)
if len(gs)!=1 or request.user not in gs[0].members.all():
raise Http404
us = Sleeper.objects.filter(id=uid)
if len(us)!=1:
raise Http404
g=gs[0]
u=us[0]
if not (request.user.pk == u.pk):
ms = Membership.objects.filter(user=request.user, group=g)
if ms.count() != 1: raise Http404
m = ms[0]
if m.role < m.ADMIN: raise PermissionDenied
if 'action' in request.POST and request.POST["action"] == "remove":
for m in Membership.objects.filter(user=u,group=g):
r = m.removeMember()
if r == "redirect": return HttpResponseRedirect("/groups")
return HttpResponse('')
if 'action' in request.POST and request.POST["action"] == "makeAdmin":
for m in Membership.objects.filter(user=u,group=g):
m.makeAdmin()
return HttpResponse('')
if 'action' in request.POST and request.POST["action"] == "removeAdmin":
for m in Membership.objects.filter(user=u, group=g):
try:
m.makeMember()
except ValueError:
return HttpResponseBadRequest('')
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def groupRequest(request):
if 'group' in request.POST:
gid = request.POST['group']
gs = SleeperGroup.objects.filter(id=gid)
if gs.count() != 1: raise Http404
g = gs[0]
if g.privacy < g.REQUEST: raise PermissionDenied
if g.privacy >= g.PUBLIC: # it's a public group, allow user to join
m = Membership(user=request.user, group=g, privacy = request.user.sleeperprofile.privacyLoggedIn)
m.save()
invites = GroupInvite.objects.filter(user=request.user, group=g, accepted = None)
if invites.count() >= 1: # the user has already been invited, accept them.
invites[0].accept()
else:
g.request(request.user)
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def groupJoin(request):
if 'group' in request.POST:
gid = request.POST['group']
gs = SleeperGroup.objects.filter(id=gid)
if gs.count() != 1: raise Http404
g = gs[0]
if g.privacy < SleeperGroup.PUBLIC: raise PermissionDenied
m = Membership(user = request.user, group = g, privacy = request.user.sleeperprofile.privacyLoggedIn)
m.save()
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def processRequest(request):
if 'id' in request.POST:
rs = GroupRequest.objects.filter(id=request.POST["id"])
if rs.count() != 1: raise Http404
r = rs[0]
m = Membership.objects.get(group=r.group, user=request.user)
if m.role < m.ADMIN: raise PermissionDenied
if "accepted" in request.POST:
if request.POST["accepted"] == "True":
r.accept()
elif request.POST["accepted"] == "False":
r.reject()
return HttpResponse('')
return HttpResponseBadRequest('')
else:
return HttpResponseBadRequest('')
@login_required
def manageGroup(request,gid):
gs=SleeperGroup.objects.filter(id=gid)
if len(gs)!=1:
raise Http404
g=gs[0]
if request.user not in g.members.all():
raise PermissionDenied
context={
'group':g,
'isAdmin': (request.user.membership_set.get(group = g).role >= 50),
}
m = request.user.membership_set.get(group = g)
if request.method == 'POST' and "SleeperSearchForm" in request.POST:
searchForm=SleeperSearchForm(request.POST)
if searchForm.is_valid():
us=User.objects.filter(username__icontains=searchForm.cleaned_data['username']).exclude(sleepergroups__id=g.id)
context['results']=us
context['showResults'] = True
context['count']=us.count()
else:
searchForm = SleeperSearchForm()
if request.method == 'POST' and "GroupForm" in request.POST:
if context['isAdmin'] == False:
raise PermissionDenied
groupForm = GroupForm(request.POST, instance=g)
if groupForm.is_valid():
if 'delete' in groupForm.data and groupForm.data['delete'] == 'on':
g.delete()
return HttpResponseRedirect('/groups/')
groupForm.save()
else:
context['page'] = 2
else:
groupForm = GroupForm(instance=g)
if request.method == 'POST' and "MembershipForm" in request.POST:
membershipForm = MembershipForm(request.POST, instance=m)
if membershipForm.is_valid():
membershipForm.save()
else:
membershipForm = MembershipForm(instance=m)
context['searchForm']=searchForm
context['groupForm']=groupForm
context['membershipForm'] = membershipForm
context['members']=g.members.all()
if context['isAdmin']:
context['requests'] = g.grouprequest_set.filter(accepted=None)
if 'page' not in context and context['requests'].count() > 0: context['page'] = 3
return render_to_response('manage_group.html',context,context_instance=RequestContext(request))
def leaderboard(request,group_id=None):
if request.user.is_authenticated():
user_metrics = request.user.sleeperprofile.metrics.all()
else:
user_metrics = Metric.objects.filter(show_by_default=True)
if 'sort' not in request.GET or request.GET['sort'] not in [m.name for m in user_metrics]:
sort_by = 'zScore'
else:
sort_by = request.GET['sort']
if group_id is None:
board_size = MAX_LEADERBOARD_SIZE
group = None
else:
try:
group = SleeperGroup.objects.get(id=group_id)
except SleeperGroup.DoesNotExist:
raise Http404
if request.user not in group.members.all():
raise PermissionDenied
num_members = group.members.count()
# don't show bottom half of leaderboard for sufficiently large groups:
# we don't want to encourage bad sleep behavior
board_size = num_members if num_members < 4 else min(MAX_LEADERBOARD_SIZE, num_members//2)
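        # e.g. a 30-member group shows min(10, 15) == 10 rows, while a
        # 3-member group shows all 3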
ss = Sleeper.objects.sorted_sleepers(sortBy=sort_by,user=request.user,group=group)
top = [ s for s in ss if s['rank'] <= board_size or request.user.is_authenticated() and s['user'].pk==request.user.pk ]
numLeaderboard = len([s for s in ss if s['rank']!='n/a'])
n = now()
try:
recent_winner = Sleeper.objects.bestByTime(start=n-datetime.timedelta(3),end=n,user=request.user,group=group)[0]
except IndexError:
return HttpResponseBadRequest("Can't load leaderboard if there are no users")
if group:
allUsers = group.members.all()
else:
allUsers = Sleeper.objects.all()
number = allUsers.filter(sleep__isnull=False).distinct().count()
context = {
'group' : group,
'top' : top,
'recentWinner' : recent_winner,
'total' : Sleep.objects.totalSleep(group=group),
'number' : number,
'numLeaderboard' : numLeaderboard,
'leaderboard_valid' : len(ss),
'userMetrics' : user_metrics
}
return render_to_response('leaderboard.html',context,context_instance=RequestContext(request))
def graphs(request,group=None):
if group is not None:
gs = SleeperGroup.objects.filter(id=group)
if gs.count()!=1:
raise Http404
group = gs[0]
if request.user not in group.members.all():
raise PermissionDenied
return render_to_response('graphs.html',{'group': group},context_instance=RequestContext(request))
def creep(request,username=None):
if not username:
if request.user.is_anonymous():
creepable=Sleeper.objects.filter(sleeperprofile__privacy__gte=SleeperProfile.PRIVACY_STATS)
followed = []
else:
creepable=Sleeper.objects.filter(
Q(sleeperprofile__privacyLoggedIn__gte=SleeperProfile.PRIVACY_STATS) |
(
Q(sleeperprofile__privacyFriends__gte=SleeperProfile.PRIVACY_STATS) &
Q(sleeperprofile__friends=request.user)
)
)
followed = request.user.sleeperprofile.follows.order_by('username')
total=creepable.distinct().count()
if request.method == 'POST':
form=SleeperSearchForm(request.POST)
if form.is_valid():
users = creepable.filter(username__icontains=form.cleaned_data['username']).distinct()
count = users.count()
if count==1: return HttpResponseRedirect('/creep/%s/' % users[0].username)
else:
context = {
'results' : users,
'count' : count,
'form' : form,
'new' : False,
'total' : total,
'followed' : followed,
}
return render_to_response('creepsearch.html',context,context_instance=RequestContext(request))
else:
form = SleeperSearchForm()
context = {
'form' : form,
'new' : True,
'total' : total,
'followed' : followed,
}
return render_to_response('creepsearch.html',context,context_instance=RequestContext(request))
else:
context = {}
try:
user=Sleeper.objects.get(username=username)
p = user.sleeperprofile
if p.user_id == request.user.id and "as" in request.GET:
priv = p.checkPermissions(request.GET['as'])
else:
priv = p.getPermissions(request.user)
if not(request.user.is_anonymous()) and request.user.pk == user.pk: context["isself"] =True
if priv<=p.PRIVACY_NORMAL: return render_to_response('creepfailed.html',{},context_instance=RequestContext(request))
except:
return render_to_response('creepfailed.html',{},context_instance=RequestContext(request))
context.update({'user' : user,'global' : user.decayStats()})
if priv>=p.PRIVACY_PUBLIC: context['sleeps']=user.sleep_set.all().order_by('-end_time')
if priv>=p.PRIVACY_GRAPHS:
if "type" in request.GET and request.GET["type"] == "graph": return render_to_response('graph.html',context,context_instance=RequestContext(request))
context["graphs"] = True
return render_to_response('creep.html',context,context_instance=RequestContext(request))
@login_required
def editProfile(request):
p = request.user.sleeperprofile
if p.use12HourTime: fmt = "%I:%M %p"
else: fmt = "%H:%M"
if request.method == 'POST':
form = SleeperProfileForm(fmt, request.POST, instance=p)
context = {"form":form}
if form.is_valid():
form.save()
return HttpResponseRedirect('/editprofile/?success=True')
else:
for k in form.errors.viewkeys():
if "ideal" in k:
context["page"] = 2
break
else:
initial = {"idealWakeupWeekend": p.idealWakeupWeekend.strftime(fmt),
"idealWakeupWeekday": p.idealWakeupWeekday.strftime(fmt),
"idealSleepTimeWeekend": p.idealSleepTimeWeekend.strftime(fmt),
"idealSleepTimeWeekday": p.idealSleepTimeWeekday.strftime(fmt),}
form = SleeperProfileForm(fmt, instance=p, initial = initial)
context = {"form":form}
if "success" in request.GET and request.GET["success"] == "True": context["success"] = True
return render_to_response('editprofile.html', context ,context_instance=RequestContext(request))
@login_required
def exportSleeps(request):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="zscore_sleeps_' + request.user.username + '.csv"'
writer = csv.writer(response)
writer.writerow(["Start Time", "End Time", "Date", "Comments", "Timezone", "Quality"])
for s in request.user.sleep_set.all():
writer.writerow([s.start_local_time(), s.end_local_time(), s.date, s.comments, s.timezone, s.quality])
return response
@login_required
def friends(request):
prof = request.user.sleeperprofile
friendfollow = (prof.friends.all() | prof.follows.all()).distinct().order_by('username').select_related('sleeperprofile').prefetch_related('sleeperprofile__friends','sleeperprofile__follows')
requests = request.user.requests.filter(friendrequest__accepted=None).order_by('user__username')
if request.method == 'POST':
form=SleeperSearchForm(request.POST)
if form.is_valid():
users = User.objects.filter(username__icontains=form.cleaned_data['username']).exclude(pk=request.user.pk).distinct()
count = users.count()
context = {
'results' : users,
'count' : count,
'form' : form,
'new' : False,
'friendfollow' : friendfollow,
'requests' : requests,
}
return render_to_response('friends.html',context,context_instance=RequestContext(request))
else:
form = SleeperSearchForm()
context = {
'form' : form,
'new' : True,
'friendfollow' : friendfollow,
'requests' : requests,
}
return render_to_response('friends.html',context,context_instance=RequestContext(request))
@login_required
def requestFriend(request):
if 'id' in request.POST:
i = request.POST['id']
if i==request.user.pk or len(User.objects.filter(pk=i))!=1:
raise Http404
them = Sleeper.objects.get(pk=i)
if not FriendRequest.objects.filter(requestor=request.user.sleeperprofile,requestee=them):
if request.user in them.sleeperprofile.friends.all():
accept = True
else:
accept = None
FriendRequest.objects.create(requestor=request.user.sleeperprofile,requestee=them,accepted=accept)
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def hideRequest(request):
if 'id' in request.POST:
i = request.POST['id']
if i==request.user.pk or len(User.objects.filter(pk=i))!=1:
raise Http404
frs = FriendRequest.objects.filter(requestor__user__pk=i,requestee=request.user)
for fr in frs:
fr.accepted=False
fr.save()
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def addFriend(request):
if 'id' in request.POST:
i = request.POST['id']
if i==request.user.pk or len(User.objects.filter(pk=i))!=1:
raise Http404
prof = request.user.sleeperprofile
prof.friends.add(i)
prof.save()
frs = FriendRequest.objects.filter(requestor__user__pk=i,requestee=request.user)
for fr in frs:
fr.accepted=True
fr.save()
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def removeFriend(request):
if 'id' in request.POST:
i = request.POST['id']
if i==request.user.pk or len(User.objects.filter(pk=i))!=1:
raise Http404
prof = request.user.sleeperprofile
prof.friends.remove(i)
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def follow(request):
if 'id' in request.POST:
i = request.POST['id']
if i==request.user.pk or len(User.objects.filter(pk=i))!=1:
raise Http404
prof = request.user.sleeperprofile
prof.follows.add(i)
prof.save()
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def unfollow(request):
if 'id' in request.POST:
i = request.POST['id']
if i==request.user.pk or len(User.objects.filter(pk=i))!=1:
raise Http404
prof = request.user.sleeperprofile
prof.follows.remove(i)
return HttpResponse('')
else:
return HttpResponseBadRequest('')
@login_required
def createSleep(request):
    # Parse start, end, and date from the POST data into timezone-aware values
timezone = pytz.timezone(request.POST['timezone'])
start = datetime.datetime(*(map(int, request.POST.getlist("start[]"))))
start=timezone.localize(start)
end = datetime.datetime(*(map(int, request.POST.getlist("end[]"))))
end=timezone.localize(end)
date = datetime.date(*(map(int, request.POST.getlist("date[]"))[:3]))
# Pull out comments
if "comments" in request.POST:
comments = request.POST["comments"]
else:
comments = ""
# Create the Sleep instance
    if start > end: start, end = end, start # if start is after end, swap them
s = Sleep(user=request.user, start_time=start, end_time=end, comments=comments, date=date,timezone=timezone)
try:
s.validate_unique()
s.save()
except ValidationError:
return HttpResponseBadRequest('')
return HttpResponse('')
@login_required
def createPartialSleep(request):
created = PartialSleep.create_new_for_user(request.user)
if created:
return HttpResponseRedirect("/mysleep/")
else:
return HttpResponseBadRequest("")
@login_required
def finishPartialSleep(request):
try:
s = PartialSleep.finish_for_user(request.user)
return HttpResponseRedirect("/sleep/edit/" + str(s.pk) + "/?from=partial")
except ValidationError:
return HttpResponseRedirect("/sleep/simple/?error=partial")
except PartialSleep.DoesNotExist:
return HttpResponseBadRequest("")
@login_required
def deletePartialSleep(request):
try:
p= request.user.partialsleep
p.delete()
if "next" in request.GET: return HttpResponseRedirect(request.GET["next"])
return HttpResponseRedirect("/")
except PartialSleep.DoesNotExist:
return HttpResponseBadRequest('')
@login_required
def deleteSleep(request):
if 'id' in request.POST:
i = request.POST['id']
s = Sleep.objects.filter(pk=i)
if len(s) == 0:
raise Http404
s = s[0]
if s.user != request.user:
raise PermissionDenied
s.delete()
return HttpResponse('')
return HttpResponseBadRequest('')
@login_required
def deleteAllnighter(request):
if 'id' in request.POST:
i = request.POST['id']
a = Allnighter.objects.filter(pk=i)
if len(a) == 0: raise Http404
a = a[0]
if a.user != request.user: raise PermissionDenied
a.delete()
return HttpResponse('')
return HttpResponseBadRequest('')
@login_required
def getSleepsJSON(request):
u = request.user
sleeps = list(Sleep.objects.filter(user=u))
for sleep in sleeps:
tz = pytz.timezone(sleep.timezone)
        # Warning: the following is kind of hacky, but it's better than dealing
        # with the timezones in JS. JS doesn't understand timezones, so we
        # convert the timezone server-side, then pass the result through to JS
        # without telling it what timezone it's in. JS interprets it as local
        # time, which is slightly incorrect but works, since all we want to do
        # is get the hours/minutes/seconds back out as local time.
sleep.start_time=sleep.start_time.astimezone(tz).replace(tzinfo=None)
sleep.end_time=sleep.end_time.astimezone(tz).replace(tzinfo=None)
data = serializers.serialize('json', sleeps)
return HttpResponse(data, content_type='application/json')
| {
"content_hash": "ccea27594c9a51b3c1b00a148592359b",
"timestamp": "",
"source": "github",
"line_count": 592,
"max_line_length": 402,
"avg_line_length": 39.229729729729726,
"alnum_prop": 0.6140199793317258,
"repo_name": "sleepers-anonymous/zscore",
"id": "531dc3d46a0158200f3a4e2789358fe60970f7dc",
"size": "23224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sleep/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10431"
},
{
"name": "HTML",
"bytes": "50744"
},
{
"name": "JavaScript",
"bytes": "38737"
},
{
"name": "Makefile",
"bytes": "1346"
},
{
"name": "Python",
"bytes": "249964"
},
{
"name": "Shell",
"bytes": "414"
}
],
"symlink_target": ""
} |
__author__ = 'Janez Stupar'
from tastypie import resources, authorization, authentication, fields
from polls import models as poll_models
class PollResource(resources.ModelResource):
choices = fields.ToManyField('tp_demo.api.polls_api.ChoiceResource', 'choices',null=True, blank=True, full=True)
class Meta:
queryset= poll_models.Poll.objects.all()
resource_name = 'poll_resource'
authentication = authentication.Authentication()
authorization = authorization.Authorization()
# custom_meta_option = object() # add a custom meta option
list_allowed_methods = ['get','post']
detail_allowed_methods = ['put','get','delete']
fields = ['question','pub_date']
# filtering = {'pub_date':['exact']} # Filter entries
limit = 20
## Note that there are many other meta options, so you should refer to the documentation
def dispatch(self, request_type, request, **kwargs):
        # You might want to override this method to initialize custom meta options
# This is the point from which you will start if you want to assert
# total control of the request/response, but still want some tastypie candy
return super(PollResource,self).dispatch(request_type, request, **kwargs)
def dispatch_detail(self, request, **kwargs):
# This one is actually a wrapper for dispatch, for the purpose of
# handling of detail request on a resource (e.g. /api/poll/10/ )
return super(PollResource,self).dispatch_detail(request,**kwargs)
def get_resource_uri(self, bundle_or_obj):
# override to change the way specific resource uri is generated
return super(PollResource,self).get_resource_uri(bundle_or_obj)
def get_resource_list_uri(self):
# override to change the way resource_list_uri is generated
return super(PollResource,self).get_resource_list_uri()
    # Note that there are other methods available
def get_list(self, request, **kwargs):
        ## You might override this method to change HTTP GET request behavior
        # on the list endpoint of a resource (e.g. /api/poll/ )
return super(PollResource,self).get_list(request, **kwargs)
def post_list(self, request, **kwargs):
return super(PollResource, self).post_list(request, **kwargs)
def obj_get(self, request=None, **kwargs):
return super(PollResource,self).obj_get(request, **kwargs)
def obj_create(self, bundle, request=None, **kwargs):
return super(PollResource,self).obj_create(bundle, request, **kwargs)
def obj_delete(self, request=None, **kwargs):
return super(PollResource,self).obj_delete(request, **kwargs)
def build_filters(self, filters=None):
        # You might override this method to change the behavior of the default
        # filtering mechanism (e.g.: you want to do complex lookups using Q objects)
return super(PollResource, self).build_filters(filters)
def apply_filters(self, request, applicable_filters):
        # You might override this method if you want to change the way
        # filters are applied (e.g.: you built your filtering around Q objects
        # in the build_filters method)
return super(PollResource,self).apply_filters(request, applicable_filters)
def hydrate(self, bundle):
        # You will implement this method to clean request data that doesn't
        # belong to the model, or to compute data that the model requires.
return super(PollResource,self).hydrate(bundle)
def dehydrate(self, bundle):
        # You will implement this method to prepare the data for
        # serialization. You might want to remove unnecessary data
        # or add any other data you desire.
return super(PollResource,self).dehydrate(bundle)
class ChoiceResource(resources.ModelResource):
poll = fields.ForeignKey(PollResource, 'poll')
class Meta:
queryset = poll_models.Choice.objects.all()
resource_name = 'choice_resource'
authentication = authentication.Authentication()
authorization = authorization.Authorization()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['put','get','delete']
        fields = ['choice','votes']
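# Sketch (not part of this module): wiring these resources into a Django
# urlconf with tastypie's Api object; the "v1" namespace below is an
# assumption, not something this demo defines.
#
#   from tastypie.api import Api
#   v1_api = Api(api_name='v1')
#   v1_api.register(PollResource())
#   v1_api.register(ChoiceResource())
#   # then include v1_api.urls in your urlpatterns
| {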
"content_hash": "6c05034c0ae6da9e4cc0b14c906725fd",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 116,
"avg_line_length": 47.05434782608695,
"alnum_prop": 0.6830676830676831,
"repo_name": "JanezStupar/tastypie_demo",
"id": "0551c51a7e5fa7749560e0c9ce10f500a31ff05f",
"size": "4329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tp_demo/api/polls_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "192837"
},
{
"name": "Python",
"bytes": "15994"
}
],
"symlink_target": ""
} |
"""Tests for `ninjadog` package."""
# TODO: test raises ValueError when pug cli can't be found and not passed explicitly to renderer
def test_npm_installed():
from subprocess import Popen
assert Popen(('which', 'npm')).wait() == 0, 'npm must be installed'
def test_pug_cli_exists():
from pathlib import Path
from ninjadog.constants import PUG_CLI_PATH
assert Path(PUG_CLI_PATH).exists()
def test_hello_world():
from ninjadog import render
assert render('h1 hello world') == '<h1>hello world</h1>'
def test_pug_variable():
from ninjadog import render
assert render('h1= title', context={'title': 'hello world'}) == '<h1>hello world</h1>'
def test_jinja2_variable():
from ninjadog import render
assert render('h1 {{ title }}', context={'title': 'hello world'}, with_jinja=True) == '<h1>hello world</h1>'
def test_context():
import ninjadog
context = {'name': 'Derp'}
assert ninjadog.render('h1 hello #{ name }', context=context) == '<h1>hello Derp</h1>'
assert ninjadog.render("h1= name", context=context) == '<h1>Derp</h1>'
def test_conditional():
from textwrap import dedent
import ninjadog
string = dedent("""
if name == 'sam'
h1 hello #{ name }
""")
assert ninjadog.render(string, context={'name': 'sam'}) == '<h1>hello sam</h1>'
string = dedent("""
if person.name == 'sam'
h1 hello #{ person.name }
""")
assert ninjadog.render(string, context={'person': {'name': 'sam'}}) == '<h1>hello sam</h1>'
def test_render_no_string_argument():
from tempfile import NamedTemporaryFile
import ninjadog
string = 'h1 hello'
with NamedTemporaryFile('w+') as tempfile:
tempfile.write(string)
tempfile.seek(0)
assert ninjadog.render(file=tempfile.name) == ninjadog.render(string) == '<h1>hello</h1>'
def test_with_pug_with_jinja2():
from textwrap import dedent
from ninjadog import render
string = dedent("""
if person.name == "Bob"
h1 Hello Bob
else
h1 My name is #{ person.name }
    p The person's uppercase name is {{ person.get('name').upper() }}
p The person's name is #{ person.name }
if animal
h1 This should not output
else
p animal value is false
""").strip()
context = {'person': {'name': 'Bob'}, 'animal': None}
expected_output = dedent("""
<h1>Hello Bob</h1>
    <p>The person's uppercase name is BOB</p>
<p>The person's name is Bob</p>
<p>animal value is false</p>
""").strip()
actual_output = render(string, context=context, pretty=True, with_jinja=True).strip()
assert expected_output == actual_output
def test_cli_string():
from ninjadog.cli import main
from ninjadog.utils import jsonify
context = jsonify({'title': 'hello, world'})
assert main(('string', 'h1= title', '-c', context)) == '<h1>hello, world</h1>'
def test_extends():
from tempfile import gettempdir
from textwrap import dedent
from pathlib import Path
from ninjadog import render
parent_string = dedent("""
h1 Title
block content
""")
child_string = dedent("""
extends parent
block content
h2 Subtitle
""")
parent_path = Path(gettempdir(), 'parent.pug')
child_path = Path(gettempdir(), 'child.pug')
with parent_path.open('w+') as parent, child_path.open('w+') as child:
parent.write(parent_string)
parent.seek(0)
child.write(child_string)
child.seek(0)
assert render(file=child_path) == '<h1>Title</h1><h2>Subtitle</h2>'
assert render(file=str(child_path)) == '<h1>Title</h1><h2>Subtitle</h2>'
| {
"content_hash": "cf869930877e6da2c8b678d324d5f438",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 112,
"avg_line_length": 27.946969696969695,
"alnum_prop": 0.6253727297370562,
"repo_name": "knowsuchagency/ninjadog",
"id": "6c36c7337778993804185f55e34f582ccb3e038c",
"size": "3736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ninjadog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "121"
},
{
"name": "Python",
"bytes": "16601"
}
],
"symlink_target": ""
} |
from out import Out
| {
"content_hash": "7aa0e38e024d36e6dcabf046cb9bff12",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 19,
"avg_line_length": 10.5,
"alnum_prop": 0.7619047619047619,
"repo_name": "Neppord/py2py",
"id": "cb3e63f5d2b2d3e3e392fb1ca6a8e0427ca49d3b",
"size": "21",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py2py_lib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18725"
}
],
"symlink_target": ""
} |
"""Clean invalid entries from the dSYM symbol cache."""
import os
import sys
from symbol_cache_schema import SQLITE_SYMBOL_CACHE_PATH
from symbol_cache_schema import SymbolCacheSchema
class CleanSymbolCache(object):
"""Cleans all orphaned entries in the DBGScriptCommands database."""
def CleanMissingDSYMs(self):
"""Removes all entries where dsym_path cannot be found."""
connection = self.cache_schema.connection
cursor = connection.cursor()
cursor.execute('SELECT DISTINCT dsym_path FROM symbol_cache;')
dsym_path_rows = cursor.fetchall()
dsym_paths_to_delete = []
for dsym_path_row in dsym_path_rows:
dsym_path = dsym_path_row[0]
# dSYM bundles are directories, not files.
if not os.path.isdir(dsym_path):
dsym_paths_to_delete.append(dsym_path)
if dsym_paths_to_delete:
paths_to_delete = ['dsym_path = "%s"' % x for x in dsym_paths_to_delete]
delete_query_where = ' OR '.join(paths_to_delete)
cursor.execute('DELETE FROM symbol_cache '
'WHERE %s' % delete_query_where)
connection.commit()
def __init__(self, db_path=SQLITE_SYMBOL_CACHE_PATH):
self.cache_schema = SymbolCacheSchema(db_path)
if __name__ == '__main__':
CleanSymbolCache().CleanMissingDSYMs()
sys.exit(0)
| {
"content_hash": "f389a49a6d92b24dfa211ca86f05dd9c",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 30.928571428571427,
"alnum_prop": 0.6797536566589685,
"repo_name": "pinterest/tulsi",
"id": "69ab9b26b17f1635caa13452b71de24faa7b5435",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/TulsiGenerator/Scripts/clean_symbol_cache.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3879"
},
{
"name": "C++",
"bytes": "1598"
},
{
"name": "HTML",
"bytes": "11090"
},
{
"name": "Objective-C",
"bytes": "1478"
},
{
"name": "Python",
"bytes": "114121"
},
{
"name": "SCSS",
"bytes": "7689"
},
{
"name": "Shell",
"bytes": "11559"
},
{
"name": "Starlark",
"bytes": "68767"
},
{
"name": "Swift",
"bytes": "1064599"
}
],
"symlink_target": ""
} |
"""Unit tests of resource records."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
@pytest.mark.usefixtures("resource_record_class_fixture", "resource_record_test_fixture")
class TestResourceRecord(object):
"""Tests for ResourceRecord"""
@pytest.mark.usefixtures("resource_query_record_class_fixture", "resource_query_record_test_fixture")
class TestResourceQueryRecord(object):
"""Tests for ResourceQueryRecord"""
@pytest.mark.usefixtures("resource_form_record_class_fixture", "resource_form_record_test_fixture")
class TestResourceFormRecord(object):
"""Tests for ResourceFormRecord"""
@pytest.mark.usefixtures("resource_search_record_class_fixture", "resource_search_record_test_fixture")
class TestResourceSearchRecord(object):
"""Tests for ResourceSearchRecord"""
@pytest.mark.usefixtures("bin_record_class_fixture", "bin_record_test_fixture")
class TestBinRecord(object):
"""Tests for BinRecord"""
@pytest.mark.usefixtures("bin_query_record_class_fixture", "bin_query_record_test_fixture")
class TestBinQueryRecord(object):
"""Tests for BinQueryRecord"""
@pytest.mark.usefixtures("bin_form_record_class_fixture", "bin_form_record_test_fixture")
class TestBinFormRecord(object):
"""Tests for BinFormRecord"""
@pytest.mark.usefixtures("bin_search_record_class_fixture", "bin_search_record_test_fixture")
class TestBinSearchRecord(object):
"""Tests for BinSearchRecord"""
| {
"content_hash": "b6c1ef738620f806c3abc66f84078743",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 103,
"avg_line_length": 31.80851063829787,
"alnum_prop": 0.7632107023411371,
"repo_name": "mitsei/dlkit",
"id": "f3c09302148845b194b3aab28500e45b2ff5bcaa",
"size": "1495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/resource/test_record_templates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25170465"
},
{
"name": "TeX",
"bytes": "1088"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import os
import sys
from invoke import task, call
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__)))
PROJECTS = [
'demo',
'wapps',
'wapps.blog',
'wapps.gallery',
# 'wapps.forms',
]
LANGUAGES = ['fr']
I18N_DOMAIN = 'django'
CLEAN_PATTERNS = [
'build', 'dist', '**/*.pyc', '**/__pycache__', '.tox', '**/*.mo', 'reports'
]
def color(code):
'''A simple ANSI color wrapper factory'''
return lambda t: '\033[{0}{1}\033[0;m'.format(code, t)
green = color('1;32m')
red = color('1;31m')
blue = color('1;30m')
cyan = color('1;36m')
purple = color('1;35m')
white = color('1;39m')
def header(text):
'''Display an header'''
print(' '.join((blue('>>'), cyan(text))))
sys.stdout.flush()
def info(text, *args, **kwargs):
'''Display informations'''
text = text.format(*args, **kwargs)
print(' '.join((purple('>>>'), text)))
sys.stdout.flush()
def success(text):
'''Display a success message'''
print(' '.join((green('✔'), white(text))))
sys.stdout.flush()
def error(text):
'''Display an error message'''
print(red('✘ {0}'.format(text)))
sys.stdout.flush()
def exit(text=None, code=-1):
if text:
error(text)
sys.exit(-1)
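# Illustrative sketch (added for exposition; not part of the original
# tasks.py): each helper above is a closure over an ANSI escape prefix,
# so composing them is plain string work:
#
#   >>> green('ok')
#   '\033[1;32mok\033[0;m'
#   >>> header('building')   # prints: ">> building" with blue/cyan codes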
@task
def clean(ctx):
'''Cleanup all build artifacts'''
header(clean.__doc__)
with ctx.cd(ROOT):
for pattern in CLEAN_PATTERNS:
info(pattern)
ctx.run('rm -rf {0}'.format(' '.join(CLEAN_PATTERNS)))
@task
def demo(ctx):
'''Run the demo'''
header(demo.__doc__)
with ctx.cd(ROOT):
ctx.run('./manage.py migrate', pty=True)
ctx.run('./manage.py runserver', pty=True)
@task
def test(ctx, report=False, verbose=False):
'''Run tests suite'''
header(test.__doc__)
cmd = ['pytest']
if verbose:
cmd.append('-v')
if report:
cmd.append('--junitxml=reports/tests.xml')
with ctx.cd(ROOT):
ctx.run(' '.join(cmd), pty=True)
@task
def cover(ctx, report=False, verbose=False):
'''Run tests suite with coverage'''
header(cover.__doc__)
cmd = [
'pytest',
'--cov-config coverage.rc',
'--cov-report term',
'--cov=wapps',
]
if verbose:
cmd.append('-v')
if report:
cmd += [
'--cov-report html:reports/coverage',
'--cov-report xml:reports/coverage.xml',
'--junitxml=reports/tests.xml'
]
with ctx.cd(ROOT):
ctx.run(' '.join(cmd), pty=True)
@task
def qa(ctx):
'''Run a quality report'''
header(qa.__doc__)
with ctx.cd(ROOT):
info('Python Static Analysis')
flake8_results = ctx.run('flake8 wapps tests', pty=True, warn=True)
if flake8_results.failed:
error('There is some lints to fix')
else:
success('No lint to fix')
info('Ensure PyPI can render README and CHANGELOG')
readme_results = ctx.run('python setup.py check -r -s', pty=True, warn=True, hide=True)
if readme_results.failed:
print(readme_results.stdout)
error('README and/or CHANGELOG is not renderable by PyPI')
else:
success('README and CHANGELOG are renderable by PyPI')
if flake8_results.failed or readme_results.failed:
exit('Quality check failed', flake8_results.return_code or readme_results.return_code)
success('Quality check OK')
@task
def migration(ctx, app='wapps', name=None, empty=False):
'''Create a new migration'''
header('Create a new django migration')
cmd = ['./manage.py', 'makemigrations', app]
if name:
cmd += ['-n', name]
if empty:
cmd += ['--empty']
with ctx.cd(ROOT):
ctx.run(' '.join(cmd), pty=True)
@task
def update(ctx):
'''Update dependencies'''
header(update.__doc__)
with ctx.cd(ROOT):
ctx.run('pip install -r requirements/develop.pip')
ctx.run('npm install')
@task
def i18n(ctx):
'''Extract translatable strings'''
header(i18n.__doc__)
for project in PROJECTS:
root = project.replace('.', '/')
with ctx.cd(root):
ctx.run('pybabel extract -F babel.cfg -o locale/{0}.pot .'.format(I18N_DOMAIN))
for lang in LANGUAGES:
translation = os.path.join(root, 'locale', lang, 'LC_MESSAGES', '{0}.po'.format(I18N_DOMAIN))
if not os.path.exists(translation):
ctx.run('pybabel init -D {domain} -i locale/django.pot -d locale -l {lang}'.format(
lang=lang, domain=I18N_DOMAIN
))
ctx.run('pybabel update -D {0} -i locale/{0}.pot -d locale'.format(I18N_DOMAIN))
ctx.run('rm locale/{0}.pot'.format(I18N_DOMAIN))
@task
def i18nc(ctx):
'''Compile translations'''
header(i18nc.__doc__)
for project in PROJECTS:
root = project.replace('.', '/')
with ctx.cd(root):
ctx.run('pybabel compile -D {0} -d locale --statistics'.format(I18N_DOMAIN))
@task(i18nc)
def dist(ctx, buildno=None):
'''Package for distribution'''
header(dist.__doc__)
cmd = ['python3 setup.py']
if buildno:
cmd.append('egg_info -b {0}'.format(buildno))
cmd.append('bdist_wheel')
with ctx.cd(ROOT):
ctx.run(' '.join(cmd), pty=True)
success('Distribution is available in dist directory')
@task(clean, qa, call(cover, report=True), dist, default=True)
def all(ctx):
'''Run tests, reports and packaging'''
pass
| {
"content_hash": "39b9d4351f59e3635b6adc5c23dd8b48",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 109,
"avg_line_length": 26.04186046511628,
"alnum_prop": 0.572959457045901,
"repo_name": "apihackers/wapps",
"id": "54fc6e26bfbe8f8ecfa63b79e502c13f1f537c05",
"size": "5603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "935"
},
{
"name": "HTML",
"bytes": "6935"
},
{
"name": "JavaScript",
"bytes": "5969"
},
{
"name": "Python",
"bytes": "183616"
},
{
"name": "Shell",
"bytes": "818"
},
{
"name": "Vue",
"bytes": "1969"
}
],
"symlink_target": ""
} |
from GroundedScan.world import LogicalForm
from GroundedScan.world import Term
from GroundedScan.world import SemType
from GroundedScan.world import ENTITY
from GroundedScan.world import Variable
from GroundedScan.world import Weights
from GroundedScan.world import EVENT
from GroundedScan.world import COLOR
from GroundedScan.world import SIZE
from typing import List
from typing import ClassVar
from collections import namedtuple
import numpy as np
from itertools import product
Nonterminal = namedtuple("Nonterminal", "name")
Terminal = namedtuple("Terminal", "name")
ROOT = Nonterminal("ROOT")
VP = Nonterminal("VP")
VV_intransitive = Nonterminal("VV_intransitive")
VV_transitive = Nonterminal("VV_transitive")
RB = Nonterminal("RB")
DP = Nonterminal("DP")
NP = Nonterminal("NP")
NN = Nonterminal("NN")
JJ = Nonterminal("JJ")
# TODO cleaner
VAR_COUNTER = [0]
def free_var(sem_type):
name = "x{}".format(VAR_COUNTER[0])
VAR_COUNTER[0] += 1
return Variable(name, sem_type)
class Rule(object):
"""
Rule-class of form LHS -> RHS with method instantiate that defines its meaning.
"""
def __init__(self, lhs: Nonterminal, rhs: List, max_recursion=2):
self.lhs = lhs
self.rhs = rhs
self.sem_type = None
self.max_recursion = max_recursion
def instantiate(self, *args, **kwargs):
raise NotImplementedError()
class LexicalRule(Rule):
"""
Rule of form Non-Terminal -> Terminal.
"""
def __init__(self, lhs: Nonterminal, word: str, specs: Weights, sem_type: SemType):
super().__init__(lhs=lhs, rhs=[Terminal(word)], max_recursion=1)
self.name = word
self.sem_type = sem_type
self.specs = specs
def instantiate(self, meta=None, **kwargs) -> LogicalForm:
# TODO a little fishy to have recursion meta here rather than in wrapper
var = free_var(self.sem_type)
return LogicalForm(
variables=(var, ),
terms=(Term(self.name, (var, ), specs=self.specs, meta=meta), )
)
def __repr__(self):
lhs = self.lhs.name
rhs = self.rhs[0].name
return "{} -> {}".format(lhs, rhs)
class Root(Rule):
def __init__(self):
super().__init__(lhs=ROOT, rhs=[VP])
def instantiate(self, child, **kwargs):
return child
def __repr__(self):
return "ROOT -> VP"
class RootConj(Rule):
def __init__(self, max_recursion=0):
super().__init__(lhs=ROOT, rhs=[VP, Terminal("and"), ROOT], max_recursion=max_recursion)
def instantiate(self, left_child, right_child, **kwargs):
return LogicalForm(
variables=left_child.variables + right_child.variables,
terms=left_child.terms + right_child.terms + (Term("seq", (left_child.head, right_child.head)),)
)
def __repr__(self):
return "ROOT -> VP 'and' ROOT"
class VpWrapper(Rule):
def __init__(self, max_recursion=0):
super().__init__(lhs=VP, rhs=[VP, RB], max_recursion=max_recursion)
def instantiate(self, rb, vp, meta, **kwargs):
bound = rb.bind(vp.head)
assert bound.variables[0] == vp.head
return LogicalForm(variables=vp.variables + bound.variables[1:], terms=vp.terms + bound.terms)
def __repr__(self):
return "VP -> VP RB"
class VpIntransitive(Rule):
def __init__(self):
super().__init__(lhs=VP, rhs=[VV_intransitive, Terminal("to"), DP])
def instantiate(self, vv, dp, meta, **kwargs):
role = Term("patient", (vv.head, dp.head))
meta["arguments"].append(dp)
return LogicalForm(variables=vv.variables + dp.variables, terms=vv.terms + dp.terms + (role,))
def __repr__(self):
return "VP -> VV_intrans 'to' DP"
class VpTransitive(Rule):
def __init__(self):
super().__init__(lhs=VP, rhs=[VV_transitive, DP])
def instantiate(self, vv, dp, meta, **kwargs):
role = Term("patient", (vv.head, dp.head))
meta["arguments"].append(dp)
return LogicalForm(variables=vv.variables + dp.variables, terms=vv.terms + dp.terms + (role,))
def __repr__(self):
return "VP -> VV_trans DP"
class Dp(Rule):
def __init__(self):
super().__init__(lhs=DP, rhs=[Terminal("a"), NP])
def instantiate(self, np, **kwargs):
return np
def __repr__(self):
return "DP -> 'a' NP"
class NpWrapper(Rule):
def __init__(self, max_recursion=0):
super().__init__(lhs=NP, rhs=[JJ, NP], max_recursion=max_recursion)
def instantiate(self, jj, np, meta=None, **kwargs):
bound = jj.bind(np.head)
assert bound.variables[0] == np.head
return LogicalForm(variables=np.variables + bound.variables[1:], terms=np.terms + bound.terms)
def __repr__(self):
return "NP -> JJ NP"
class Np(Rule):
def __init__(self):
super().__init__(lhs=NP, rhs=[NN])
def instantiate(self, nn, **kwargs):
return nn
def __repr__(self):
return "NP -> NN"
class Derivation(object):
"""
Holds a constituency tree that makes up a sentence. Can be used to obtain the meaning of a sentence in terms
of a Logical Form. The meaning of a derivation is made up of the meaning of its children.
"""
def __init__(self, rule, children=None, meta=None):
self.rule = rule
self.lhs = rule.lhs
self.children = children
# NOTE: avoid a mutable default argument; a shared {} would leak state
# (e.g. meta["arguments"]) across Derivation instances.
self.meta = meta if meta is not None else {}
@classmethod
def from_rules(cls, rules: list, symbol=ROOT, lexicon=None):
"""Recursively form a derivation from a rule list that has been constructed in a depth-first manner,
use the lexicon for the Lexical Rules at the leafs of the constituency tree."""
# If the current symbol is a Terminal, close current branch and return.
if isinstance(symbol, Terminal):
return symbol
if symbol not in lexicon.keys():
next_rule = rules.pop()
else:
next_rule = lexicon[symbol].pop()
return Derivation(
next_rule,
tuple(cls.from_rules(rules, symbol=next_symbol, lexicon=lexicon) for next_symbol in next_rule.rhs)
)
def to_rules(self, rules: list, lexicon: dict):
for child in self.children:
if isinstance(child, Derivation):
child.to_rules(rules, lexicon)
else:
lexicon[child] = [child]
if isinstance(self.rule, LexicalRule):
if self.rule.lhs not in lexicon:
lexicon[self.rule.lhs] = [self.rule]
else:
lexicon[self.rule.lhs] = [self.rule] + lexicon[self.rule.lhs]
else:
rules.append(self.rule)
return
def words(self) -> tuple:
"""Obtain all words of a derivation by combining the words of all the children."""
out = []
for child in self.children:
if isinstance(child, Terminal):
out.append(child.name)
else:
out += child.words()
return tuple(out)
# TODO canonical variable names, not memoization
def meaning(self, arguments: list) -> LogicalForm:
"""Recursively define the meaning of the derivation by instantiating the meaning of each child."""
self.meta["arguments"] = arguments
if not hasattr(self, "_cached_logical_form"):
child_meanings = [
child.meaning(arguments)
for child in self.children
if isinstance(child, Derivation)
]
meaning = self.rule.instantiate(*child_meanings, meta=self.meta)
self._cached_logical_form = meaning
return self._cached_logical_form
@classmethod
def from_str(cls, rules_str, lexicon_str, grammar):
# TODO: method to instantiate derivation from str (see __repr__)
rules_list = []
for rule in rules_str.split(','):
rules_list.append(grammar.rule_str_to_rules[rule])
lexicon = {}
lexicon_list = lexicon_str.split(',')
for entry in lexicon_list:
items = entry.split(':')
symbol_type = items[0]
for item in items[1:]:
if symbol_type == 'T':
new_terminal = Terminal(item)
lexicon[new_terminal] = [new_terminal]
else:
rule = grammar.rule_str_to_rules[item]
if rule.lhs not in lexicon:
lexicon[rule.lhs] = [rule]
else:
lexicon[rule.lhs].append(rule)
return cls.from_rules(rules_list, lexicon=lexicon)
def __repr__(self):
rules = []
lexicon = {}
self.to_rules(rules, lexicon)
rules_str = ','.join([str(rule) for rule in rules])
lexicon_list = []
for key, value in lexicon.items():
if isinstance(key, Nonterminal):
symbol_str = "NT"
for rhs_symbol in value:
symbol_str += ":{}".format(rhs_symbol)
lexicon_list.append(symbol_str)
else:
lexicon_list.append("T:{}".format(value[0].name))
lexicon_str = ','.join(lexicon_list)
return rules_str + ';' + lexicon_str
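# Illustrative sketch (added for exposition; not part of the original
# module): Derivation.words() walks the tree left to right, collecting
# Terminal names. A hand-built NP -> NN derivation over 'circle':
#
#   nn = LexicalRule(lhs=NN, word="circle", sem_type=ENTITY,
#                    specs=Weights(noun="circle"))
#   leaf = Derivation(nn, children=(Terminal("circle"),))
#   tree = Derivation(Np(), children=(leaf,))
#   tree.words()      # -> ('circle',)
#   tree.meaning([])  # LogicalForm assembled bottom-up via Np.instantiate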
class Template(object):
"""
A template is a constituency-tree without lexical rules. From a template together with a lexicon, multiple
constituency trees can be formed.
"""
def __init__(self):
self._left_values = []
self._right_values = []
self._leftmost_nonterminal = None
self.rules = []
def add_value(self, value, expandable):
if expandable and not self._leftmost_nonterminal:
self._leftmost_nonterminal = value
elif self._leftmost_nonterminal:
self._right_values.append(value)
else:
self._left_values.append(value)
def has_nonterminal(self):
return self._leftmost_nonterminal is not None
def get_leftmost_nonterminal(self):
assert self.has_nonterminal(), "Trying to get a NT but none present in this derivation."
return self._leftmost_nonterminal
def expand_leftmost_nonterminal(self, rule, expandables):
new_derivation = Template()
new_derivation_symbols = self._left_values + rule.rhs + self._right_values
new_derivation.rules = self.rules.copy()
new_derivation.rules.append(rule)
for value in new_derivation_symbols:
if value in expandables:
new_derivation.add_value(value, expandable=True)
else:
new_derivation.add_value(value, expandable=False)
return new_derivation
def to_derivation(self):
assert not self.has_nonterminal(), "Trying to write a non-terminal to a string."
self.rules.reverse()
return self._left_values, self.rules
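# Illustrative sketch (added for exposition; not part of the original
# module): expanding a Template by hand. Np() rewrites NP -> NN, so seeding
# a Template with a lone NP and expanding the leftmost nonterminal once
# leaves a template whose only symbol is the non-expandable NN:
#
#   template = Template()
#   template.add_value(NP, expandable=True)
#   expanded = template.expand_leftmost_nonterminal(Np(), expandables={NP})
#   expanded.to_derivation()   # -> ([NN], [Np()])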
class Grammar(object):
RULES = {
"conjunction": [Root(), RootConj(max_recursion=2), VpWrapper(), VpIntransitive(), VpTransitive(), Dp(),
NpWrapper(max_recursion=2), Np()],
"adverb": [Root(), VpWrapper(), VpIntransitive(), VpTransitive(), Dp(),
NpWrapper(max_recursion=2), Np()],
"normal": [Root(), VpIntransitive(), VpTransitive(), Dp(), NpWrapper(max_recursion=2), Np()],
"simple_trans": [Root(), VpTransitive(), Dp(), NpWrapper(max_recursion=1), Np()],
"simple_intrans": [Root(), VpIntransitive(), Dp(), NpWrapper(max_recursion=1), Np()]
}
def __init__(self, vocabulary: ClassVar, max_recursion=1, type_grammar="normal"):
"""
Defines a grammar of NT -> NT rules and NT -> T rules depending on the vocabulary.
:param vocabulary: an instance of class Vocabulary filled with different types of words.
:param max_recursion: Maximum recursion to be allowed in generation of examples.
:param type_grammar: options are 'conjunction', 'adverb', 'normal', 'simple_trans' and 'simple_intrans'.
Determines which set of common rules is chosen.
"""
assert type_grammar in self.RULES, "Specified unsupported type grammar {}".format(type_grammar)
self.type_grammar = type_grammar
if type_grammar == "simple_intrans":
assert len(vocabulary.get_intransitive_verbs()) > 0, "Please specify intransitive verbs."
elif type_grammar == "simple_trans":
assert len(vocabulary.get_transitive_verbs()) > 0, "Please specify transitive verbs."
self.rule_list = self.RULES[type_grammar] + self.lexical_rules(vocabulary.get_intransitive_verbs(),
vocabulary.get_transitive_verbs(),
vocabulary.get_adverbs(),
vocabulary.get_nouns(),
vocabulary.get_color_adjectives(),
vocabulary.get_size_adjectives())
nonterminals = {rule.lhs for rule in self.rule_list}
self.rules = {nonterminal: [] for nonterminal in nonterminals}
self.nonterminals = {nt.name: nt for nt in nonterminals}
self.terminals = {}
self.vocabulary = vocabulary
self.rule_str_to_rules = {}
for rule in self.rule_list:
self.rules[rule.lhs].append(rule)
self.rule_str_to_rules[str(rule)] = rule
self.expandables = set(rule.lhs for rule in self.rule_list if not isinstance(rule, LexicalRule))
self.categories = {
"manner": set(vocabulary.get_adverbs()),
"shape": set(vocabulary.get_nouns()),
"color": set(vocabulary.get_color_adjectives()),
"size": set(vocabulary.get_size_adjectives())
}
self.word_to_category = {}
for category, words in self.categories.items():
for word in words:
self.word_to_category[word] = category
self.max_recursion = max_recursion
self.all_templates = []
self.all_derivations = {}
self.command_statistics = self.empty_command_statistics()
@staticmethod
def empty_command_statistics():
return {
VV_intransitive: {},
VV_transitive: {},
NN: {},
JJ: {},
RB: {}
}
def reset_grammar(self):
self.command_statistics = self.empty_command_statistics()
self.all_templates.clear()
self.all_derivations.clear()
def lexical_rules(self, verbs_intrans: List[str], verbs_trans: List[str], adverbs: List[str], nouns: List[str],
color_adjectives: List[str], size_adjectives: List[str]) -> list:
"""
Instantiate the lexical rules with the sampled words from the vocabulary.
"""
assert size_adjectives or color_adjectives, "Please specify words for at least one of size_adjectives or "\
"color_adjectives."
all_rules = []
vv_intrans_rules = [
LexicalRule(lhs=VV_intransitive, word=verb, sem_type=EVENT, specs=Weights(action=verb, is_transitive=False))
for verb in verbs_intrans
]
all_rules += vv_intrans_rules
if self.type_grammar != "simple":
vv_trans_rules = [
LexicalRule(lhs=VV_transitive, word=verb, sem_type=EVENT, specs=Weights(action=verb, is_transitive=True))
for verb in verbs_trans
]
all_rules += vv_trans_rules
if self.type_grammar == "adverb" or self.type_grammar == "full":
rb_rules = [LexicalRule(lhs=RB, word=word, sem_type=EVENT, specs=Weights(manner=word)) for word in adverbs]
all_rules += rb_rules
nn_rules = [LexicalRule(lhs=NN, word=word, sem_type=ENTITY, specs=Weights(noun=word)) for word in nouns]
all_rules += nn_rules
jj_rules = []
if color_adjectives:
jj_rules.extend([LexicalRule(lhs=JJ, word=word, sem_type=ENTITY, specs=Weights(adjective_type=COLOR))
for word in color_adjectives])
if size_adjectives:
jj_rules.extend([LexicalRule(lhs=JJ, word=word, sem_type=ENTITY, specs=Weights(adjective_type=SIZE))
for word in size_adjectives])
all_rules += jj_rules
return all_rules
def sample(self, symbol=ROOT, last_rule=None, recursion=0):
"""
Sample a command from the grammar by recursively sampling rules for each symbol.
:param symbol: current node in constituency tree.
:param last_rule: previous rule sampled.
:param recursion: recursion depth (increases if the sampled rule is applied twice).
:return: Derivation
"""
# If the current symbol is a Terminal, close current branch and return.
if isinstance(symbol, Terminal):
return symbol
nonterminal_rules = self.rules[symbol]
# Filter out last rule if max recursion depth is reached.
if recursion == self.max_recursion - 1:
nonterminal_rules = [rule for rule in nonterminal_rules if rule != last_rule]
# Sample a random rule.
next_rule = nonterminal_rules[np.random.randint(len(nonterminal_rules))]
next_recursion = recursion + 1 if next_rule == last_rule else 0
return Derivation(
next_rule,
tuple(self.sample(next_symbol, next_rule, next_recursion) for next_symbol in next_rule.rhs),
meta={"recursion": recursion}
)
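# Illustrative sketch (added for exposition; not part of the original
# module): sampling one command, where `vocab` is a hypothetical
# Vocabulary instance supplying verbs, nouns, adjectives and adverbs:
#
#   grammar = Grammar(vocab, max_recursion=2, type_grammar="normal")
#   derivation = grammar.sample()
#   ' '.join(derivation.words())   # e.g. 'push a big red circle'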
def generate_all(self, current_template: Template, all_templates: list, rule_use_counter: dict):
# If the template contains no non-terminals, we close this branch.
if not current_template.has_nonterminal():
all_templates.append(current_template.to_derivation())
return
# Retrieve the leftmost non-terminal to expand.
leftmost_nonterminal = current_template.get_leftmost_nonterminal()
# Get all possible RHS replacements and start a new derivation branch for each of them.
rules_leftmost_nonterminal = self.rules[leftmost_nonterminal]
for rule_leftmost_nonterminal in rules_leftmost_nonterminal:
# Lexical rules are not expandable
if isinstance(rule_leftmost_nonterminal, LexicalRule):
continue
# Each branch gets its own rule usage counter.
rule_use_counter_copy = rule_use_counter.copy()
# If this rule has already been applied in the current branch..
if rule_leftmost_nonterminal in rule_use_counter_copy.keys():
# ..do not use it again if it has been applied more than a maximum allowed number of times.
if rule_use_counter[rule_leftmost_nonterminal] >= rule_leftmost_nonterminal.max_recursion:
continue
rule_use_counter_copy[rule_leftmost_nonterminal] += 1
else:
rule_use_counter_copy[rule_leftmost_nonterminal] = 1
# Get the next derivation by replacing the leftmost NT with its RHS.
next_template = current_template.expand_leftmost_nonterminal(rule_leftmost_nonterminal,
self.expandables)
# Start a new derivation branch for this RHS.
self.generate_all(next_template, all_templates, rule_use_counter_copy)
def form_commands_from_template(self, derivation_template: list, derivation_rules: list):
"""
Replaces all NT's in a template with the possible T's and forms all possible commands with those.
If multiple of the same NT follow each other, e.g. a JJ JJ JJ NN, the possible words get split over the
repeated slots, meaning no words will repeat themselves (e.g. no 'the red red circle'). This does mean that
whenever the max. recursion depth for a rule is larger than log(n), where n is the number of words for that
particular rule, the extra recursion has no effect.
:param derivation_template: list of NT's, e.g. [VV_intrans, 'to', 'a', JJ, JJ, NN, RB]
:param derivation_rules: list of rules that build up the constituency tree for this template
:return: all possible combinations where all NT's are replaced by the words from the lexicon.
"""
# In the templates, replace each lexical rule with the possible words from the lexicon
replaced_template = []
previous_symbol = None
lexicon = {}
for symbol in derivation_template:
if isinstance(symbol, Nonterminal):
possible_words = [s.name for s in self.rules[symbol]]
for rule in self.rules[symbol]:
lexicon[rule.name] = rule
if previous_symbol == symbol:
previous_words = replaced_template.pop()
first_words, second_words = self.split_on_category(previous_words)
replaced_template.append(first_words)
replaced_template.append(second_words)
else:
replaced_template.append(possible_words)
else:
lexicon[symbol.name] = symbol
replaced_template.append([symbol.name])
previous_symbol = symbol
# Generate all possible commands from the templates.
all_commands = [command for command in product(*replaced_template)]
all_derivations = []
for command in all_commands:
command_lexicon = {}
for word, symbol in zip(command, derivation_template):
if symbol not in command_lexicon:
command_lexicon[symbol] = [lexicon[word]]
else:
command_lexicon[symbol] = [lexicon[word]] + command_lexicon[symbol]
if isinstance(symbol, Nonterminal):
if word not in self.command_statistics[symbol].keys():
self.command_statistics[symbol][word] = 1
else:
self.command_statistics[symbol][word] += 1
derivation = Derivation.from_rules(derivation_rules.copy(), symbol=ROOT, lexicon=command_lexicon)
assert ' '.join(derivation.words()) == ' '.join(command), "Derivation and command not the same."
all_derivations.append(derivation)
return all_derivations
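# Illustrative worked example (added for exposition; not part of the
# original module): given JJ words {'red' (color), 'big' (size)} and a
# template containing JJ JJ NN, the repeated JJ slot is split by category
# via split_on_category, so the product pairs one color word with one size
# word ('big red circle') and never repeats a word ('red red circle' is
# never generated).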
def generate_all_commands(self):
# Generate all possible templates from the grammar.
initial_template = Template()
initial_template.add_value(value=ROOT, expandable=True)
self.generate_all(current_template=initial_template, all_templates=self.all_templates,
rule_use_counter={})
# For each template, form all possible commands by combining it with the lexicon.
for i, (derivation_template, derivation_rules) in enumerate(self.all_templates):
derivations = self.form_commands_from_template(derivation_template, derivation_rules)
self.all_derivations[i] = derivations
def split_on_category(self, words_list):
first_category_words = [words_list[0]]
second_category_words = []
first_category = self.category(words_list[0])
for word in words_list[1:]:
if self.category(word) == first_category:
first_category_words.append(word)
else:
second_category_words.append(word)
return first_category_words, second_category_words
def category(self, function):
return self.word_to_category.get(function)
def is_coherent(self, logical_form):
"""
Returns true for coherent logical forms, false otherwise. A command's logical form is coherent when the
arguments of each variable all have unique categories. E.g., incoherent would be: 'the red blue circle'.
"""
for variable in logical_form.variables:
functions = [term.function for term in logical_form.terms if variable in term.arguments]
categories = [self.category(function) for function in functions]
categories = [c for c in categories if c is not None]
if len(categories) != len(set(categories)):
return False
return True
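# Illustrative worked example (added for exposition; not part of the
# original module): for 'a red circle' the single entity variable has
# terms whose categories are [color, shape] -> coherent. For 'a red blue
# circle' the categories are [color, color, shape]; the duplicate makes
# len(categories) != len(set(categories)), so is_coherent returns False.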
def __str__(self):
output_str = ""
for rule in self.rule_list:
output_str += rule.__str__() + ';'
return output_str
| {
"content_hash": "aabba01c8ec33e45e8e5e0e037ba959e",
"timestamp": "",
"source": "github",
"line_count": 600,
"max_line_length": 121,
"avg_line_length": 41.3,
"alnum_prop": 0.5968523002421308,
"repo_name": "LauraRuis/groundedSCAN",
"id": "a9d5678a0410e142f5adc72636eaa259b2565f88",
"size": "24841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GroundedScan/grammar.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "284405"
}
],
"symlink_target": ""
} |
"""Implementation of SQLAlchemy backend."""
import datetime
import functools
import re
import warnings
from nova import block_device
from nova import db
from nova import exception
from nova import flags
from nova import utils
from nova import log as logging
from nova.compute import aggregate_states
from nova.compute import vm_states
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import and_
from sqlalchemy import or_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import func
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
flags.DECLARE('reserved_host_disk_mb', 'nova.scheduler.host_manager')
flags.DECLARE('reserved_host_memory_mb', 'nova.scheduler.host_manager')
LOG = logging.getLogger(__name__)
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
warnings.warn(_('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
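# Illustrative sketch (added for exposition; not part of the original
# module): the decorators above guard DB API functions whose first
# positional argument is the request context. A hypothetical admin-only
# helper:
#
#   @require_admin_context
#   def service_count(context):
#       return model_query(context, models.Service).count()
#
# Calling service_count(user_context) raises exception.AdminRequired().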
def require_instance_exists(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_id, *args, **kwargs):
db.instance_get(context, instance_id)
return f(context, instance_id, *args, **kwargs)
return wrapper
def require_volume_exists(f):
"""Decorator to require the specified volume to exist.
Requires the wrapped function to use context and volume_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, volume_id, *args, **kwargs):
db.volume_get(context, volume_id)
return f(context, volume_id, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
db.aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def model_query(context, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(*args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
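# Illustrative sketch (added for exposition; not part of the original
# module): typical model_query usage, assuming a RequestContext `ctxt`:
#
#   model_query(ctxt, models.Instance, read_deleted="no",
#               project_only=True).\
#           filter_by(host='host1').\
#           all()
#
# read_deleted="no" hides soft-deleted rows, "yes" includes them, and
# "only" returns nothing but them; project_only narrows user-context
# queries to the caller's project.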
def exact_filter(query, model, filters, legal_keys):
"""Applies exact match filtering to a query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param model: model object the query applies to, for IN-style
filtering
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
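# Illustrative sketch (added for exposition; not part of the original
# module): exact_filter consumes the keys it handles and leaves the rest
# in `filters` for later fuzzy matching. A hypothetical call:
#
#   filters = {'project_id': 'p1', 'vm_state': ['active', 'error'],
#              'display_name': 'web%'}
#   query = exact_filter(query, models.Instance, filters,
#                        ['project_id', 'vm_state'])
#   # filters == {'display_name': 'web%'}; the list-valued 'vm_state'
#   # became an IN clause, 'project_id' an equality match.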
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)
if service_ref.topic == 'compute' and service_ref.compute_node:
for c in service_ref.compute_node:
c.delete(session=session)
@require_admin_context
def service_get(context, service_id, session=None):
result = model_query(context, models.Service, session=session).\
options(joinedload('compute_node')).\
filter_by(id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
def service_get_all_compute_by_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
options(joinedload('compute_node')).\
filter_by(host=host).\
filter_by(topic="compute").\
all()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return model_query(context, models.Service,
func.coalesce(sort_value, 0),
session=session, read_deleted="no").\
filter_by(topic=topic).\
filter_by(disabled=False).\
outerjoin((subq, models.Service.host == subq.c.host)).\
order_by(sort_value).\
all()
@require_admin_context
def service_get_all_compute_sorted(context):
session = get_session()
with session.begin():
# NOTE(vish): The intended query is below
# SELECT services.*, COALESCE(inst_cores.instance_cores,
# 0)
# FROM services LEFT OUTER JOIN
# (SELECT host, SUM(instances.vcpus) AS instance_cores
# FROM instances GROUP BY host) AS inst_cores
# ON services.host = inst_cores.host
topic = 'compute'
label = 'instance_cores'
subq = model_query(context, models.Instance.host,
func.sum(models.Instance.vcpus).label(label),
session=session, read_deleted="no").\
group_by(models.Instance.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
subq,
label)
@require_admin_context
def service_get_all_network_sorted(context):
session = get_session()
with session.begin():
topic = 'network'
label = 'network_count'
subq = model_query(context, models.Network.host,
func.count(models.Network.id).label(label),
session=session, read_deleted="no").\
group_by(models.Network.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
subq,
label)
@require_admin_context
def service_get_all_volume_sorted(context):
session = get_session()
with session.begin():
topic = 'volume'
label = 'volume_gigabytes'
subq = model_query(context, models.Volume.host,
func.sum(models.Volume.size).label(label),
session=session, read_deleted="no").\
group_by(models.Volume.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
subq,
label)
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not FLAGS.enable_new_services:
service_ref.disabled = True
service_ref.save()
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = service_get(context, service_id, session=session)
service_ref.update(values)
service_ref.save(session=session)
###################
@require_admin_context
def compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@require_admin_context
def compute_node_get_by_service(context, service_id, session=None):
if not session:
session = get_session()
result = model_query(context, models.ComputeNode, session=session).\
filter_by(service_id=service_id).\
first()
if not result:
raise exception.ComputeHostNotFound(host="ServiceID=%s" % service_id)
return result
@require_admin_context
def compute_node_get_all(context, session=None):
return model_query(context, models.ComputeNode, session=session).\
options(joinedload('service')).\
all()
@require_admin_context
def compute_node_get_for_service(context, service_id):
return model_query(context, models.ComputeNode).\
options(joinedload('service')).\
filter_by(service_id=service_id).\
first()
def _get_host_utilization(context, host, ram_mb, disk_gb):
"""Compute the current utilization of a given host."""
instances = instance_get_all_by_host(context, host)
vms = len(instances)
free_ram_mb = ram_mb - FLAGS.reserved_host_memory_mb
free_disk_gb = disk_gb - (FLAGS.reserved_host_disk_mb * 1024)
work = 0
for instance in instances:
free_ram_mb -= instance.memory_mb
free_disk_gb -= instance.root_gb
free_disk_gb -= instance.ephemeral_gb
if instance.vm_state in [vm_states.BUILDING, vm_states.REBUILDING,
vm_states.MIGRATING, vm_states.RESIZING]:
work += 1
return dict(free_ram_mb=free_ram_mb,
free_disk_gb=free_disk_gb,
current_workload=work,
running_vms=vms)
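# Illustrative worked example (added for exposition; not part of the
# original module): with ram_mb=32768, disk_gb=500, hypothetical flags
# reserved_host_memory_mb=512 and reserved_host_disk_mb=0, and two
# instances of 2048 MB RAM / 20 GB root / 0 GB ephemeral, one of them
# BUILDING:
#
#   free_ram_mb      = 32768 - 512 - 2 * 2048 = 28160
#   free_disk_gb     = 500 - 0 - 2 * 20       = 460
#   current_workload = 1 (the BUILDING instance)
#   running_vms      = 2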
def _adjust_compute_node_values_for_utilization(context, values, session):
service_ref = service_get(context, values['service_id'], session=session)
host = service_ref['host']
ram_mb = values['memory_mb']
disk_gb = values['local_gb']
values.update(_get_host_utilization(context, host, ram_mb, disk_gb))
@require_admin_context
def compute_node_create(context, values, session=None):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data."""
if not session:
session = get_session()
_adjust_compute_node_values_for_utilization(context, values, session)
with session.begin(subtransactions=True):
compute_node_ref = models.ComputeNode()
session.add(compute_node_ref)
compute_node_ref.update(values)
return compute_node_ref
@require_admin_context
def compute_node_update(context, compute_id, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data."""
session = get_session()
_adjust_compute_node_values_for_utilization(context, values, session)
with session.begin(subtransactions=True):
compute_ref = compute_node_get(context, compute_id, session=session)
compute_ref.update(values)
compute_ref.save(session=session)
# Note: these operations use with_lockmode() ... so this will only work
# reliably with engines that support row-level locking
# (postgres, mysql+innodb and above).
def compute_node_get_by_host(context, host):
"""Get all capacity entries for the given host."""
session = get_session()
with session.begin():
node = session.query(models.ComputeNode).\
options(joinedload('service')).\
filter(models.Service.host == host).\
filter_by(deleted=False).\
with_lockmode('update')
return node.first()
def compute_node_capacity_find(context, minimum_ram_mb, minimum_disk_gb):
"""Get all enabled hosts with enough ram and disk."""
session = get_session()
with session.begin():
return session.query(models.ComputeNode).\
options(joinedload('service')).\
filter(models.ComputeNode.free_ram_mb >= minimum_ram_mb).\
filter(models.ComputeNode.free_disk_gb >= minimum_disk_gb).\
filter(models.Service.disabled == False).\
filter_by(deleted=False).\
with_lockmode('update').all()
def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
free_disk_gb_delta=0, work_delta=0, vm_delta=0):
"""Update a specific ComputeNode entry by a series of deltas.
Do this as a single atomic action and lock the row for the
duration of the operation. Requires that the ComputeNode record exists."""
session = get_session()
compute_node = None
with session.begin(subtransactions=True):
compute_node = session.query(models.ComputeNode).\
options(joinedload('service')).\
filter(models.Service.host == host).\
filter_by(deleted=False).\
with_lockmode('update').\
first()
if compute_node is None:
raise exception.NotFound(_("No ComputeNode for %(host)s") %
locals())
# This table thingy is how we get atomic UPDATE x = x + 1
# semantics.
table = models.ComputeNode.__table__
if free_ram_mb_delta != 0:
compute_node.free_ram_mb = table.c.free_ram_mb + free_ram_mb_delta
if free_disk_gb_delta != 0:
compute_node.free_disk_gb = (table.c.free_disk_gb +
free_disk_gb_delta)
if work_delta != 0:
compute_node.current_workload = (table.c.current_workload +
work_delta)
if vm_delta != 0:
compute_node.running_vms = table.c.running_vms + vm_delta
return compute_node
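# Illustrative sketch (added for exposition; not part of the original
# module): booking one new VM's footprint against a host in a single
# locked UPDATE, with hypothetical values:
#
#   compute_node_utilization_update(ctxt, 'compute1',
#                                   free_ram_mb_delta=-2048,
#                                   free_disk_gb_delta=-20,
#                                   work_delta=1, vm_delta=1)
#
# The table-column expressions (table.c.free_ram_mb + delta) push the
# arithmetic into the UPDATE statement itself, so concurrent writers
# cannot lose increments.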
def compute_node_utilization_set(context, host, free_ram_mb=None,
free_disk_gb=None, work=None, vms=None):
"""Like compute_node_utilization_update() modify a specific host
entry. But this function will set the metrics absolutely
(vs. a delta update).
"""
session = get_session()
compute_node = None
with session.begin(subtransactions=True):
compute_node = session.query(models.ComputeNode).\
options(joinedload('service')).\
filter(models.Service.host == host).\
filter_by(deleted=False).\
with_lockmode('update').\
first()
if compute_node is None:
raise exception.NotFound(_("No ComputeNode for %(host)s") %
locals())
if free_ram_mb is not None:
compute_node.free_ram_mb = free_ram_mb
if free_disk_gb is not None:
compute_node.free_disk_gb = free_disk_gb
if work is not None:
compute_node.current_workload = work
if vms is not None:
compute_node.running_vms = vms
return compute_node
###################
@require_admin_context
def certificate_get(context, certificate_id, session=None):
result = model_query(context, models.Certificate, session=session).\
filter_by(id=certificate_id).\
first()
if not result:
raise exception.CertificateNotFound(certificate_id=certificate_id)
return result
@require_admin_context
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
@require_admin_context
def certificate_destroy(context, certificate_id):
session = get_session()
with session.begin():
certificate_ref = certificate_get(context,
certificate_id,
session=session)
certificate_ref.delete(session=session)
@require_admin_context
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_admin_context
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_admin_context
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
@require_admin_context
def certificate_update(context, certificate_id, values):
session = get_session()
with session.begin():
certificate_ref = certificate_get(context,
certificate_id,
session=session)
for (key, value) in values.iteritems():
certificate_ref[key] = value
certificate_ref.save(session=session)
###################
@require_context
def floating_ip_get(context, id):
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
return result
@require_context
def floating_ip_get_pools(context):
session = get_session()
pools = []
for result in session.query(models.FloatingIp.pool).distinct():
pools.append({'name': result[0]})
return pools
@require_context
def floating_ip_allocate_address(context, project_id, pool):
authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
floating_ip_ref['project_id'] = project_id
session.add(floating_ip_ref)
return floating_ip_ref['address']
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
floating_ip_ref.save()
return floating_ip_ref['address']
@require_context
def floating_ip_count_by_project(context, project_id):
authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context,
floating_address,
session=session)
fixed_ip_ref = fixed_ip_get_by_address(context,
fixed_address,
session=session)
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
floating_ip_ref.save(session=session)
@require_context
def floating_ip_deallocate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
floating_ip_ref['project_id'] = None
floating_ip_ref['host'] = None
floating_ip_ref['auto_assigned'] = False
floating_ip_ref.save(session=session)
@require_context
def floating_ip_destroy(context, address):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
floating_ip_ref.delete(session=session)
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
fixed_ip_ref = fixed_ip_get(context,
floating_ip_ref['fixed_ip_id'])
if fixed_ip_ref:
fixed_ip_address = fixed_ip_ref['address']
else:
fixed_ip_address = None
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
floating_ip_ref.save(session=session)
return fixed_ip_address
@require_context
def floating_ip_set_auto_assigned(context, address):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
floating_ip_ref.auto_assigned = True
floating_ip_ref.save(session=session)
def _floating_ip_get_all(context):
return model_query(context, models.FloatingIp, read_deleted="no")
@require_admin_context
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@require_admin_context
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
all()
@require_context
def floating_ip_get_by_address(context, address, session=None):
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and is_user_context(context):
authorize_project_context(context, result.project_id)
return result
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address, session=None):
if not session:
session = get_session()
fixed_ip = fixed_ip_get_by_address(context, fixed_address, session)
fixed_ip_id = fixed_ip['id']
return model_query(context, models.FloatingIp, session=session).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
# NOTE(tr3buchet) please don't invent an exception here, empty list is fine
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
if not session:
session = get_session()
return model_query(context, models.FloatingIp, session=session).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
floating_ip_ref = floating_ip_get_by_address(context, address, session)
for (key, value) in values.iteritems():
floating_ip_ref[key] = value
floating_ip_ref.save(session=session)
@require_context
def _dnsdomain_get(context, session, fqdomain):
return model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
@require_context
def dnsdomain_get(context, fqdomain):
session = get_session()
with session.begin():
return _dnsdomain_get(context, session, fqdomain)
@require_admin_context
def _dnsdomain_get_or_create(context, session, fqdomain):
domain_ref = _dnsdomain_get(context, session, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@require_admin_context
def dnsdomain_register_for_zone(context, fqdomain, zone):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
domain_ref.save(session=session)
@require_admin_context
def dnsdomain_register_for_project(context, fqdomain, project):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
domain_ref.save(session=session)
@require_admin_context
def dnsdomain_unregister(context, fqdomain):
session = get_session()
with session.begin():
session.query(models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
@require_context
def dnsdomain_list(context):
session = get_session()
records = model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
with_lockmode('update').all()
domains = []
for record in records:
domains.append(record.domain)
return domains
###################
@require_admin_context
def fixed_ip_associate(context, address, instance_id, network_id=None,
reserved=False):
"""Keyword arguments:
reserved -- should be a boolean value(True or False), exact value will be
used to filter on the fixed ip address
"""
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_id=network_id)
if fixed_ip_ref.instance is not None:
raise exception.FixedIpAlreadyInUse(address=address)
if not fixed_ip_ref.network:
fixed_ip_ref.network = network_get(context,
network_id,
session=session)
fixed_ip_ref.instance = instance_get(context,
instance_id,
session=session)
session.add(fixed_ip_ref)
return fixed_ip_ref['address']
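# Illustrative sketch (added for exposition; not part of the original
# module): claiming a specific address for an instance, with hypothetical
# identifiers:
#
#   fixed_ip_associate(admin_ctxt, '10.0.0.5', instance_id=42,
#                      network_id=1)
#
# The with_lockmode('update') row lock keeps two requests from claiming
# the same address (except on SQLite, per the NOTE above).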
@require_admin_context
def fixed_ip_associate_pool(context, network_id, instance_id=None, host=None):
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == None)
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_id=None).\
filter_by(host=None).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
raise exception.NoMoreFixedIps()
if fixed_ip_ref['network_id'] is None:
fixed_ip_ref['network_id'] = network_id
if instance_id:
fixed_ip_ref['instance_id'] = instance_id
if host:
fixed_ip_ref['host'] = host
session.add(fixed_ip_ref)
return fixed_ip_ref['address']
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
fixed_ip_ref.save()
return fixed_ip_ref['address']
@require_context
def fixed_ip_bulk_create(context, ips):
session = get_session()
with session.begin():
for ip in ips:
model = models.FixedIp()
model.update(ip)
session.add(model)
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
fixed_ip_ref = fixed_ip_get_by_address(context,
address,
session=session)
fixed_ip_ref['instance_id'] = None
fixed_ip_ref.save(session=session)
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
    # NOTE(vish): only update fixed ips that "belong" to this
    #             host; i.e. the network host or the instance
    #             host matches. Two queries are necessary because
    #             a join with update doesn't work.
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == True),
models.Network.host == host)
fixed_ips = model_query(context, models.FixedIp.id, session=session,
read_deleted="yes").\
filter(models.FixedIp.updated_at < time).\
filter(models.FixedIp.instance_id != None).\
filter(models.FixedIp.allocated == False).\
filter(host_filter).\
all()
result = model_query(context, models.FixedIp, session=session,
read_deleted="yes").\
filter(models.FixedIp.id.in_(fixed_ips)).\
update({'instance_id': None,
'leased': False,
'updated_at': utils.utcnow()},
synchronize_session='fetch')
return result
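# The SELECT-then-UPDATE pair above is roughly the following SQL (a
# conceptual sketch only; an UPDATE with a JOIN isn't portable across the
# supported backends, hence the id list is fetched first and then reused
# in an IN clause):
#
#     SELECT id FROM fixed_ips WHERE updated_at < :time AND ...;
#     UPDATE fixed_ips SET instance_id = NULL, leased = false
#      WHERE id IN (:ids);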
@require_context
def fixed_ip_get(context, id, session=None):
result = model_query(context, models.FixedIp, session=session).\
filter_by(id=id).\
first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if is_user_context(context) and result['instance_id'] is not None:
instance = instance_get(context, result['instance_id'], session)
authorize_project_context(context, instance.project_id)
return result
@require_admin_context
def fixed_ip_get_all(context, session=None):
result = model_query(context, models.FixedIp, session=session,
read_deleted="yes").\
all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address, session=None):
result = model_query(context, models.FixedIp, session=session,
read_deleted="yes").\
filter_by(address=address).\
first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if is_user_context(context) and result['instance_id'] is not None:
instance = instance_get(context, result['instance_id'], session)
authorize_project_context(context, instance.project_id)
return result
@require_context
def fixed_ip_get_by_instance(context, instance_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_id=instance_id).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
return result
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
all()
return result
@require_admin_context
def fixed_ip_get_network(context, address):
fixed_ip_ref = fixed_ip_get_by_address(context, address)
return fixed_ip_ref.network
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
fixed_ip_ref = fixed_ip_get_by_address(context,
address,
session=session)
fixed_ip_ref.update(values)
fixed_ip_ref.save(session=session)
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except IntegrityError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
@require_context
def virtual_interface_update(context, vif_id, values):
"""Update a virtual interface record in the database.
:param vif_id: = id of virtual interface to update
:param values: = values to update
"""
session = get_session()
with session.begin():
vif_ref = virtual_interface_get(context, vif_id, session=session)
vif_ref.update(values)
vif_ref.save(session=session)
return vif_ref
@require_context
def _virtual_interface_query(context, session=None):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="yes")
@require_context
def virtual_interface_get(context, vif_id, session=None):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context, session=session).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists
def virtual_interface_get_by_instance(context, instance_id):
"""Gets all virtual interfaces for instance.
:param instance_id: = id of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context).\
filter_by(instance_id=instance_id).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_id=instance_id).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_admin_context
def virtual_interface_get_by_network(context, network_id):
"""Gets all virtual_interface on network.
:param network_id: = network to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context).\
filter_by(network_id=network_id).\
all()
return vif_refs
@require_context
def virtual_interface_delete(context, vif_id):
"""Delete virtual interface record from the database.
:param vif_id: = id of vif to delete
"""
session = get_session()
vif_ref = virtual_interface_get(context, vif_id, session)
with session.begin():
session.delete(vif_ref)
@require_context
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
:param instance_id: = id of instance
"""
vif_refs = virtual_interface_get_by_instance(context, instance_id)
for vif_ref in vif_refs:
virtual_interface_delete(context, vif_ref['id'])
@require_context
def virtual_interface_get_all(context):
"""Get all vifs"""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
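# Illustrative example for _metadata_refs: a plain metadata dict is
# expanded into one model row per key/value pair, e.g.
#
#     _metadata_refs({'role': 'web', 'tier': '1'}, models.InstanceMetadata)
#
# returns two InstanceMetadata objects, one with key='role'/value='web'
# and one with key='tier'/value='1'.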
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
values['metadata'] = _metadata_refs(values.get('metadata'),
models.InstanceMetadata)
instance_ref = models.Instance()
instance_ref['uuid'] = str(utils.gen_uuid())
instance_ref.update(values)
session = get_session()
with session.begin():
instance_ref.save(session=session)
    # and create the info_cache table entry for the instance
instance_info_cache_create(context, {'instance_id': instance_ref['uuid']})
return instance_ref
@require_admin_context
def instance_data_get_for_project(context, project_id):
result = model_query(context,
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
read_deleted="no").\
filter_by(project_id=project_id).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
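# The tuple above is (instance count, total vcpus, total memory_mb), e.g.
# (hypothetical numbers for a project with three 2-vcpu/2048MB instances):
#
#     count, vcpus, memory_mb = instance_data_get_for_project(ctxt, 'p1')
#     # -> (3, 6, 6144)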
@require_context
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.BlockDeviceMapping).\
filter_by(instance_id=instance_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
instance_info_cache_delete(context, instance_ref['uuid'],
session=session)
@require_context
def instance_stop(context, instance_id):
session = get_session()
with session.begin():
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'host': None,
'vm_state': vm_states.STOPPED,
'task_state': None,
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
update({'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
update({'updated_at': literal_column('updated_at')})
@require_context
def instance_get_by_uuid(context, uuid, session=None):
result = _build_instance_get(context, session=session).\
filter_by(uuid=uuid).\
first()
if not result:
# FIXME(sirp): it would be nice if InstanceNotFound would accept a
# uuid parameter as well
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, session=None):
result = _build_instance_get(context, session=session).\
filter_by(id=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_context
def _build_instance_get(context, session=None):
return model_query(context, models.Instance, session=session,
project_only=True).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache')).\
options(joinedload('volumes')).\
options(joinedload('metadata')).\
options(joinedload('instance_type'))
@require_admin_context
def instance_get_all(context):
return model_query(context, models.Instance).\
options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
all()
@require_context
def instance_get_all_by_filters(context, filters):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise"""
def _regexp_filter_by_metadata(instance, meta):
inst_metadata = [{node['key']: node['value']}
for node in instance['metadata']]
if isinstance(meta, list):
for node in meta:
if node not in inst_metadata:
return False
elif isinstance(meta, dict):
for k, v in meta.iteritems():
if {k: v} not in inst_metadata:
return False
return True
def _regexp_filter_by_column(instance, filter_name, filter_re):
try:
v = getattr(instance, filter_name)
except AttributeError:
return True
if v and filter_re.match(str(v)):
return True
return False
session = get_session()
query_prefix = session.query(models.Instance).\
options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
order_by(desc(models.Instance.created_at))
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = filters['changes-since']
query_prefix = query_prefix.\
filter(models.Instance.updated_at > changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
if filters.pop('deleted'):
deleted = or_(models.Instance.deleted == True,
models.Instance.vm_state == vm_states.SOFT_DELETE)
query_prefix = query_prefix.filter(deleted)
else:
query_prefix = query_prefix.\
filter_by(deleted=False).\
filter(models.Instance.vm_state != vm_states.SOFT_DELETE)
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid']
# Filter the query
query_prefix = exact_filter(query_prefix, models.Instance,
filters, exact_match_filter_names)
instances = query_prefix.all()
if not instances:
return []
# Now filter on everything else for regexp matching..
# For filters not in the list, we'll attempt to use the filter_name
# as a column name in Instance..
regexp_filter_funcs = {}
for filter_name in filters.iterkeys():
filter_func = regexp_filter_funcs.get(filter_name, None)
filter_re = re.compile(str(filters[filter_name]))
if filter_func:
filter_l = lambda instance: filter_func(instance, filter_re)
elif filter_name == 'metadata':
filter_l = lambda instance: _regexp_filter_by_metadata(instance,
filters[filter_name])
else:
filter_l = lambda instance: _regexp_filter_by_column(instance,
filter_name, filter_re)
instances = filter(filter_l, instances)
if not instances:
break
return instances
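# A usage sketch for instance_get_all_by_filters (illustrative; `ctxt` is
# an assumed RequestContext). 'project_id' is matched exactly in SQL,
# 'deleted' is special-cased, and 'display_name' (assumed to be a column
# on Instance) falls through to the python-side regexp matching:
#
#     instances = instance_get_all_by_filters(
#         ctxt, {'project_id': 'p1',
#                'deleted': False,
#                'display_name': '^web-.*'})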
@require_context
def instance_get_active_by_window(context, begin, end=None, project_id=None):
"""Return instances that were active during window."""
session = get_session()
query = session.query(models.Instance)
query = query.filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
@require_admin_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None):
"""Return instances and joins that were active during window."""
session = get_session()
query = session.query(models.Instance)
query = query.options(joinedload('security_groups')).\
options(joinedload('instance_type')).\
filter(or_(models.Instance.terminated_at == None,
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
@require_admin_context
def _instance_get_all_query(context, project_only=False):
return model_query(context, models.Instance, project_only=project_only).\
options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
options(joinedload('metadata')).\
options(joinedload('instance_type'))
@require_admin_context
def instance_get_all_by_user(context, user_id):
return _instance_get_all_query(context).filter_by(user_id=user_id).all()
@require_admin_context
def instance_get_all_by_host(context, host):
return _instance_get_all_query(context).filter_by(host=host).all()
@require_context
def instance_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return _instance_get_all_query(context).\
filter_by(project_id=project_id).\
all()
@require_context
def instance_get_all_by_reservation(context, reservation_id):
return _instance_get_all_query(context, project_only=True).\
filter_by(reservation_id=reservation_id).\
all()
@require_admin_context
def instance_get_project_vpn(context, project_id):
return _instance_get_all_query(context).\
filter_by(project_id=project_id).\
filter_by(image_ref=str(FLAGS.vpn_image_id)).\
first()
# NOTE(jkoelker) This is only being left here for compat with floating
#                ips. Currently the network_api doesn't return floaters
#                in network_info. Once it starts returning the model, this
#                function and its call in compute/manager.py on line 1829
#                can go away.
@require_context
def instance_get_floating_address(context, instance_id):
fixed_ips = fixed_ip_get_by_instance(context, instance_id)
if not fixed_ips:
return None
# NOTE(tr3buchet): this only gets the first fixed_ip
# won't find floating ips associated with other fixed_ips
floating_ips = floating_ip_get_by_fixed_address(context,
fixed_ips[0]['address'])
if not floating_ips:
return None
# NOTE(vish): this just returns the first floating ip
return floating_ips[0]['address']
@require_admin_context
def instance_get_all_hung_in_rebooting(context, reboot_window, session=None):
reboot_window = datetime.datetime.utcnow() - datetime.timedelta(
seconds=reboot_window)
if not session:
session = get_session()
results = session.query(models.Instance).\
filter(models.Instance.updated_at <= reboot_window).\
filter_by(task_state="rebooting").all()
return results
@require_context
def instance_update(context, instance_id, values):
session = get_session()
if utils.is_uuid_like(instance_id):
instance_ref = instance_get_by_uuid(context, instance_id,
session=session)
else:
instance_ref = instance_get(context, instance_id, session=session)
metadata = values.get('metadata')
if metadata is not None:
instance_metadata_update(context,
instance_ref['id'],
values.pop('metadata'),
delete=True)
with session.begin():
instance_ref.update(values)
instance_ref.save(session=session)
return instance_ref
@require_context
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance"""
session = get_session()
with session.begin():
instance_ref = instance_get_by_uuid(context, instance_uuid,
session=session)
security_group_ref = security_group_get(context,
security_group_id,
session=session)
instance_ref.security_groups += [security_group_ref]
instance_ref.save(session=session)
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance"""
session = get_session()
instance_ref = instance_get_by_uuid(context, instance_uuid,
session=session)
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_ref['id']).\
filter_by(security_group_id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
action_ref.update(values)
session = get_session()
with session.begin():
action_ref.save(session=session)
return action_ref
@require_admin_context
def instance_get_actions(context, instance_uuid):
"""Return the actions associated to the given instance id"""
session = get_session()
return session.query(models.InstanceActions).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
def instance_get_id_to_uuid_mapping(context, ids):
session = get_session()
instances = session.query(models.Instance).\
filter(models.Instance.id.in_(ids)).\
all()
mapping = {}
for instance in instances:
mapping[instance['id']] = instance['uuid']
return mapping
###################
@require_context
def instance_info_cache_create(context, values):
"""Create a new instance cache record in the table.
:param context: = request context object
:param values: = dict containing column values
"""
info_cache = models.InstanceInfoCache()
info_cache.update(values)
session = get_session()
with session.begin():
info_cache.save(session=session)
return info_cache
@require_context
def instance_info_cache_get(context, instance_uuid, session=None):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
:param session: = optional session object
"""
session = session or get_session()
info_cache = session.query(models.InstanceInfoCache).\
filter_by(instance_id=instance_uuid).\
first()
return info_cache
@require_context
def instance_info_cache_update(context, instance_uuid, values,
session=None):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
:param session: = optional session object
"""
session = session or get_session()
info_cache = instance_info_cache_get(context, instance_uuid,
session=session)
if info_cache:
info_cache.update(values)
info_cache.save(session=session)
else:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry
values['instance_id'] = instance_uuid
info_cache = instance_info_cache_create(context, values)
return info_cache
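# Illustrative behavior of the update above: even if someone blew away the
# cache row, calling (with `nw_json` an assumed serialized network_info
# blob)
#
#     instance_info_cache_update(ctxt, inst_uuid, {'network_info': nw_json})
#
# transparently recreates the row instead of failing.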
@require_context
def instance_info_cache_delete(context, instance_uuid, session=None):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
:param session: = optional session object
"""
values = {'deleted': True,
'deleted_at': utils.utcnow()}
instance_info_cache_update(context, instance_uuid, values, session)
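# NOTE: the delete above is a soft delete: it is implemented as an update
# that sets the deleted/deleted_at columns rather than removing the row.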
###################
@require_context
def key_pair_create(context, values):
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
@require_context
def key_pair_destroy(context, user_id, name):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
key_pair_ref = key_pair_get(context, user_id, name, session=session)
key_pair_ref.delete(session=session)
@require_context
def key_pair_destroy_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
session.query(models.KeyPair).\
filter_by(user_id=user_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def key_pair_get(context, user_id, name, session=None):
authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair, session=session).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
###################
@require_admin_context
def network_associate(context, project_id, force=False):
"""Associate a project with a network.
called by project_get_networks under certain conditions
and network manager add_network_to_project()
only associate if the project doesn't already have a network
or if force is True
force solves race condition where a fresh project has multiple instance
builds simultaneously picked up by multiple network hosts which attempt
to associate the project with multiple networks
force should only be used as a direct consequence of user request
all automated requests should not use force
"""
session = get_session()
with session.begin():
def network_query(project_filter):
return model_query(context, models.Network, session=session,
read_deleted="no").\
filter_by(project_id=project_filter).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None)
if not network_ref:
raise db.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
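# Illustrative use of the force semantics described in the docstring above
# (`ctxt` is an assumed admin RequestContext):
#
#     net = network_associate(ctxt, 'project1')
#     # associates only if 'project1' has no network yet
#     net = network_associate(ctxt, 'project1', force=True)
#     # always associates a fresh unassigned network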
@require_admin_context
def network_count(context):
return model_query(context, models.Network).count()
@require_admin_context
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@require_admin_context
def network_count_allocated_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(allocated=True).\
count()
@require_admin_context
def network_count_available_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(allocated=False).\
filter_by(reserved=False).\
count()
@require_admin_context
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@require_admin_context
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(utils.gen_uuid())
network_ref.update(values)
try:
network_ref.save()
return network_ref
except IntegrityError:
return None
@require_admin_context
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
network_ref = network_get(context, network_id=network_id,
session=session)
session.delete(network_ref)
@require_admin_context
def network_disassociate(context, network_id):
network_update(context, network_id, {'project_id': None,
'host': None})
@require_admin_context
def network_disassociate_all(context):
session = get_session()
session.query(models.Network).\
update({'project_id': None,
'updated_at': literal_column('updated_at')})
@require_context
def network_get(context, network_id, session=None):
result = model_query(context, models.Network, session=session,
project_only=True).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_admin_context
def network_get_all(context):
result = model_query(context, models.Network, read_deleted="no").all()
if not result:
raise exception.NoNetworksFound()
return result
@require_admin_context
def network_get_all_by_uuids(context, network_uuids, project_id=None):
project_or_none = or_(models.Network.project_id == project_id,
models.Network.project_id == None)
result = model_query(context, models.Network, read_deleted="no").\
filter(models.Network.uuid.in_(network_uuids)).\
filter(project_or_none).\
all()
if not result:
raise exception.NoNetworksFound()
    # check that host is set on all of the networks
    # returned in the result
for network in result:
if network['host'] is None:
raise exception.NetworkHostNotSet(network_id=network['id'])
    # check if the result contains all the networks
    # we are looking for
for network_uuid in network_uuids:
found = False
for network in result:
if network['uuid'] == network_uuid:
found = True
break
if not found:
if project_id:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
# pylint: disable=C0103
@require_admin_context
def network_get_associated_fixed_ips(context, network_id):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter(models.FixedIp.instance_id != None).\
filter(models.FixedIp.virtual_interface_id != None).\
all()
@require_admin_context
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
read_deleted="no")
@require_admin_context
def network_get_by_bridge(context, bridge):
result = _network_get_query(context).filter_by(bridge=bridge).first()
if not result:
raise exception.NetworkNotFoundForBridge(bridge=bridge)
return result
@require_admin_context
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@require_admin_context
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@require_admin_context
def network_get_by_instance(context, instance_id):
    # NOTE: this uses a fixed IP to get to the instance, so it
    # only works for networks the instance has an IP from
result = _network_get_query(context).\
filter_by(instance_id=instance_id).\
first()
if not result:
raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return result
@require_admin_context
def network_get_all_by_instance(context, instance_id):
result = _network_get_query(context).\
filter_by(instance_id=instance_id).\
all()
if not result:
raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return result
@require_admin_context
def network_get_all_by_host(context, host):
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
host_filter = or_(models.Network.host == host,
models.FixedIp.host == host)
return _network_get_query(context).\
filter(host_filter).\
all()
@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
network_ref = _network_get_query(context, session=session).\
filter_by(id=network_id).\
with_lockmode('update').\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not network_ref['host']:
network_ref['host'] = host_id
session.add(network_ref)
return network_ref['host']
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = network_get(context, network_id, session=session)
network_ref.update(values)
network_ref.save(session=session)
return network_ref
###################
def queue_get_for(context, topic, physical_node_id):
# FIXME(ja): this should be servername?
return "%s.%s" % (topic, physical_node_id)
###################
@require_admin_context
def iscsi_target_count_by_host(context, host):
return model_query(context, models.IscsiTarget).\
filter_by(host=host).\
count()
@require_admin_context
def iscsi_target_create_safe(context, values):
iscsi_target_ref = models.IscsiTarget()
for (key, value) in values.iteritems():
iscsi_target_ref[key] = value
try:
iscsi_target_ref.save()
return iscsi_target_ref
except IntegrityError:
return None
###################
@require_admin_context
def auth_token_destroy(context, token_id):
session = get_session()
with session.begin():
token_ref = auth_token_get(context, token_id, session=session)
token_ref.delete(session=session)
@require_admin_context
def auth_token_get(context, token_hash, session=None):
result = model_query(context, models.AuthToken, session=session).\
filter_by(token_hash=token_hash).\
first()
if not result:
raise exception.AuthTokenNotFound(token=token_hash)
return result
@require_admin_context
def auth_token_update(context, token_hash, values):
session = get_session()
with session.begin():
token_ref = auth_token_get(context, token_hash, session=session)
token_ref.update(values)
token_ref.save(session=session)
@require_admin_context
def auth_token_create(context, token):
tk = models.AuthToken()
tk.update(token)
tk.save()
return tk
###################
@require_context
def quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_create(context, project_id, resource, limit):
quota_ref = models.Quota()
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
quota_ref.save()
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
quota_ref = quota_get(context, project_id, resource, session=session)
quota_ref.hard_limit = limit
quota_ref.save(session=session)
@require_admin_context
def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
quota_ref = quota_get(context, project_id, resource, session=session)
quota_ref.delete(session=session)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
quotas = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_ref in quotas:
quota_ref.delete(session=session)
###################
@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
session = get_session()
with session.begin():
iscsi_target_ref = model_query(context, models.IscsiTarget,
session=session, read_deleted="no").\
filter_by(volume=None).\
filter_by(host=host).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not iscsi_target_ref:
raise db.NoMoreTargets()
iscsi_target_ref.volume_id = volume_id
session.add(iscsi_target_ref)
return iscsi_target_ref.target_num
@require_admin_context
def volume_attached(context, volume_id, instance_id, mountpoint):
session = get_session()
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref['status'] = 'in-use'
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
volume_ref.instance = instance_get(context, instance_id,
session=session)
volume_ref.save(session=session)
@require_context
def volume_create(context, values):
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
models.VolumeMetadata)
volume_ref = models.Volume()
volume_ref.update(values)
session = get_session()
with session.begin():
volume_ref.save(session=session)
return volume_ref
@require_admin_context
def volume_data_get_for_project(context, project_id):
result = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no").\
filter_by(project_id=project_id).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
session.query(models.Volume).\
filter_by(id=volume_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
session.query(models.VolumeMetadata).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_detached(context, volume_id):
session = get_session()
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref['status'] = 'available'
volume_ref['mountpoint'] = None
volume_ref['attach_status'] = 'detached'
volume_ref.instance = None
volume_ref.save(session=session)
@require_context
def _volume_get_query(context, session=None, project_only=False):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('instance')).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type'))
@require_context
def volume_get(context, volume_id, session=None):
result = _volume_get_query(context, session=session, project_only=True).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_admin_context
def volume_get_all(context):
return _volume_get_query(context).all()
@require_admin_context
def volume_get_all_by_host(context, host):
return _volume_get_query(context).filter_by(host=host).all()
@require_admin_context
def volume_get_all_by_instance(context, instance_id):
result = model_query(context, models.Volume, read_deleted="no").\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
filter_by(instance_id=instance_id).\
all()
if not result:
raise exception.VolumeNotFoundForInstance(instance_id=instance_id)
return result
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return _volume_get_query(context).filter_by(project_id=project_id).all()
@require_admin_context
def volume_get_instance(context, volume_id):
result = _volume_get_query(context).filter_by(id=volume_id).first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result.instance
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
return result.target_num
@require_context
def volume_update(context, volume_id, values):
session = get_session()
metadata = values.get('metadata')
if metadata is not None:
volume_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True)
with session.begin():
volume_ref = volume_get(context, volume_id, session=session)
volume_ref.update(values)
volume_ref.save(session=session)
####################
def _volume_metadata_get_query(context, volume_id, session=None):
return model_query(context, models.VolumeMetadata,
session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
rows = _volume_metadata_get_query(context, volume_id).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
_volume_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key, session=None):
result = _volume_metadata_get_query(context, volume_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeMetadataNotFound(metadata_key=key,
volume_id=volume_id)
return result
@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
session = get_session()
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = volume_metadata_get(context, volume_id)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = volume_metadata_get_item(context, volume_id,
meta_key, session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta objects
for meta_key, meta_value in metadata.iteritems():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = volume_metadata_get_item(context, volume_id,
meta_key, session)
except exception.VolumeMetadataNotFound, e:
meta_ref = models.VolumeMetadata()
item.update({"key": meta_key, "volume_id": volume_id})
meta_ref.update(item)
meta_ref.save(session=session)
return metadata
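# A sketch of the delete semantics above (illustrative values): if a
# volume currently has metadata {'a': '1', 'b': '2'}, then
#
#     volume_metadata_update(ctxt, vol_id, {'b': '9', 'c': '3'}, delete=True)
#
# soft-deletes 'a', updates 'b' to '9', creates 'c', and returns
# {'b': '9', 'c': '3'}. With delete=False the 'a' entry is left alone.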
###################
@require_context
def snapshot_create(context, values):
snapshot_ref = models.Snapshot()
snapshot_ref.update(values)
session = get_session()
with session.begin():
snapshot_ref.save(session=session)
return snapshot_ref
@require_admin_context
def snapshot_destroy(context, snapshot_id):
session = get_session()
with session.begin():
session.query(models.Snapshot).\
filter_by(id=snapshot_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def snapshot_get(context, snapshot_id, session=None):
result = model_query(context, models.Snapshot, session=session,
project_only=True).\
filter_by(id=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result
@require_admin_context
def snapshot_get_all(context):
return model_query(context, models.Snapshot).all()
@require_context
def snapshot_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return model_query(context, models.Snapshot).\
filter_by(project_id=project_id).\
all()
@require_context
def snapshot_update(context, snapshot_id, values):
session = get_session()
with session.begin():
snapshot_ref = snapshot_get(context, snapshot_id, session=session)
snapshot_ref.update(values)
snapshot_ref.save(session=session)
###################
def _block_device_mapping_get_query(context, session=None):
return model_query(context, models.BlockDeviceMapping, session=session,
read_deleted="no")
@require_context
def block_device_mapping_create(context, values):
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
session = get_session()
with session.begin():
bdm_ref.save(session=session)
@require_context
def block_device_mapping_update(context, bdm_id, values):
session = get_session()
with session.begin():
_block_device_mapping_get_query(context, session=session).\
filter_by(id=bdm_id).\
update(values)
@require_context
def block_device_mapping_update_or_create(context, values):
session = get_session()
with session.begin():
result = _block_device_mapping_get_query(context, session=session).\
filter_by(instance_id=values['instance_id']).\
filter_by(device_name=values['device_name']).\
first()
if not result:
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save(session=session)
else:
result.update(values)
        # NOTE(yamahata): the same virtual device name can be specified
        #                 multiple times, so delete the existing ones.
virtual_name = values['virtual_name']
if (virtual_name is not None and
block_device.is_swap_or_ephemeral(virtual_name)):
session.query(models.BlockDeviceMapping).\
filter_by(instance_id=values['instance_id']).\
filter_by(virtual_name=virtual_name).\
filter(models.BlockDeviceMapping.device_name !=
values['device_name']).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
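# Illustrative consequence of the NOTE above: if 'swap' was previously
# mapped to /dev/vdb (hypothetical device names) and a new mapping arrives
# for 'swap' at /dev/vdc, the /dev/vdb row is soft-deleted so only one
# 'swap' mapping survives.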
@require_context
def block_device_mapping_get_all_by_instance(context, instance_id):
return _block_device_mapping_get_query(context).\
filter_by(instance_id=instance_id).\
all()
@require_context
def block_device_mapping_destroy(context, bdm_id):
session = get_session()
with session.begin():
session.query(models.BlockDeviceMapping).\
filter_by(id=bdm_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_id,
volume_id):
session = get_session()
with session.begin():
_block_device_mapping_get_query(context, session=session).\
filter_by(instance_id=instance_id).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
###################
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False):
return model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only).\
options(joinedload_all('rules'))
@require_context
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
def security_group_get(context, security_group_id, session=None):
result = _security_group_get_query(context, session=session,
project_only=True).\
filter_by(id=security_group_id).\
options(joinedload_all('instances')).\
first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name):
result = _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(name=group_name).\
options(joinedload_all('instances')).\
first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
def security_group_get_by_instance(context, instance_id):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(id=instance_id).\
all()
@require_context
def security_group_exists(context, project_id, group_name):
try:
group = security_group_get_by_name(context, project_id, group_name)
return group is not None
except exception.NotFound:
return False
@require_context
def security_group_create(context, values):
security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fail with a lazy load
    # exception once save() is called. This will get cleaned up in the
    # next orm pass.
security_group_ref.rules
security_group_ref.update(values)
security_group_ref.save()
return security_group_ref
@require_context
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
session.query(models.SecurityGroup).\
filter_by(id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def security_group_destroy_all(context, session=None):
if not session:
session = get_session()
with session.begin():
session.query(models.SecurityGroup).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
###################
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@require_context
def security_group_rule_get(context, security_group_rule_id, session=None):
result = _security_group_rule_get_query(context, session=session).\
filter_by(id=security_group_rule_id).\
first()
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
session=None):
return _security_group_rule_get_query(context, session=session).\
filter_by(parent_group_id=security_group_id).\
options(joinedload_all('grantee_group.instances')).\
all()
@require_context
def security_group_rule_get_by_security_group_grantee(context,
security_group_id,
session=None):
return _security_group_rule_get_query(context, session=session).\
filter_by(group_id=security_group_id).\
all()
@require_context
def security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save()
return security_group_rule_ref
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
session = get_session()
with session.begin():
security_group_rule = security_group_rule_get(context,
security_group_rule_id,
session=session)
security_group_rule.delete(session=session)
###################
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
@require_admin_context
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@require_admin_context
def provider_fw_rule_get_all_by_cidr(context, cidr):
return model_query(context, models.ProviderFirewallRule).\
filter_by(cidr=cidr).\
all()
@require_admin_context
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
###################
@require_admin_context
def user_get(context, id, session=None):
result = model_query(context, models.User, session=session).\
filter_by(id=id).\
first()
if not result:
raise exception.UserNotFound(user_id=id)
return result
@require_admin_context
def user_get_by_access_key(context, access_key, session=None):
result = model_query(context, models.User, session=session).\
filter_by(access_key=access_key).\
first()
if not result:
raise exception.AccessKeyNotFound(access_key=access_key)
return result
@require_admin_context
def user_create(context, values):
user_ref = models.User()
user_ref.update(values)
user_ref.save()
return user_ref
@require_admin_context
def user_delete(context, id):
session = get_session()
with session.begin():
session.query(models.UserProjectAssociation).\
filter_by(user_id=id).\
delete()
session.query(models.UserRoleAssociation).\
filter_by(user_id=id).\
delete()
session.query(models.UserProjectRoleAssociation).\
filter_by(user_id=id).\
delete()
user_ref = user_get(context, id, session=session)
session.delete(user_ref)
def user_get_all(context):
return model_query(context, models.User).all()
def user_get_roles(context, user_id):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
return [role.role for role in user_ref['roles']]
def user_get_roles_for_project(context, user_id, project_id):
session = get_session()
with session.begin():
res = session.query(models.UserProjectRoleAssociation).\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
return [association.role for association in res]
def user_remove_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
session.query(models.UserProjectRoleAssociation).\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
filter_by(role=role).\
delete()
def user_remove_role(context, user_id, role):
session = get_session()
with session.begin():
res = session.query(models.UserRoleAssociation).\
filter_by(user_id=user_id).\
filter_by(role=role).\
all()
for role in res:
session.delete(role)
def user_add_role(context, user_id, role):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
models.UserRoleAssociation(user=user_ref, role=role).\
save(session=session)
def user_add_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
project_ref = project_get(context, project_id, session=session)
models.UserProjectRoleAssociation(user_id=user_ref['id'],
project_id=project_ref['id'],
role=role).save(session=session)
def user_update(context, user_id, values):
session = get_session()
with session.begin():
user_ref = user_get(context, user_id, session=session)
user_ref.update(values)
user_ref.save(session=session)
###################
def project_create(context, values):
project_ref = models.Project()
project_ref.update(values)
project_ref.save()
return project_ref
def project_add_member(context, project_id, user_id):
session = get_session()
with session.begin():
project_ref = project_get(context, project_id, session=session)
user_ref = user_get(context, user_id, session=session)
project_ref.members += [user_ref]
project_ref.save(session=session)
def project_get(context, id, session=None):
result = model_query(context, models.Project, session=session,
read_deleted="no").\
filter_by(id=id).\
options(joinedload_all('members')).\
first()
if not result:
raise exception.ProjectNotFound(project_id=id)
return result
def project_get_all(context):
return model_query(context, models.Project).\
options(joinedload_all('members')).\
all()
def project_get_by_user(context, user_id):
user = model_query(context, models.User).\
filter_by(id=user_id).\
options(joinedload_all('projects')).\
first()
if not user:
raise exception.UserNotFound(user_id=user_id)
return user.projects
def project_remove_member(context, project_id, user_id):
session = get_session()
project = project_get(context, project_id, session=session)
user = user_get(context, user_id, session=session)
if user in project.members:
project.members.remove(user)
project.save(session=session)
def project_update(context, project_id, values):
session = get_session()
with session.begin():
project_ref = project_get(context, project_id, session=session)
project_ref.update(values)
project_ref.save(session=session)
def project_delete(context, id):
session = get_session()
with session.begin():
session.query(models.UserProjectAssociation).\
filter_by(project_id=id).\
delete()
session.query(models.UserProjectRoleAssociation).\
filter_by(project_id=id).\
delete()
project_ref = project_get(context, id, session=session)
session.delete(project_ref)
@require_context
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
# associate is true
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
@require_context
def project_get_networks_v6(context, project_id):
return project_get_networks(context, project_id)
###################
@require_admin_context
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
@require_admin_context
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = migration_get(context, id, session=session)
migration.update(values)
migration.save(session=session)
return migration
@require_admin_context
def migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@require_admin_context
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@require_admin_context
def migration_get_all_unconfirmed(context, confirm_window, session=None):
confirm_window = datetime.datetime.utcnow() - datetime.timedelta(
seconds=confirm_window)
return model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="FINISHED").\
all()
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
pool.save()
return pool
def console_pool_get(context, pool_id):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(id=pool_id).\
first()
if not result:
raise exception.ConsolePoolNotFound(pool_id=pool_id)
return result
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# NOTE(mdragon): consoles are meant to be transient.
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_id):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_id=instance_id).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_id=instance_id)
return result
def console_get_all_by_instance(context, instance_id):
return model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_id=instance_id).\
all()
def console_get(context, console_id, instance_id=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_id is not None:
query = query.filter_by(instance_id=instance_id)
result = query.first()
if not result:
if instance_id:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_id=instance_id)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@require_admin_context
def instance_type_create(context, values):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain an 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
session = get_session()
with session.begin():
try:
instance_type_get_by_name(context, values['name'], session)
raise exception.InstanceTypeExists(name=values['name'])
except exception.InstanceTypeNotFoundByName:
pass
try:
instance_type_get_by_flavor_id(context, values['flavorid'],
session)
raise exception.InstanceTypeExists(name=values['name'])
except exception.FlavorNotFound:
pass
try:
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.iteritems():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
instance_type_ref.save(session=session)
except Exception, e:
raise exception.DBError(e)
return _dict_with_extra_specs(instance_type_ref)
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance, volume, or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
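# A minimal sketch of the conversion above, using a plain dict in place of
# a real SQLAlchemy row (the flavor name and spec keys are illustrative):
#
#   row = {'name': 'm1.small',
#          'extra_specs': [{'key': 'k1', 'value': 'v1'},
#                          {'key': 'k2', 'value': 'v2'}]}
#   _dict_with_extra_specs(row)
#   # => {'name': 'm1.small', 'extra_specs': {'k1': 'v1', 'k2': 'v2'}}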
def _instance_type_get_query(context, session=None, read_deleted=None):
return model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
@require_context
def instance_type_get_all(context, inactive=False, filters=None):
"""
Returns all instance types.
"""
filters = filters or {}
read_deleted = "yes" if inactive else "no"
query = _instance_type_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
inst_types = query.order_by("name").all()
return [_dict_with_extra_specs(i) for i in inst_types]
@require_context
def instance_type_get(context, id, session=None):
"""Returns a dict describing specific instance_type"""
result = _instance_type_get_query(context, session=session).\
filter_by(id=id).\
first()
if not result:
raise exception.InstanceTypeNotFound(instance_type_id=id)
return _dict_with_extra_specs(result)
@require_context
def instance_type_get_by_name(context, name, session=None):
"""Returns a dict describing specific instance_type"""
result = _instance_type_get_query(context, session=session).\
filter_by(name=name).\
first()
if not result:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
return _dict_with_extra_specs(result)
@require_context
def instance_type_get_by_flavor_id(context, flavor_id, session=None):
"""Returns a dict describing specific flavor_id"""
result = _instance_type_get_query(context, session=session).\
filter_by(flavorid=flavor_id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@require_admin_context
def instance_type_destroy(context, name):
"""Marks specific instance_type as deleted"""
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_name(context, name,
session=session)
instance_type_id = instance_type_ref['id']
session.query(models.InstanceTypes).\
filter_by(id=instance_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceTypeExtraSpecs).\
filter_by(instance_type_id=instance_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
####################
@require_admin_context
def zone_create(context, values):
zone = models.Zone()
zone.update(values)
zone.save()
return zone
def _zone_get_by_id_query(context, zone_id, session=None):
return model_query(context, models.Zone, session=session).\
filter_by(id=zone_id)
@require_admin_context
def zone_update(context, zone_id, values):
zone = zone_get(context, zone_id)
zone.update(values)
zone.save()
return zone
@require_admin_context
def zone_delete(context, zone_id):
session = get_session()
with session.begin():
_zone_get_by_id_query(context, zone_id, session=session).\
delete()
@require_admin_context
def zone_get(context, zone_id):
result = _zone_get_by_id_query(context, zone_id).first()
if not result:
raise exception.ZoneNotFound(zone_id=zone_id)
return result
@require_admin_context
def zone_get_all(context):
return model_query(context, models.Zone, read_deleted="yes").all()
####################
def _instance_metadata_get_query(context, instance_id, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
filter_by(instance_id=instance_id)
@require_context
@require_instance_exists
def instance_metadata_get(context, instance_id):
rows = _instance_metadata_get_query(context, instance_id).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
@require_instance_exists
def instance_metadata_delete(context, instance_id, key):
_instance_metadata_get_query(context, instance_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
@require_instance_exists
def instance_metadata_get_item(context, instance_id, key, session=None):
result = _instance_metadata_get_query(
context, instance_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.InstanceMetadataNotFound(metadata_key=key,
instance_id=instance_id)
return result
@require_context
@require_instance_exists
def instance_metadata_update(context, instance_id, metadata, delete):
session = get_session()
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = instance_metadata_get(context, instance_id)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = instance_metadata_get_item(context, instance_id,
meta_key, session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta objects
for meta_key, meta_value in metadata.iteritems():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = instance_metadata_get_item(context, instance_id,
meta_key, session)
except exception.InstanceMetadataNotFound, e:
meta_ref = models.InstanceMetadata()
item.update({"key": meta_key, "instance_id": instance_id})
meta_ref.update(item)
meta_ref.save(session=session)
return metadata
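# Hedged usage sketch (instance id and keys are made up): with delete=True
# the call behaves like a full replace, so
#
#   instance_metadata_update(ctxt, 42, {'a': '1'}, True)
#
# soft-deletes every existing key except 'a' before upserting 'a'; with
# delete=False, existing keys absent from `metadata` are left untouched.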
####################
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
agent_build_ref.save()
return agent_build_ref
@require_admin_context
def agent_build_get_by_triple(context, hypervisor, os, architecture,
session=None):
return model_query(context, models.AgentBuild, session=session,
read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@require_admin_context
def agent_build_get_all(context):
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@require_admin_context
def agent_build_destroy(context, agent_build_id):
session = get_session()
with session.begin():
model_query(context, models.AgentBuild, session=session,
read_deleted="yes").\
filter_by(id=agent_build_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def agent_build_update(context, agent_build_id, values):
session = get_session()
with session.begin():
agent_build_ref = model_query(context, models.AgentBuild,
session=session, read_deleted="yes").\
filter_by(id=agent_build_id).\
first()
agent_build_ref.update(values)
agent_build_ref.save(session=session)
####################
@require_context
def bw_usage_get_by_instance(context, instance_id, start_period):
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter_by(instance_id=instance_id).\
filter_by(start_period=start_period).\
all()
@require_context
def bw_usage_get_all_by_filters(context, filters):
"""Return bandwidth usage that matches all filters."""
session = get_session()
query_prefix = session.query(models.BandwidthUsage).\
order_by(desc(models.BandwidthUsage.created_at))
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
# Filters for exact matches that we can do along with the SQL query.
exact_match_filter_names = ["instance_id", "network_label",
"start_period", "last_refreshed", "bw_in", "bw_out"]
# Filter the query
query_prefix = exact_filter(query_prefix, models.BandwidthUsage,
filters, exact_match_filter_names)
return query_prefix.all()
@require_context
def bw_usage_update(context,
instance_id,
mac,
start_period,
bw_in, bw_out,
session=None):
if not session:
session = get_session()
with session.begin():
bwusage = model_query(context, models.BandwidthUsage,
read_deleted="yes").\
filter_by(instance_id=instance_id).\
filter_by(start_period=start_period).\
filter_by(mac=mac).\
first()
if not bwusage:
bwusage = models.BandwidthUsage()
bwusage.instance_id = instance_id
bwusage.start_period = start_period
bwusage.mac = mac
bwusage.last_refreshed = utils.utcnow()
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.save(session=session)
####################
def _instance_type_extra_specs_get_query(context, instance_type_id,
session=None):
return model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=instance_type_id)
@require_context
def instance_type_extra_specs_get(context, instance_type_id):
rows = _instance_type_extra_specs_get_query(
context, instance_type_id).\
all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
def instance_type_extra_specs_delete(context, instance_type_id, key):
_instance_type_extra_specs_get_query(
context, instance_type_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def instance_type_extra_specs_get_item(context, instance_type_id, key,
session=None):
result = _instance_type_extra_specs_get_query(
context, instance_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.InstanceTypeExtraSpecsNotFound(
extra_specs_key=key, instance_type_id=instance_type_id)
return result
@require_context
def instance_type_extra_specs_update_or_create(context, instance_type_id,
specs):
session = get_session()
spec_ref = None
for key, value in specs.iteritems():
try:
spec_ref = instance_type_extra_specs_get_item(
context, instance_type_id, key, session)
except exception.InstanceTypeExtraSpecsNotFound, e:
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id,
"deleted": 0})
spec_ref.save(session=session)
return specs
##################
@require_admin_context
def volume_type_create(context, values):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain a 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
session = get_session()
with session.begin():
try:
volume_type_get_by_name(context, values['name'], session)
raise exception.VolumeTypeExists(name=values['name'])
except exception.VolumeTypeNotFoundByName:
pass
try:
specs = values.get('extra_specs')
values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
models.VolumeTypeExtraSpecs)
volume_type_ref = models.VolumeTypes()
volume_type_ref.update(values)
volume_type_ref.save()
except Exception, e:
raise exception.DBError(e)
return volume_type_ref
@require_context
def volume_type_get_all(context, inactive=False, filters=None):
"""
Returns a dict describing all volume_types with name as key.
"""
filters = filters or {}
read_deleted = "yes" if inactive else "no"
rows = model_query(context, models.VolumeTypes,
read_deleted=read_deleted).\
options(joinedload('extra_specs')).\
order_by("name").\
all()
# TODO(sirp): this patern of converting rows to a result with extra_specs
# is repeated quite a bit, might be worth creating a method for it
result = {}
for row in rows:
result[row['name']] = _dict_with_extra_specs(row)
return result
@require_context
def volume_type_get(context, id, session=None):
"""Returns a dict describing specific volume_type"""
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not result:
raise exception.VolumeTypeNotFound(volume_type=id)
return _dict_with_extra_specs(result)
@require_context
def volume_type_get_by_name(context, name, session=None):
"""Returns a dict describing specific volume_type"""
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not result:
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
else:
return _dict_with_extra_specs(result)
@require_admin_context
def volume_type_destroy(context, name):
session = get_session()
with session.begin():
volume_type_ref = volume_type_get_by_name(context, name,
session=session)
volume_type_id = volume_type_ref['id']
session.query(models.VolumeTypes).\
filter_by(id=volume_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.VolumeTypeExtraSpecs).\
filter_by(volume_type_id=volume_type_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
####################
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
rows = _volume_type_extra_specs_query(context, volume_type_id).\
all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
_volume_type_extra_specs_query(context, volume_type_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_type_extra_specs_get_item(context, volume_type_id, key,
session=None):
result = _volume_type_extra_specs_query(
context, volume_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeTypeExtraSpecsNotFound(
extra_specs_key=key, volume_type_id=volume_type_id)
return result
@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
specs):
session = get_session()
spec_ref = None
for key, value in specs.iteritems():
try:
spec_ref = volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
except exception.VolumeTypeExtraSpecsNotFound, e:
spec_ref = models.VolumeTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"volume_type_id": volume_type_id,
"deleted": 0})
spec_ref.save(session=session)
return specs
####################
def _vsa_get_query(context, session=None, project_only=False):
return model_query(context, models.VirtualStorageArray, session=session,
project_only=project_only).\
options(joinedload('vsa_instance_type'))
@require_admin_context
def vsa_create(context, values):
"""
Creates Virtual Storage Array record.
"""
try:
vsa_ref = models.VirtualStorageArray()
vsa_ref.update(values)
vsa_ref.save()
except Exception, e:
raise exception.DBError(e)
return vsa_ref
@require_admin_context
def vsa_update(context, vsa_id, values):
"""
Updates Virtual Storage Array record.
"""
session = get_session()
with session.begin():
vsa_ref = vsa_get(context, vsa_id, session=session)
vsa_ref.update(values)
vsa_ref.save(session=session)
return vsa_ref
@require_admin_context
def vsa_destroy(context, vsa_id):
"""
Deletes Virtual Storage Array record.
"""
session = get_session()
with session.begin():
session.query(models.VirtualStorageArray).\
filter_by(id=vsa_id).\
update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def vsa_get(context, vsa_id, session=None):
"""
Get Virtual Storage Array record by ID.
"""
result = _vsa_get_query(context, session=session, project_only=True).\
filter_by(id=vsa_id).\
first()
if not result:
raise exception.VirtualStorageArrayNotFound(id=vsa_id)
return result
@require_admin_context
def vsa_get_all(context):
"""
Get all Virtual Storage Array records.
"""
return _vsa_get_query(context).all()
@require_context
def vsa_get_all_by_project(context, project_id):
"""
Get all Virtual Storage Array records by project ID.
"""
authorize_project_context(context, project_id)
return _vsa_get_query(context).filter_by(project_id=project_id).all()
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id"""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid"""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid"""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception, e:
raise exception.DBError(e)
return s3_image_ref
####################
@require_admin_context
def sm_backend_conf_create(context, values):
backend_conf = models.SMBackendConf()
backend_conf.update(values)
backend_conf.save()
return backend_conf
@require_admin_context
def sm_backend_conf_update(context, sm_backend_id, values):
backend_conf = model_query(context, models.SMBackendConf,
read_deleted="yes").\
filter_by(id=sm_backend_id).\
first()
if not backend_conf:
raise exception.NotFound(
_("No backend config with id %(sm_backend_id)s") % locals())
backend_conf.update(values)
backend_conf.save()
return backend_conf
@require_admin_context
def sm_backend_conf_delete(context, sm_backend_id):
# FIXME(sirp): for consistency, shouldn't this just mark as deleted with
# `purge` actually deleting the record?
session = get_session()
with session.begin():
model_query(context, models.SMBackendConf, session=session,
read_deleted="yes").\
filter_by(id=sm_backend_id).\
delete()
@require_admin_context
def sm_backend_conf_get(context, sm_backend_id):
result = model_query(context, models.SMBackendConf, read_deleted="yes").\
filter_by(id=sm_backend_id).\
first()
if not result:
raise exception.NotFound(_("No backend config with id "
"%(sm_backend_id)s") % locals())
return result
@require_admin_context
def sm_backend_conf_get_by_sr(context, sr_uuid):
session = get_session()
# FIXME(sirp): shouldn't this have a `first()` qualifier attached?
return model_query(context, models.SMBackendConf, read_deleted="yes").\
filter_by(sr_uuid=sr_uuid)
@require_admin_context
def sm_backend_conf_get_all(context):
return model_query(context, models.SMBackendConf, read_deleted="yes").\
all()
####################
def _sm_flavor_get_query(context, sm_flavor_label, session=None):
return model_query(context, models.SMFlavors, session=session,
read_deleted="yes").\
filter_by(label=sm_flavor_label)
@require_admin_context
def sm_flavor_create(context, values):
sm_flavor = models.SMFlavors()
sm_flavor.update(values)
sm_flavor.save()
return sm_flavor
@require_admin_context
def sm_flavor_update(context, sm_flavor_label, values):
sm_flavor = sm_flavor_get(context, sm_flavor_label)
sm_flavor.update(values)
sm_flavor.save()
return sm_flavor
@require_admin_context
def sm_flavor_delete(context, sm_flavor_label):
session = get_session()
with session.begin():
_sm_flavor_get_query(context, sm_flavor_label).delete()
@require_admin_context
def sm_flavor_get(context, sm_flavor_label):
result = _sm_flavor_get_query(context, sm_flavor_label).first()
if not result:
raise exception.NotFound(
_("No sm_flavor called %(sm_flavor)s") % locals())
return result
@require_admin_context
def sm_flavor_get_all(context):
return model_query(context, models.SMFlavors, read_deleted="yes").all()
###############################
def _sm_volume_get_query(context, volume_id, session=None):
return model_query(context, models.SMVolume, session=session,
read_deleted="yes").\
filter_by(id=volume_id)
def sm_volume_create(context, values):
sm_volume = models.SMVolume()
sm_volume.update(values)
sm_volume.save()
return sm_volume
def sm_volume_update(context, volume_id, values):
sm_volume = sm_volume_get(context, volume_id)
sm_volume.update(values)
sm_volume.save()
return sm_volume
def sm_volume_delete(context, volume_id):
session = get_session()
with session.begin():
_sm_volume_get_query(context, volume_id, session=session).delete()
def sm_volume_get(context, volume_id):
result = _sm_volume_get_query(context, volume_id).first()
if not result:
raise exception.NotFound(
_("No sm_volume with id %(volume_id)s") % locals())
return result
def sm_volume_get_all(context):
return model_query(context, models.SMVolume, read_deleted="yes").all()
################
def _aggregate_get_query(context, model_class, id_field, id,
session=None, read_deleted='yes'):
return model_query(context, model_class, session=session,
read_deleted=read_deleted).filter(id_field == id)
@require_admin_context
def aggregate_create(context, values, metadata=None):
try:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.operational_state = aggregate_states.CREATED
aggregate.save()
except exception.DBError:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
return aggregate
@require_admin_context
def aggregate_get(context, aggregate_id, read_deleted='no'):
aggregate = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id, aggregate_id,
read_deleted=read_deleted).first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
@require_admin_context
def aggregate_update(context, aggregate_id, values):
session = get_session()
aggregate = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id, aggregate_id,
session=session,
read_deleted='no').first()
if aggregate:
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=True)
with session.begin():
aggregate.update(values)
aggregate.save(session=session)
values['metadata'] = metadata
return aggregate
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@require_admin_context
def aggregate_delete(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id, aggregate_id,
read_deleted='no')
if query.first():
query.update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@require_admin_context
def aggregate_get_all(context, read_deleted='yes'):
return model_query(context,
models.Aggregate,
read_deleted=read_deleted).all()
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id, read_deleted='no'):
rows = model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted).\
filter_by(aggregate_id=aggregate_id).all()
return dict([(r['key'], r['value']) for r in rows])
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
query = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id, read_deleted='no').\
filter_by(key=key)
if query.first():
query.update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
else:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_get_item(context, aggregate_id, key,
session=None, read_deleted='yes'):
result = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id, session=session,
read_deleted=read_deleted).\
filter_by(key=key).first()
if not result:
raise exception.AggregateMetadataNotFound(metadata_key=key,
aggregate_id=aggregate_id)
return result
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
session = get_session()
if set_delete:
original_metadata = aggregate_metadata_get(context, aggregate_id)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = aggregate_metadata_get_item(context, aggregate_id,
meta_key, session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
for meta_key, meta_value in metadata.iteritems():
item = {"value": meta_value}
try:
meta_ref = aggregate_metadata_get_item(context, aggregate_id,
meta_key, session)
if meta_ref.deleted:
item.update({'deleted': False, 'deleted_at': None,
'updated_at': literal_column('updated_at')})
except exception.AggregateMetadataNotFound:
meta_ref = models.AggregateMetadata()
item.update({"key": meta_key, "aggregate_id": aggregate_id})
meta_ref.update(item)
meta_ref.save(session=session)
return metadata
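# Sketch of the add/undelete behaviour above (aggregate id and key are
# illustrative): if key 'az' was previously soft-deleted,
#
#   aggregate_metadata_add(ctxt, 1, {'az': 'nova'})
#
# resurrects the existing row (deleted=False, deleted_at=None) rather than
# inserting a duplicate; set_delete=True additionally soft-deletes any keys
# missing from the supplied metadata dict.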
@require_admin_context
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id, read_deleted='yes'):
rows = model_query(context,
models.AggregateHost,
read_deleted=read_deleted).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_admin_context
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
query = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id,
read_deleted='no').filter_by(host=host)
if query.first():
query.update({'deleted': True,
'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
else:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_admin_context
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
session = get_session()
host_ref = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id,
session=session,
read_deleted='yes').\
filter_by(host=host).first()
if not host_ref:
try:
host_ref = models.AggregateHost()
values = {"host": host, "aggregate_id": aggregate_id, }
host_ref.update(values)
host_ref.save(session=session)
except exception.DBError:
raise exception.AggregateHostConflict(host=host)
elif host_ref.deleted:
host_ref.update({'deleted': False,
'deleted_at': None,
'updated_at': literal_column('updated_at')})
host_ref.save(session=session)
else:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save()
return dict(fault_ref.iteritems())
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row.iteritems())
output[row['instance_uuid']].append(data)
return output
| {
"content_hash": "c715e8ca27b03190e0c5e840ac0e0618",
"timestamp": "",
"source": "github",
"line_count": 4473,
"max_line_length": 79,
"avg_line_length": 32.800134138162306,
"alnum_prop": 0.5905394813072965,
"repo_name": "russellb/nova",
"id": "ed9286eff4ab539a589121e835c73e14ec6fdf69",
"size": "147554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4974"
},
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "5611148"
},
{
"name": "Shell",
"bytes": "25380"
}
],
"symlink_target": ""
} |
"""Submit a Spark SQL job to a cluster."""
from apitools.base.py import encoding
from googlecloudsdk.api_lib.dataproc import base_classes
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA)
class SparkSql(base_classes.JobSubmitter):
"""Submit a Spark SQL job to a cluster.
Submit a Spark SQL job to a cluster.
## EXAMPLES
To submit a Spark SQL job with a local script, run:
$ {command} --cluster my_cluster --file my_queries.ql
To submit a Spark SQL job with inline queries, run:
$ {command} --cluster my_cluster -e "CREATE EXTERNAL TABLE foo(bar int) LOCATION 'gs://my_bucket/'" -e "SELECT * FROM foo WHERE bar > 2"
"""
@staticmethod
def Args(parser):
super(SparkSql, SparkSql).Args(parser)
SparkSqlBase.Args(parser)
def ConfigureJob(self, job, args):
SparkSqlBase.ConfigureJob(
self.context['dataproc_messages'],
job,
self.BuildLoggingConfig(args.driver_log_levels),
self.files_by_type,
args)
super(SparkSql, self).ConfigureJob(job, args)
def PopulateFilesByType(self, args):
self.files_by_type.update(SparkSqlBase.GetFilesByType(args))
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class SparkSqlBeta(base_classes.JobSubmitterBeta):
"""Submit a Spark SQL job to a cluster.
Submit a Spark SQL job to a cluster.
## EXAMPLES
To submit a Spark SQL job with a local script, run:
$ {command} --cluster my_cluster --file my_queries.ql
To submit a Spark SQL job with inline queries, run:
$ {command} --cluster my_cluster -e "CREATE EXTERNAL TABLE foo(bar int) LOCATION 'gs://my_bucket/'" -e "SELECT * FROM foo WHERE bar > 2"
"""
@staticmethod
def Args(parser):
super(SparkSqlBeta, SparkSqlBeta).Args(parser)
SparkSqlBase.Args(parser)
def ConfigureJob(self, job, args):
SparkSqlBase.ConfigureJob(
self.context['dataproc_messages'],
job,
self.BuildLoggingConfig(args.driver_log_levels),
self.files_by_type,
args)
super(SparkSqlBeta, self).ConfigureJob(job, args)
def PopulateFilesByType(self, args):
self.files_by_type.update(SparkSqlBase.GetFilesByType(args))
class SparkSqlBase(object):
"""Submit a Spark SQL job to a cluster."""
@staticmethod
def Args(parser):
"""Parses command-line arguments specific to submitting SparkSql jobs."""
driver = parser.add_mutually_exclusive_group(required=True)
driver.add_argument(
'--execute', '-e',
metavar='QUERY',
dest='queries',
action='append',
default=[],
help='A Spark SQL query to execute as part of the job.')
driver.add_argument(
'--file', '-f',
help=('HCFS URI of file containing Spark SQL script to execute as '
'the job.'))
parser.add_argument(
'--jars',
type=arg_parsers.ArgList(),
metavar='JAR',
default=[],
help=('Comma separated list of jar files to be provided to the '
'executor and driver classpaths. May contain UDFs.'))
parser.add_argument(
'--params',
type=arg_parsers.ArgDict(),
metavar='PARAM=VALUE',
help='A list of key value pairs to set variables in the Spark SQL queries.')
parser.add_argument(
'--properties',
type=arg_parsers.ArgDict(),
metavar='PROPERTY=VALUE',
help='A list of key value pairs to configure Spark SQL.')
parser.add_argument(
'--driver-log-levels',
type=arg_parsers.ArgDict(),
metavar='PACKAGE=LEVEL',
help=('A list of package to log4j log level pairs to configure driver '
'logging. For example: root=FATAL,com.example=INFO'))
@staticmethod
def GetFilesByType(args):
return {
'jars': args.jars,
'file': args.file}
@staticmethod
def ConfigureJob(messages, job, log_config, files_by_type, args):
"""Populates the sparkSqlJob member of the given job."""
spark_sql_job = messages.SparkSqlJob(
jarFileUris=files_by_type['jars'],
queryFileUri=files_by_type['file'],
loggingConfig=log_config)
if args.queries:
spark_sql_job.queryList = messages.QueryList(queries=args.queries)
if args.params:
spark_sql_job.scriptVariables = encoding.DictToMessage(
args.params, messages.SparkSqlJob.ScriptVariablesValue)
if args.properties:
spark_sql_job.properties = encoding.DictToMessage(
args.properties, messages.SparkSqlJob.PropertiesValue)
job.sparkSqlJob = spark_sql_job
| {
"content_hash": "9e2280b34a9a8f7eed07f8550e1683b3",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 140,
"avg_line_length": 31.534246575342465,
"alnum_prop": 0.6639878366637706,
"repo_name": "Sorsly/subtle",
"id": "e9669d69479339eab107ce9bbf41a5026e8bc673",
"size": "5200",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/surface/dataproc/jobs/submit/spark_sql.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("logs", "0004_auto_20180116_2202")]
operations = [
migrations.AlterField(
model_name="emaillog",
name="status",
field=models.CharField(
choices=[
("open", "Open"),
("ok", "Delivered"),
("spambounce", "Spam-bounce"),
("softbounce", "Soft-bounce"),
("hardbounce", "Hard-bounce"),
("dropped", "Dropped"),
("deferred", "Deferred"),
("unknown", "Unknown"),
],
default="unknown",
max_length=20,
),
)
]
| {
"content_hash": "37ae99830d9015eadcc4a6a03f1d608b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 56,
"avg_line_length": 29.51851851851852,
"alnum_prop": 0.4203262233375157,
"repo_name": "watchdogpolska/feder",
"id": "d484d1637184f4afba8daea18e51f174c506c83b",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feder/letters/logs/migrations/0005_auto_20180325_2244.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624"
},
{
"name": "HTML",
"bytes": "183421"
},
{
"name": "JavaScript",
"bytes": "6245"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "Python",
"bytes": "574027"
},
{
"name": "SCSS",
"bytes": "40546"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
} |
from functools import reduce  # built in on Python 2; explicit import keeps this runnable on Python 3

class Klass(object):
    def __getattr__(self, name):
        """
        Locate the attribute reached by the dotted
        path in `name`.
        """
        def traverse(parent, child):
            if isinstance(parent, str):
                parent = getattr(self, parent)
            return getattr(parent, child)
        return reduce(traverse, name.split('.'))
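if __name__ == '__main__':
    # Quick demonstration of the dotted lookup; the class and attribute
    # names below are made up for illustration.
    class Inner(object):
        value = 42

    k = Klass()
    k.child = Inner()
    # A dotted name is resolved one step at a time by __getattr__.
    print(getattr(k, 'child.value'))  # -> 42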
| {
"content_hash": "1908c6d6e2f5f4122e4aef18a421ee8e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 29.75,
"alnum_prop": 0.5322128851540616,
"repo_name": "ActiveState/code",
"id": "c7f611dd2a27b521f02a58e25bb5f9e1432d7591",
"size": "357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/578398_Traverse_dotted_attribute_object_using/recipe-578398.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
""" s3 support for remote file interactivity """
from typing import IO, Any, Optional, Tuple
from urllib.parse import urlparse as parse_url
from pandas.compat._optional import import_optional_dependency
from pandas._typing import FilePathOrBuffer
s3fs = import_optional_dependency(
"s3fs", extra="The s3fs package is required to handle s3 files."
)
def _strip_schema(url):
"""Returns the url without the s3:// part"""
result = parse_url(url, allow_fragments=False)
return result.netloc + result.path
def get_file_and_filesystem(
filepath_or_buffer: FilePathOrBuffer, mode: Optional[str] = None
) -> Tuple[IO, Any]:
from botocore.exceptions import NoCredentialsError
if mode is None:
mode = "rb"
fs = s3fs.S3FileSystem(anon=False)
try:
file = fs.open(_strip_schema(filepath_or_buffer), mode)
except (FileNotFoundError, NoCredentialsError):
# boto3 has troubles when trying to access a public file
# when credentialed...
# An OSError is raised if you have credentials, but they
# aren't valid for that bucket.
# A NoCredentialsError is raised if you don't have creds
# for that bucket.
fs = s3fs.S3FileSystem(anon=True)
file = fs.open(_strip_schema(filepath_or_buffer), mode)
return file, fs
def get_filepath_or_buffer(
filepath_or_buffer: FilePathOrBuffer,
encoding: Optional[str] = None,
compression: Optional[str] = None,
mode: Optional[str] = None,
) -> Tuple[IO, Optional[str], Optional[str], bool]:
file, _fs = get_file_and_filesystem(filepath_or_buffer, mode=mode)
return file, None, compression, True
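# Hedged usage sketch (bucket and key are placeholders): callers such as
# pandas' readers obtain an open file handle via
#
#   file, _, compression, should_close = get_filepath_or_buffer(
#       "s3://my-bucket/data.csv")
#
# and anonymous access is retried automatically when credentials are
# missing or rejected, per get_file_and_filesystem above.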
| {
"content_hash": "5205b351849af2081a2e2d8b17e3e383",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 33.32,
"alnum_prop": 0.6872749099639855,
"repo_name": "kushalbhola/MyStuff",
"id": "7e0a37e8cba209b2f61a30390d82a36f518f183a",
"size": "1666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/io/s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
} |
"Generate a table of random passwords for people to use"
import random
import string
# No ambiguous lookalike chars: 0 O o 1 i l I L
#print(string.printable[:26*2+10])
characters = "23456789abcdefghjkmnpqrstuvwxyzABCDEFGHJKMNPQRSTUVWXYZ"
print(characters)
def password(digits=10):
return ''.join(random.choice(characters) for x in range(digits))
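# Rough strength estimate, assuming the 54-character alphabet above: each
# character contributes log2(54) ~ 5.75 bits, so the default 10-character
# password carries roughly 57.5 bits of entropy. (Note that random.choice
# is not cryptographically secure; the secrets module is preferred for
# real passwords.)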
# 20 rows of passwords
for i in range(20):
passwords = [password() for i in range(5)]
print("\t".join(passwords))
print()
print("Hit enter to continue")
ignored = input() | {
"content_hash": "ad3f118d9f9538c2610a4485f9da9dcf",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 23.136363636363637,
"alnum_prop": 0.7328094302554028,
"repo_name": "AnthonyBriggs/Python-101",
"id": "6df5636c18de3469d4f67337f849a8353f0da30e",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "passwords/passwords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "320"
},
{
"name": "HTML",
"bytes": "3070"
},
{
"name": "JavaScript",
"bytes": "1359"
},
{
"name": "Python",
"bytes": "502125"
}
],
"symlink_target": ""
} |
import setuptools
setuptools.setup(
name='universe',
version='0.0.1',
author='Jeff Bradberry',
author_email='jeff.bradberry@gmail.com',
description='A space 4X game',
url='https://github.com/jbradberry/universe',
packages=setuptools.find_packages(),
classifiers=[
'Development Status :: 1 - Planning',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Games/Entertainment :: Turn Based Strategy',
],
)
| {
"content_hash": "4ae3a25e51c1487cd9cd3fd40d439cd0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 29.157894736842106,
"alnum_prop": 0.631768953068592,
"repo_name": "jbradberry/universe",
"id": "c96c8164a3f12f280a573e0a06759f06313ad323",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12844"
}
],
"symlink_target": ""
} |
__version__=''' $Id: styles.py 3231 2008-06-03 16:42:41Z rgbecker $ '''
from reportlab.lib.colors import white, black
from reportlab.lib.enums import TA_LEFT, TA_CENTER
###########################################################
# This class provides an 'instance inheritance'
# mechanism for its descendants, simpler than acquisition
# but not as far-reaching
###########################################################
class PropertySet:
defaults = {}
def __init__(self, name, parent=None, **kw):
"""When initialized, it copies the class defaults;
then takes a copy of the attributes of the parent
if any. All the work is done in init - styles
should cost little to use at runtime."""
# step one - validate the hell out of it
assert not self.defaults.has_key('name'), "Class Defaults may not contain a 'name' attribute"
assert not self.defaults.has_key('parent'), "Class Defaults may not contain a 'parent' attribute"
if parent:
assert parent.__class__ == self.__class__, "Parent style must have same class as new style"
#step two
self.name = name
self.parent = parent
self.__dict__.update(self.defaults)
#step three - copy from parent if any. Try to be
# very strict that only keys in class defaults are
# allowed, so they cannot inherit
self.refresh()
#step four - copy keywords if any
for (key, value) in kw.items():
self.__dict__[key] = value
def __repr__(self):
return "<%s '%s'>" % (self.__class__.__name__, self.name)
def refresh(self):
"""re-fetches attributes from the parent on demand;
use if you have been hacking the styles. This is
used by __init__"""
if self.parent:
for (key, value) in self.parent.__dict__.items():
if (key not in ['name','parent']):
self.__dict__[key] = value
def listAttrs(self, indent=''):
print indent + 'name =', self.name
print indent + 'parent =', self.parent
keylist = self.__dict__.keys()
keylist.sort()
keylist.remove('name')
keylist.remove('parent')
for key in keylist:
value = self.__dict__.get(key, None)
print indent + '%s = %s' % (key, value)
class ParagraphStyle(PropertySet):
defaults = {
'fontName':'Times-Roman',
'fontSize':10,
'leading':12,
'leftIndent':0,
'rightIndent':0,
'firstLineIndent':0,
'alignment':TA_LEFT,
'spaceBefore':0,
'spaceAfter':0,
'bulletFontName':'Times-Roman',
'bulletFontSize':10,
'bulletIndent':0,
'textColor': black,
'backColor':None,
'wordWrap':None,
'borderWidth': 0,
'borderPadding': 0,
'borderColor': None,
'borderRadius': None,
'allowWidows': 1,
'allowOrphans': 0,
'textTransform':None, #uppercase lowercase (capitalize not yet) or None or absent
}
class LineStyle(PropertySet):
defaults = {
'width':1,
'color': black
}
def prepareCanvas(self, canvas):
"""You can ask a LineStyle to set up the canvas for drawing
the lines."""
canvas.setLineWidth(1)
#etc. etc.
class StyleSheet1:
"""This may or may not be used. The idea is to
1. slightly simplify construction of stylesheets;
2. enforce rules to validate styles when added
(e.g. we may choose to disallow having both
'heading1' and 'Heading1' - actual rules are
open to discussion);
3. allow aliases and alternate style lookup
mechanisms
4. Have a place to hang style-manipulation
methods (save, load, maybe support a GUI
editor)
Access is via getitem, so they can be
compatible with plain old dictionaries.
"""
def __init__(self):
self.byName = {}
self.byAlias = {}
def __getitem__(self, key):
try:
return self.byAlias[key]
except KeyError:
try:
return self.byName[key]
except KeyError:
raise KeyError, "Style '%s' not found in stylesheet" % key
def has_key(self, key):
if self.byAlias.has_key(key):
return 1
elif self.byName.has_key(key):
return 1
else:
return 0
def add(self, style, alias=None):
key = style.name
if self.byName.has_key(key):
raise KeyError, "Style '%s' already defined in stylesheet" % key
if self.byAlias.has_key(key):
raise KeyError, "Style name '%s' is already an alias in stylesheet" % key
if alias:
if self.byName.has_key(alias):
raise KeyError, "Style '%s' already defined in stylesheet" % alias
if self.byAlias.has_key(alias):
raise KeyError, "Alias name '%s' is already an alias in stylesheet" % alias
#passed all tests? OK, add it
self.byName[key] = style
if alias:
self.byAlias[alias] = style
def list(self):
styles = self.byName.items()
styles.sort()
alii = {}
for (alias, style) in self.byAlias.items():
alii[style] = alias
for (name, style) in styles:
alias = alii.get(style, None)
print name, alias
style.listAttrs(' ')
print
def testStyles():
pNormal = ParagraphStyle('Normal',None)
pNormal.fontName = 'Times-Roman'
pNormal.fontSize = 12
pNormal.leading = 14.4
pNormal.listAttrs()
print
pPre = ParagraphStyle('Literal', pNormal)
pPre.fontName = 'Courier'
pPre.listAttrs()
return pNormal, pPre
def getSampleStyleSheet():
"""Returns a stylesheet object"""
stylesheet = StyleSheet1()
stylesheet.add(ParagraphStyle(name='Normal',
fontName='Times-Roman',
fontSize=10,
leading=12)
)
stylesheet.add(ParagraphStyle(name='BodyText',
parent=stylesheet['Normal'],
spaceBefore=6)
)
stylesheet.add(ParagraphStyle(name='Italic',
parent=stylesheet['BodyText'],
fontName = 'Times-Italic')
)
stylesheet.add(ParagraphStyle(name='Heading1',
parent=stylesheet['Normal'],
fontName = 'Times-Bold',
fontSize=18,
leading=22,
spaceAfter=6),
alias='h1')
stylesheet.add(ParagraphStyle(name='Title',
parent=stylesheet['Normal'],
fontName = 'Times-Bold',
fontSize=18,
leading=22,
alignment=TA_CENTER,
spaceAfter=6),
alias='title')
stylesheet.add(ParagraphStyle(name='Heading2',
parent=stylesheet['Normal'],
fontName = 'Times-Bold',
fontSize=14,
leading=18,
spaceBefore=12,
spaceAfter=6),
alias='h2')
stylesheet.add(ParagraphStyle(name='Heading3',
parent=stylesheet['Normal'],
fontName = 'Times-BoldItalic',
fontSize=12,
leading=14,
spaceBefore=12,
spaceAfter=6),
alias='h3')
stylesheet.add(ParagraphStyle(name='Bullet',
parent=stylesheet['Normal'],
firstLineIndent=0,
spaceBefore=3),
alias='bu')
stylesheet.add(ParagraphStyle(name='Definition',
parent=stylesheet['Normal'],
firstLineIndent=0,
leftIndent=36,
bulletIndent=0,
spaceBefore=6,
bulletFontName='Times-BoldItalic'),
alias='df')
stylesheet.add(ParagraphStyle(name='Code',
parent=stylesheet['Normal'],
fontName='Courier',
fontSize=8,
leading=8.8,
firstLineIndent=0,
leftIndent=36))
return stylesheet
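# Example lookups against the sample sheet above; styles resolve both by
# name and by the alias registered via add():
#
#   styles = getSampleStyleSheet()
#   styles['Heading1'] is styles['h1']   # -> True (same object)
#   styles['Code'].fontName              # -> 'Courier'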
| {
"content_hash": "ee8b73b583a63689e37ae7cdbd03e787",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 105,
"avg_line_length": 36.08045977011494,
"alnum_prop": 0.47424869916109164,
"repo_name": "jwheare/digest",
"id": "f3540f0a92335b88282135ddf7e0af34ba82079a",
"size": "9603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/reportlab/lib/styles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "75169"
},
{
"name": "Python",
"bytes": "3874016"
}
],
"symlink_target": ""
} |
from setuptools import setup
from etcli import __version__
with open('README.rst') as f:
readme = f.read()
setup(
name='et-cli',
version=__version__,
packages=['etcli'],
description='ExactTarget CLI Tool',
long_description=readme,
url='https://github.com/tzmfreedom/et-cli',
author='makoto tajitsu',
author_email='makoto_tajitsu@hotmail.co.jp',
license='MIT',
scripts=['bin/et', 'bin/et.cmd'],
install_requires=[
'FuelSDK',
],
)
| {
"content_hash": "f01e0b7b44b58a8a61f4dde6a16bb2f5",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 48,
"avg_line_length": 23.38095238095238,
"alnum_prop": 0.6252545824847251,
"repo_name": "tzmfreedom/et-cli",
"id": "cb248693ef8480976c5b36eeaca8d42300ce8bfe",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "30"
},
{
"name": "Python",
"bytes": "33323"
}
],
"symlink_target": ""
} |
import socket
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Nodes import Node, Nodes
from PLC.Interfaces import Interface, Interfaces
from PLC.Auth import Auth
from PLC.POD import udp_pod
class RebootNode(Method):
"""
Sends the specified node a specially formatted UDP packet which
should cause it to reboot immediately.
Admins can reboot any node. Techs and PIs can only reboot nodes at
their site.
Returns 1 if the packet was successfully sent (which indicates only
that the packet was sent, not whether the reboot was successful).
"""
roles = ['admin', 'pi', 'tech']
accepts = [
Auth(),
Mixed(Node.fields['node_id'],
Node.fields['hostname'])
]
returns = Parameter(int, '1 if successful')
def call(self, auth, node_id_or_hostname):
# Get node information
nodes = Nodes(self.api, [node_id_or_hostname])
if not nodes:
raise PLCInvalidArgument("No such node")
node = nodes[0]
# Authenticated function
assert self.caller is not None
# If we are not an admin, make sure that the caller is a
# member of the site at which the node is located.
if 'admin' not in self.caller['roles']:
if node['site_id'] not in self.caller['site_ids']:
raise PLCPermissionDenied("Not allowed to reboot nodes at the specified site")
session = node['session']
if not session:
raise PLCInvalidArgument("No session key on record for that node (i.e., has never successfully booted)")
session = session.strip()
# Only use the hostname as a backup, try to use the primary IP
# address instead.
host = node['hostname']
interfaces = Interfaces(self.api, node['interface_ids'])
for interface in interfaces:
if interface['is_primary'] == 1:
host = interface['ip']
break
try:
udp_pod(host, session)
except socket.error as e:
# Ignore socket errors
pass
self.event_objects = {'Node': [node['node_id']]}
self.message = "RebootNode called"
return 1
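# Hedged usage sketch: PLCAPI methods are exposed over XML-RPC, so a client
# might invoke this as (server URL, auth struct and hostname are made up):
#
#   import xmlrpclib
#   plc = xmlrpclib.ServerProxy('https://plc.example.org/PLCAPI/')
#   plc.RebootNode(auth, 'node1.example.org')   # -> 1 on success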
| {
"content_hash": "75e21f7db1ee4c63c90a33c20c799e2b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 116,
"avg_line_length": 31.17808219178082,
"alnum_prop": 0.6164323374340949,
"repo_name": "dreibh/planetlab-lxc-plcapi",
"id": "a6e0439dab7d76cea75cd772087b1ce8f68d2b5c",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PLC/Methods/RebootNode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "724"
},
{
"name": "Makefile",
"bytes": "2995"
},
{
"name": "PHP",
"bytes": "574445"
},
{
"name": "PLpgSQL",
"bytes": "2764"
},
{
"name": "Perl",
"bytes": "1350"
},
{
"name": "Python",
"bytes": "871238"
},
{
"name": "Shell",
"bytes": "31392"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import re
import unittest2
import warnings
import httpretty as hp
from coinbase.client import Client
from coinbase.client import OAuthClient
from coinbase.error import APIError
from coinbase.error import TwoFactorTokenRequired
from coinbase.error import UnexpectedDataFormatError
from coinbase.model import APIObject
from coinbase.model import Account
from coinbase.model import Address
from coinbase.model import Button
from coinbase.model import Money
from coinbase.model import Order
from coinbase.model import Transaction
from coinbase.model import Transfer
# Hide all warning output.
warnings.showwarning = lambda *a, **k: None
# Dummy API key values for use in tests
api_key = 'fakeapikey'
api_secret = 'fakeapisecret'
client_id = 'fakeid'
client_secret = 'fakesecret'
access_token = 'fakeaccesstoken'
refresh_token = 'fakerefreshtoken'
class TestAccount(unittest2.TestCase):
@hp.activate
def test_delete(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
self.assertTrue(uri.endswith(account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.DELETE, re.compile('.*'), body=server_response)
data = {'success': False}
with self.assertRaises(APIError):
account.delete()
data = {'success': True}
self.assertIsNone(account.delete())
@hp.activate
def test_set_primary(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
account.primary = None
def server_response(request, uri, headers):
self.assertTrue(uri.endswith('%s/primary' % account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
data = {'success': False}
with self.assertRaises(APIError):
account.set_primary()
self.assertIsNone(account.primary) # Primary status should not have changed.
data = {'success': True}
account.set_primary()
self.assertTrue(account.primary) # Primary status should have changed.
@hp.activate
def test_modify(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
account.name = initial_name = 'Wallet'
def server_response(request, uri, headers):
self.assertTrue(uri.endswith(account.id))
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
name = request_data.get('account', {}).get('name')
assert name == new_name
return (200, headers, json.dumps(data))
new_name = 'Vault'
data = {'success': False, 'account': {'name': new_name}}
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
account.modify(new_name)
self.assertEqual(account.name, initial_name)
data = {'success': True, 'account': {'name': new_name}}
account.modify(new_name)
self.assertEqual(account.name, new_name)
data = {'success': True, 'account': 'nottherighttype'}
with self.assertRaises(UnexpectedDataFormatError):
account.modify(new_name)
@hp.activate
def test_get_balance(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
account.balance = initial_balance = lambda: None # Initial value
def server_response(request, uri, headers):
self.assertTrue(uri.endswith('%s/balance' % account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
data = {'currency': 'USD', 'amount': '10.00'}
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
balance = account.get_balance()
self.assertIsInstance(balance, Money)
# Fetching the current balance should not modify the balance attribute on
# the Account object.
self.assertEqual(account.balance, initial_balance)
@hp.activate
def test_get_address(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
self.assertTrue(uri.endswith('%s/address' % account.id))
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'address': 'a',
'callback_url': None,
'label': None,
'success': False}
with self.assertRaises(APIError):
account.get_address()
data = {'badkey': 'bar',
'success': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_address()
data = {'address': 'a',
'callback_url': None,
'label': None,
'success': True}
address = account.get_address()
self.assertIsInstance(address, Address)
@hp.activate
def test_get_addresses(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'addresses': [
{'address': {
'label': '',
'address': 'foo',
'callback_url': '',
'id': '1'
}},
{'address': {
'label': '',
'address': 'foo',
'callback_url': '',
'id': '2'
}},
{'address': {
'label': '',
'address': 'foo',
'callback_url': '',
'id': '3'
}},
],
}
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
response = account.get_addresses()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.addresses), 3)
for address in response.addresses:
self.assertIsInstance(address, Address)
@hp.activate
def test_create_address(self):
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
address = request_data.get('address')
assert isinstance(address, dict)
if label is not None:
assert address.get('label') == label
if callback_url is not None:
assert address.get('callback_url') == callback_url
return (200, headers, json.dumps(data))
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
label, callback_url = ('label', 'http://example.com/')
data = {'success': False,
'address': 'foo',
'label': label,
'callback_url': callback_url}
with self.assertRaises(APIError):
account.create_address(label, callback_url)
label, callback_url = ('label', 'http://example.com/')
data = {'success': True, 'arbkey': 'bar'}
with self.assertRaises(UnexpectedDataFormatError):
account.create_address(label, callback_url)
label, callback_url = ('label', 'http://example.com/')
data = {'success': True,
'address': 'foo',
'label': label,
'callback_url': callback_url}
address = account.create_address(label, callback_url)
self.assertIsInstance(address, Address)
label, callback_url = (None, None)
data = {'success': True,
'address': 'foo',
'label': label,
'callback_url': callback_url}
address = account.create_address()
self.assertIsInstance(address, Address)
@hp.activate
def test_get_transactions(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'transactions': [
{'transaction': {'id': '1'}},
{'transaction': {'id': '2'}},
{'transaction': {'id': '3'}},
],
}
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
response = account.get_transactions()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.transactions), 3)
for transaction in response.transactions:
self.assertIsInstance(transaction, Transaction)
@hp.activate
def test_get_transaction(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
transaction_id = 'faketransactionid'
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'missing_transaction_key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transaction(transaction_id)
data = {'transaction': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transaction(transaction_id)
data = {'transaction': {'id': '1'}}
transaction = account.get_transaction(transaction_id)
self.assertIsInstance(transaction, Transaction)
@hp.activate
def test_transfer_money(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
base_kwargs = {
'to_account_id': 'fake-account-id',
'amount': '12.0 BTC',
}
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso=None)
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string=None, amount_currency_iso='USD')
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso=None)
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso='USD')
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso='USD')
account.transfer_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string='12.0', amount_currency_iso=None)
account.transfer_money(**kwargs)
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
self.assertEqual(len(tx_data), len(kwargs))
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
account.transfer_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
kwargs = base_kwargs.copy()
account.transfer_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
kwargs = base_kwargs.copy()
account.transfer_money(**kwargs)
data = {'success': True, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
tx = account.transfer_money(**kwargs)
self.assertIsInstance(tx, Transaction)
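    # Amount-argument contract, assumed from the ValueError cases above: supply
    # either `amount` alone, or (presumably) the pair `amount_string` +
    # `amount_currency_iso`; any other combination raises ValueError.
    #
    #   account.transfer_money(to_account_id='fake-id', amount='12.0 BTC')    # ok
    #   account.transfer_money(to_account_id='fake-id',
    #                          amount='12.0', amount_string='12.0')           # ValueError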
@hp.activate
def test_send_money(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
base_kwargs = {
'to_btc_address': 'some-btc-address',
'amount': '12.0 BTC',
}
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso=None)
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string=None, amount_currency_iso='USD')
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso=None)
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso='USD')
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso='USD')
account.send_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string='12.0', amount_currency_iso=None)
account.send_money(**kwargs)
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
self.assertEqual(len(tx_data), len(kwargs))
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
account.send_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
kwargs = base_kwargs.copy()
account.send_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
kwargs = base_kwargs.copy()
account.send_money(**kwargs)
data = {'success': True, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
tx = account.send_money(**kwargs)
self.assertIsInstance(tx, Transaction)
oauth_account = Account(
OAuthClient(client_id, client_secret, access_token, refresh_token))
oauth_account.id = 'fakeaccountid'
hp.reset()
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
            if two_factor_token:
                self.assertEqual(request.headers.get('CB-2FA-Token'), two_factor_token)
                self.assertIsNone(tx_data.get('CB-2FA-Token'))
                return (200, headers, json.dumps(data))
            return (402, headers, '')
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
kwargs = base_kwargs.copy()
kwargs['two_factor_token'] = two_factor_token = None
with self.assertRaises(TwoFactorTokenRequired):
oauth_account.send_money(**kwargs)
kwargs['two_factor_token'] = two_factor_token = 'sometoken'
tx = oauth_account.send_money(**kwargs)
self.assertIsInstance(tx, Transaction)
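    # Two-factor flow exercised above: the first send_money call carries no
    # CB-2FA-Token header, the mock answers 402, and the client surfaces
    # TwoFactorTokenRequired; retrying with two_factor_token set adds the header
    # and the mock returns the 200 transaction payload.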
@hp.activate
def test_request_money(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
base_kwargs = {
'from_email_address': 'some-btc-address',
'amount': '12.0 BTC',
}
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso=None)
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string=None, amount_currency_iso='USD')
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso=None)
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(
amount='12.0', amount_string='12.0', amount_currency_iso='USD')
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string=None, amount_currency_iso='USD')
account.request_money(**kwargs)
with self.assertRaises(ValueError):
kwargs = base_kwargs.copy()
kwargs.update(amount=None, amount_string='12.0', amount_currency_iso=None)
account.request_money(**kwargs)
def server_response(request, uri, headers):
try: req = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
tx_data = req.get('transaction')
self.assertIsInstance(tx_data, dict)
self.assertEqual(len(tx_data), len(kwargs))
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
account.request_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
kwargs = base_kwargs.copy()
account.request_money(**kwargs)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
kwargs = base_kwargs.copy()
account.request_money(**kwargs)
data = {'success': True, 'transaction': {'id': '1'}}
kwargs = base_kwargs.copy()
tx = account.request_money(**kwargs)
self.assertIsInstance(tx, Transaction)
@hp.activate
def test_get_transfers(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'transfers': [
{'transfer': {'id': '1'}},
{'transfer': {'id': '2'}},
{'transfer': {'id': '3'}},
],
}
response = account.get_transfers()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.transfers), 3)
for transfer in response.transfers:
self.assertIsInstance(transfer, Transfer)
@hp.activate
def test_get_transfer(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
transfer_id = 'faketransferid'
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'missing_transfer_key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transfer(transfer_id)
data = {'transfer': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_transfer(transfer_id)
data = {'transfer': {'id': '1'}}
transfer = account.get_transfer(transfer_id)
self.assertIsInstance(transfer, Transfer)
@hp.activate
def test_get_button(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
button_code = 'fakebuttoncode'
def server_response(request, uri, headers):
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'button': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_button(button_code)
data = {'missing-button-key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_button(button_code)
data = {'button': {'code': button_code}}
button = account.get_button(button_code)
self.assertIsInstance(button, Button)
@hp.activate
def test_create_button(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
button_data = request_data.get('button')
self.assertIsInstance(button_data, dict)
for key in ['name', 'price_string', 'price_currency_iso']:
self.assertTrue(key in button_data)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
name = 'b-name'
price_string = 'b-price'
price_currency_iso = 'BTC'
with self.assertRaises(APIError):
data = {
'success': False,
'button': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
account.create_button(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'button': 'wrong-type'}
account.create_button(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-button-key': True}
account.create_button(name, price_string, price_currency_iso)
data = {
'success': True,
'button': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
button = account.create_button(name, price_string, price_currency_iso)
self.assertIsInstance(button, Button)
@hp.activate
def test_get_orders(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'orders': [
{'order': {'id': '1'}},
{'order': {'id': '2'}},
{'order': {'id': '3'}},
],
}
response = account.get_orders()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.orders), 3)
for order in response.orders:
self.assertIsInstance(order, Order)
@hp.activate
def test_get_order(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
order_id = 'fakeorderid'
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {'missing_order_key': True}
with self.assertRaises(UnexpectedDataFormatError):
account.get_order(order_id)
data = {'order': 'not-the-right-type'}
with self.assertRaises(UnexpectedDataFormatError):
account.get_order(order_id)
data = {'order': {'id': '1'}}
order = account.get_order(order_id)
self.assertIsInstance(order, Order)
@hp.activate
def test_create_order(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
button_data = request_data.get('button')
self.assertIsInstance(button_data, dict)
for key in ['name', 'price_string', 'price_currency_iso']:
self.assertTrue(key in button_data)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
name = 'b-name'
price_string = 'b-price'
price_currency_iso = 'BTC'
with self.assertRaises(APIError):
data = {
'success': False,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
account.create_order(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'order': 'wrong-type'}
account.create_order(name, price_string, price_currency_iso)
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-order-key': True}
account.create_order(name, price_string, price_currency_iso)
data = {
'success': True,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
order = account.create_order(name, price_string, price_currency_iso)
self.assertIsInstance(order, Order)
@hp.activate
def test_buy(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
self.assertEqual(request_data.get('account_id'), account.id)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transfer': {'id': 'transferid'}}
account.buy('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transfer': 'wrong-type'}
account.buy('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transfer-key': True}
account.buy('1.0')
data = {'success': True, 'transfer': {'id': 'transferid'}}
transfer = account.buy('1.0')
self.assertIsInstance(transfer, Transfer)
@hp.activate
def test_sell(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
def server_response(request, uri, headers):
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
self.assertEqual(request_data.get('account_id'), account.id)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transfer': {'id': 'transferid'}}
account.sell('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transfer': 'wrong-type'}
account.sell('1.0')
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transfer-key': True}
account.sell('1.0')
data = {'success': True, 'transfer': {'id': 'transferid'}}
transfer = account.sell('1.0')
self.assertIsInstance(transfer, Transfer)
class TestButton(unittest2.TestCase):
@hp.activate
def test_get_orders(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
initial_name = 'name'
initial_price_string = '12.0'
initial_price_currency_iso = 'USD'
button = account.load({
'button': {
'id': '1',
'name': initial_name,
'price_string': initial_price_string,
'price_currency_iso': initial_price_currency_iso,
'code': 'buttoncode',
},
}).button
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.GET, re.compile('.*'), body=server_response)
data = {
'total_count': 3,
'current_page': 1,
'num_pages': 1,
'orders': [
{'order': {'id': '1'}},
{'order': {'id': '2'}},
{'order': {'id': '3'}},
],
}
response = button.get_orders()
self.assertIsInstance(response, APIObject)
self.assertEqual(len(response.orders), 3)
for order in response.orders:
self.assertIsInstance(order, Order)
@hp.activate
def test_create_order(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
initial_name = 'name'
initial_price_string = '12.0'
initial_price_currency_iso = 'USD'
button = account.load({
'button': {
'id': '1',
'name': initial_name,
'price_string': initial_price_string,
'price_currency_iso': initial_price_currency_iso,
'code': 'buttoncode',
},
}).button
def server_response(request, uri, headers):
self.assertEqual(request.body.decode(), '')
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
name = 'b-name'
price_string = 'b-price'
price_currency_iso = 'BTC'
with self.assertRaises(APIError):
data = {
'success': False,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
button.create_order()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'order': 'wrong-type'}
button.create_order()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-order-key': True}
button.create_order()
data = {
'success': True,
'order': {
'name': name,
'price_string': price_string,
'price_currency_iso': price_currency_iso,
},
}
order = button.create_order()
self.assertIsInstance(order, Order)
class TestMoney(unittest2.TestCase):
def test_str_representation(self):
money = APIObject(None).load({
'amount': '12.0',
'currency': 'BTC',
})
self.assertIsInstance(money, Money)
self.assertTrue(str(money).endswith('BTC 12.0'))
money2 = APIObject(None).load({
'amount': '12.0',
'currency': 'BTC',
'foo': 'Bar',
})
self.assertIsInstance(money2, Money)
self.assertTrue(str(money2).endswith('}'))
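        # str() behavior shown above: a pure {amount, currency} payload renders
        # with a 'BTC 12.0' suffix; any extra key makes the object fall back to
        # its dict repr, hence the trailing '}'.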
class TestOrder(unittest2.TestCase):
@hp.activate
def test_refund(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
order = account.load({
'order': {
'id': '1',
'custom': 'custom',
'button': {
'id': 'fakeid',
'code': 'acode'
},
},
}).order
def server_response(request, uri, headers):
try: req_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
order_data = req_data.get('order')
self.assertIsInstance(order_data, dict)
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(UnexpectedDataFormatError):
data = {'order': 'wrong-type'}
order.refund('USD')
with self.assertRaises(UnexpectedDataFormatError):
data = {'missing-order-key': True}
order.refund('USD')
data = {'order': {'id': '1'}}
refunded = order.refund('USD')
self.assertEqual(refunded, data['order'])
self.assertIsInstance(refunded, Order)
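        # The two assertions together show that model objects are dict-like:
        # `refunded` compares equal to the raw response dict while also being an
        # Order instance with the model's methods attached.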
class TestTransaction(unittest2.TestCase):
@hp.activate
def test_resend(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transaction = account.load({'transaction': {'id': '1' }}).transaction
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False}
transaction.resend()
data = {'success': True}
self.assertTrue(transaction.resend())
@hp.activate
def test_complete(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transaction = account.load({'transaction': {'id': '1' }}).transaction
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transaction': {'id': '1'}}
transaction.complete()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transaction': 'wrong-type'}
transaction.complete()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transaction-key': True}
transaction.complete()
data = {'success': True, 'transaction': {'id': '1'}}
tx = transaction.complete()
self.assertIsInstance(tx, Transaction)
@hp.activate
def test_cancel(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transaction = account.load({'transaction': {'id': '1' }}).transaction
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.DELETE, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False}
transaction.cancel()
data = {'success': True}
self.assertTrue(transaction.cancel())
class TestTransfer(unittest2.TestCase):
@hp.activate
def test_commit(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
transfer = account.load({'transfer': {'id': '1' }}).transfer
def server_response(request, uri, headers):
try: json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
return (200, headers, json.dumps(data))
hp.register_uri(hp.POST, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
data = {'success': False, 'transfer': {'id': '1'}}
transfer.commit()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'transfer': 'wrong-type'}
transfer.commit()
with self.assertRaises(UnexpectedDataFormatError):
data = {'success': True, 'missing-transfer-key': True}
transfer.commit()
data = {'success': True, 'transfer': {'id': '1'}}
tx = transfer.commit()
self.assertIsInstance(tx, Transfer)
class TestUser(unittest2.TestCase):
@hp.activate
def test_modify(self):
account = Account(Client(api_key, api_secret))
account.id = 'fakeaccountid'
        initial_native_currency = 'USD'
initial_time_zone = 'Pacific Time (US & Canada)'
initial_name = 'Test User'
user = account.load({
'user': {
'id': '1',
'name': initial_name,
'native_currency': initial_native_currency,
'time_zone': initial_time_zone,
},
}).user
with self.assertRaises(ValueError):
user.modify()
def server_response(request, uri, headers):
self.assertTrue(uri.endswith(user.id))
try: request_data = json.loads(request.body.decode())
except ValueError: raise AssertionError("request body was malformed.")
user_data = request_data.get('user')
self.assertIsInstance(user_data, dict)
return (200, headers, json.dumps(data))
hp.register_uri(hp.PUT, re.compile('.*'), body=server_response)
with self.assertRaises(APIError):
new_name = 'Fake Name'
data = {
'success': False,
'user': {
'id': user.id,
'name': new_name,
'native_currency': initial_native_currency,
'time_zone': initial_time_zone,
},
}
user.modify(name=new_name)
self.assertEqual(user.name, initial_name)
self.assertEqual(user.native_currency, initial_native_currency)
self.assertEqual(user.time_zone, initial_time_zone)
with self.assertRaises(UnexpectedDataFormatError):
new_name = 'Fake Name'
data = {'success': True, 'user': 'wrong-type'}
user.modify(name=new_name)
self.assertEqual(user.name, initial_name)
self.assertEqual(user.native_currency, initial_native_currency)
self.assertEqual(user.time_zone, initial_time_zone)
with self.assertRaises(UnexpectedDataFormatError):
new_name = 'Fake Name'
data = {'success': True, 'missing-user-key': True}
user.modify(name=new_name)
self.assertEqual(user.name, initial_name)
self.assertEqual(user.native_currency, initial_native_currency)
self.assertEqual(user.time_zone, initial_time_zone)
new_name = 'Fake Name'
new_native_currency = 'CAD'
new_time_zone = 'Eastern'
data = {
'success': True,
'user': {
'id': user.id,
'name': new_name,
'native_currency': new_native_currency,
'time_zone': new_time_zone,
},
}
user.modify(name=new_name,
time_zone=new_time_zone,
native_currency=new_native_currency)
self.assertEqual(user.name, new_name)
self.assertEqual(user.native_currency, new_native_currency)
self.assertEqual(user.time_zone, new_time_zone)
| {
"content_hash": "bbff5a977414f0dfff3da6d584a65aff",
"timestamp": "",
"source": "github",
"line_count": 1139,
"max_line_length": 80,
"avg_line_length": 34.55926251097454,
"alnum_prop": 0.6380865279577268,
"repo_name": "jorilallo/coinbase-python",
"id": "2c06d6cc22a6de75a15c206705d8ed65e98dd0e7",
"size": "39379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "223"
},
{
"name": "Python",
"bytes": "122172"
}
],
"symlink_target": ""
} |
"""
This should result in an average return of -20 by the end of training.
Usually hits -30 around epoch 50.
Note that one epoch = 5k steps, so 200 epochs = 1 million steps.
"""
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import GoalConditionedPathCollector
from rlkit.policies.argmax import ArgmaxDiscretePolicy
from rlkit.torch.dqn.dqn import DQNTrainer
from rlkit.torch.her.her import HERTrainer
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
try:
import multiworld.envs.gridworlds
except ImportError as e:
print("To run this example, you need to install `multiworld`. See "
"https://github.com/vitchyr/multiworld.")
raise e
def experiment(variant):
expl_env = gym.make('GoalGridworld-v0')
eval_env = gym.make('GoalGridworld-v0')
obs_dim = expl_env.observation_space.spaces['observation'].low.size
goal_dim = expl_env.observation_space.spaces['desired_goal'].low.size
action_dim = expl_env.action_space.n
qf = ConcatMlp(
input_size=obs_dim + goal_dim,
output_size=action_dim,
hidden_sizes=[400, 300],
)
target_qf = ConcatMlp(
input_size=obs_dim + goal_dim,
output_size=action_dim,
hidden_sizes=[400, 300],
)
eval_policy = ArgmaxDiscretePolicy(qf)
exploration_strategy = EpsilonGreedy(
action_space=expl_env.action_space,
)
expl_policy = PolicyWrappedWithExplorationStrategy(
exploration_strategy=exploration_strategy,
policy=eval_policy,
)
replay_buffer = ObsDictRelabelingBuffer(
env=eval_env,
**variant['replay_buffer_kwargs']
)
observation_key = 'observation'
desired_goal_key = 'desired_goal'
eval_path_collector = GoalConditionedPathCollector(
eval_env,
eval_policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
expl_path_collector = GoalConditionedPathCollector(
expl_env,
expl_policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
trainer = DQNTrainer(
qf=qf,
target_qf=target_qf,
**variant['trainer_kwargs']
)
trainer = HERTrainer(trainer)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
algo_kwargs=dict(
num_epochs=100,
max_path_length=50,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
batch_size=128,
),
trainer_kwargs=dict(
discount=0.99,
),
replay_buffer_kwargs=dict(
max_size=100000,
fraction_goals_rollout_goals=0.2, # equal to k = 4 in HER paper
fraction_goals_env_goals=0.0,
),
)
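    # Sanity check on the HER comment above: fraction_goals_rollout_goals=0.2
    # keeps 20% of sampled transitions with their original goals and relabels
    # the remaining 80%, i.e. a 4:1 relabeled-to-original ratio -- the k = 4
    # setting from the HER paper.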
setup_logger('her-dqn-gridworld-experiment', variant=variant)
experiment(variant)
| {
"content_hash": "e41daac2e92e9c0baca1fef797a0ccdc",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 80,
"avg_line_length": 32.06086956521739,
"alnum_prop": 0.6650393273664226,
"repo_name": "google-research/DBAP-algorithm",
"id": "5a7503d834b18604c89f19ecbbad0de1ea5af994",
"size": "4263",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/rlkit_library/examples/her/her_dqn_gridworld.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5294"
}
],
"symlink_target": ""
} |
'''OpenGL extension NV.explicit_multisample
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_explicit_multisample'
_DEPRECATED = False
GL_SAMPLE_POSITION_NV = constant.Constant( 'GL_SAMPLE_POSITION_NV', 0x8E50 )
GL_SAMPLE_MASK_NV = constant.Constant( 'GL_SAMPLE_MASK_NV', 0x8E51 )
GL_SAMPLE_MASK_VALUE_NV = constant.Constant( 'GL_SAMPLE_MASK_VALUE_NV', 0x8E52 )
GL_TEXTURE_BINDING_RENDERBUFFER_NV = constant.Constant( 'GL_TEXTURE_BINDING_RENDERBUFFER_NV', 0x8E53 )
glget.addGLGetConstant( GL_TEXTURE_BINDING_RENDERBUFFER_NV, (1,) )
GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV = constant.Constant( 'GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV', 0x8E54 )
glget.addGLGetConstant( GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV, (1,) )
GL_TEXTURE_RENDERBUFFER_NV = constant.Constant( 'GL_TEXTURE_RENDERBUFFER_NV', 0x8E55 )
GL_SAMPLER_RENDERBUFFER_NV = constant.Constant( 'GL_SAMPLER_RENDERBUFFER_NV', 0x8E56 )
GL_INT_SAMPLER_RENDERBUFFER_NV = constant.Constant( 'GL_INT_SAMPLER_RENDERBUFFER_NV', 0x8E57 )
GL_UNSIGNED_INT_SAMPLER_RENDERBUFFER_NV = constant.Constant( 'GL_UNSIGNED_INT_SAMPLER_RENDERBUFFER_NV', 0x8E58 )
GL_MAX_SAMPLE_MASK_WORDS_NV = constant.Constant( 'GL_MAX_SAMPLE_MASK_WORDS_NV', 0x8E59 )
glget.addGLGetConstant( GL_MAX_SAMPLE_MASK_WORDS_NV, (1,) )
glGetMultisamplefvNV = platform.createExtensionFunction(
'glGetMultisamplefvNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLuint,arrays.GLfloatArray,),
doc='glGetMultisamplefvNV(GLenum(pname), GLuint(index), GLfloatArray(val)) -> None',
argNames=('pname','index','val',),
deprecated=_DEPRECATED,
)
glSampleMaskIndexedNV = platform.createExtensionFunction(
'glSampleMaskIndexedNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLuint,constants.GLbitfield,),
doc='glSampleMaskIndexedNV(GLuint(index), GLbitfield(mask)) -> None',
argNames=('index','mask',),
deprecated=_DEPRECATED,
)
glTexRenderbufferNV = platform.createExtensionFunction(
'glTexRenderbufferNV',dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum,constants.GLuint,),
doc='glTexRenderbufferNV(GLenum(target), GLuint(renderbuffer)) -> None',
argNames=('target','renderbuffer',),
deprecated=_DEPRECATED,
)
def glInitExplicitMultisampleNV():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
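# Usage sketch (assumes a current GL context has already been created by a
# windowing toolkit; the mask value below is illustrative):
#
#   if glInitExplicitMultisampleNV():
#       glSampleMaskIndexedNV(0, 0xFFFFFFFF)  # enable every sample in mask word 0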
| {
"content_hash": "0c86fcf66c1c79efab40d407c6cae68b",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 124,
"avg_line_length": 45.75438596491228,
"alnum_prop": 0.7841257668711656,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "b25340f078a89db09477f28c58d277affe60971e",
"size": "2608",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/NV/explicit_multisample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import vb2py.vbparser
import re
import glob
import vb2py.utils
import os
import sys
from docutils.core import publish_string
#
# Turn off logging
import vb2py.extensions
vb2py.extensions.disableLogging()
vb2py.vbparser.log.setLevel(0) # Don't print all logging stuff
def doAutomaticVBConversion(txt):
"""Convert VB code in the text to Python"""
vb = re.compile(r'<p>VB(\(.*?\))?:</p>.*?<pre class="literal-block">(.*?)</pre>',
re.DOTALL+re.MULTILINE)
def convertVB(match):
"""Convert the match"""
if match.groups()[0]:
mod = getattr(vb2py.vbparser, match.groups()[0][1:-1])()
else:
mod = vb2py.vbparser.VBCodeModule()
mod.importStatements = lambda x : ""
m = vb2py.vbparser.parseVB(match.groups()[1], container=mod)
return '<table style="code-table">' \
'<tr><th class="code-header">VB</th><th class="code-header">Python</th></tr>' \
'<tr><td class="vb-code-cell"><pre>%s</pre></td>' \
'<td class="python-code-cell"><pre>%s</pre></td></tr></table>' % (
match.groups()[1].replace("\n", "<br>"),
m.renderAsCode(1).replace("\n", "<br>"))
return vb.sub(convertVB, txt)
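# Illustration (hypothetical input): reST source such as
#
#   VB::
#
#       Dim a As Integer
#
# renders to '<p>VB:</p>...<pre class="literal-block">Dim a As Integer</pre>',
# which the regex above rewrites into a two-column VB/Python comparison table.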
def addToTemplate(text, template, token):
"""Add the text to the template"""
template_text = open(template, "r").read()
template_text = template_text.replace(token, text)
return template_text
if __name__ == "__main__":
add_to_template = 0
if len(sys.argv) == 1:
files = glob.glob(os.path.join("rst", "*.rst"))
else:
files = sys.argv[1:]
settings = {
'embed_stylesheet' : False,
'stylesheet' : 'default.css',
'stylesheet_path' : '',
}
print "\nvb2Py documentation generator\n"
for fn in files:
print "Processing '%s' ..." % fn,
if not fn.startswith("rst/"):
print "Source must be in rst/"
break
if not fn.endswith(".rst"):
print "Source must be reStructuredText"
break
target_file = "doc/" + fn[4:-4] + ".html"
print "to %s ..." % target_file
txt = open(fn, "r").read()
base_html_text = publish_string(writer_name = 'html',
source = txt,
settings_overrides = settings)
marked_up_text = doAutomaticVBConversion(base_html_text)
if add_to_template:
marked_up_text = addToTemplate(marked_up_text, sys.argv[2], "DOCSGOHERE")
open(target_file, "w").write(marked_up_text)
print "Done!"
| {
"content_hash": "b7db448227e7d00d63fd2ca15a67a10e",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 94,
"avg_line_length": 31.174418604651162,
"alnum_prop": 0.5479298769116001,
"repo_name": "mvz/vb2py",
"id": "ffea821a90fb696806c4da4024dad7e3048383b9",
"size": "2681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/makedocs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "38050"
},
{
"name": "Python",
"bytes": "630966"
},
{
"name": "Shell",
"bytes": "40"
},
{
"name": "Visual Basic",
"bytes": "100566"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
DEBUG = getattr(settings, 'STATICASSETS_DEBUG', settings.DEBUG)
MIMETYPES = {
'.css': 'text/css',
'.js': 'application/javascript'
}
MIMETYPES.update(**getattr(settings, 'MIMETYPES', {}))
DIRS = getattr(settings, 'STATICASSETS_DIRS', getattr(settings, 'STATICFILES_DIRS'))
CACHE_DIR = os.path.join(getattr(settings, 'STATIC_ROOT', '/tmp'), 'staticassets-cache')
FINDER = getattr(settings, 'STATICASSETS_FINDER', 'staticassets.finder.AssetFinder')
PREPROCESSORS = getattr(settings, 'STATICASSETS_PREPROCESSORS', (
('application/javascript', 'staticassets.processors.DirectiveProcessor'),
('text/css', 'staticassets.processors.DirectiveProcessor')
))
POSTPROCESSORS = getattr(settings, 'STATICASSETS_POSTPROCESSORS', (
('text/css', 'staticassets.processors.ResolvePathsProcessor'),
('application/javascript', 'staticassets.processors.CommonjsProcessor'),
))
BUNDLEPROCESSORS = getattr(settings, 'STATICASSETS_BUNDLEPROCESSORS', tuple())
COMPILERS = {
'.sass': 'staticassets.compilers.Sass',
'.scss': 'staticassets.compilers.Sass',
'.styl': 'staticassets.compilers.Stylus',
'.less': 'staticassets.compilers.Less',
'.jst': 'staticassets.compilers.Jst',
'.ejs': 'staticassets.compilers.Ejs',
'.coffee': 'staticassets.compilers.CoffeeScript'
}
COMPILERS.update(**getattr(settings, 'STATICASSETS_COMPILERS', {}))
COMPRESSION = getattr(settings, 'STATICASSETS_COMPRESSION', not DEBUG)
COMPRESSORS = {
'application/javascript': 'staticassets.compressors.UglifyJSCompressor',
'text/css': 'staticassets.compressors.YUICompressor'
}
COMPRESSORS.update(**getattr(settings, 'STATICASSETS_COMPRESSORS', {}))
AVAILABLE_EXTENSIONS = MIMETYPES.keys() + COMPILERS.keys()
MANIFESTS = getattr(settings, 'STATICASSETS_MANIFESTS', tuple())
EJS_TEMPLATE_SETTINGS = getattr(settings, 'STATICASSETS_EJS_TEMPLATE_SETTINGS', '{}')
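# Example override in a project's Django settings module (the dotted paths
# below are illustrative, not part of this package):
#
#   STATICASSETS_COMPILERS = {'.coffee': 'myproject.compilers.CustomCoffeeScript'}
#   STATICASSETS_COMPRESSION = True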
| {
"content_hash": "dc1193a6170f9650d01184f2ed5ac70c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 88,
"avg_line_length": 34.92727272727273,
"alnum_prop": 0.735033836543467,
"repo_name": "davidelias/django-staticassets",
"id": "589a16b44bb2850978695c9e7823a97b8fb7c811",
"size": "1921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "staticassets/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "653"
},
{
"name": "CoffeeScript",
"bytes": "58"
},
{
"name": "JavaScript",
"bytes": "379"
},
{
"name": "Python",
"bytes": "50370"
},
{
"name": "Shell",
"bytes": "142"
}
],
"symlink_target": ""
} |
from .resource import Resource
class ActivityLogAlertResource(Resource):
"""An activity log alert resource.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource Id
:vartype id: str
:ivar name: Azure resource name
:vartype name: str
:ivar type: Azure resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param scopes: Required. A list of resourceIds that will be used as
prefixes. The alert will only apply to activityLogs with resourceIds that
fall under one of these prefixes. This list must include at least one
item.
:type scopes: list[str]
:param enabled: Indicates whether this activity log alert is enabled. If
an activity log alert is not enabled, then none of its actions will be
    activated. Default value: True.
:type enabled: bool
:param condition: Required. The condition that will cause this alert to
activate.
:type condition: ~azure.mgmt.monitor.models.ActivityLogAlertAllOfCondition
:param actions: Required. The actions that will activate when the
condition is met.
:type actions: ~azure.mgmt.monitor.models.ActivityLogAlertActionList
:param description: A description of this activity log alert.
:type description: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'scopes': {'required': True},
'condition': {'required': True},
'actions': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'scopes': {'key': 'properties.scopes', 'type': '[str]'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'condition': {'key': 'properties.condition', 'type': 'ActivityLogAlertAllOfCondition'},
'actions': {'key': 'properties.actions', 'type': 'ActivityLogAlertActionList'},
'description': {'key': 'properties.description', 'type': 'str'},
}
def __init__(self, *, location: str, scopes, condition, actions, tags=None, enabled: bool=True, description: str=None, **kwargs) -> None:
super(ActivityLogAlertResource, self).__init__(location=location, tags=tags, **kwargs)
self.scopes = scopes
self.enabled = enabled
self.condition = condition
self.actions = actions
self.description = description
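# Construction sketch (field values are illustrative; `my_condition` and
# `my_actions` stand for pre-built ActivityLogAlertAllOfCondition and
# ActivityLogAlertActionList instances -- consult those model classes for
# their exact parameters):
#
#   alert = ActivityLogAlertResource(
#       location='Global',
#       scopes=['/subscriptions/<subscription-id>'],
#       condition=my_condition,
#       actions=my_actions,
#   )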
| {
"content_hash": "3f1699c5ac6d32e323478fa9d0285eb4",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 141,
"avg_line_length": 40.957142857142856,
"alnum_prop": 0.6358562957795605,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "bc03347d612de8f02f627ec3328368c076e88b2a",
"size": "3341",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-monitor/azure/mgmt/monitor/models/activity_log_alert_resource_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from typing import MutableMapping, MutableSequence
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.assuredworkloads.v1",
manifest={
"CreateWorkloadRequest",
"UpdateWorkloadRequest",
"DeleteWorkloadRequest",
"GetWorkloadRequest",
"ListWorkloadsRequest",
"ListWorkloadsResponse",
"Workload",
"CreateWorkloadOperationMetadata",
"RestrictAllowedResourcesRequest",
"RestrictAllowedResourcesResponse",
"AcknowledgeViolationRequest",
"AcknowledgeViolationResponse",
"TimeWindow",
"ListViolationsRequest",
"ListViolationsResponse",
"GetViolationRequest",
"Violation",
},
)
class CreateWorkloadRequest(proto.Message):
r"""Request for creating a workload.
Attributes:
parent (str):
Required. The resource name of the new Workload's parent.
Must be of the form
``organizations/{org_id}/locations/{location_id}``.
workload (google.cloud.assuredworkloads_v1.types.Workload):
Required. Assured Workload to create
external_id (str):
Optional. A identifier associated with the
workload and underlying projects which allows
for the break down of billing costs for a
workload. The value provided for the identifier
will add a label to the workload and contained
projects with the identifier as the value.
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
workload: "Workload" = proto.Field(
proto.MESSAGE,
number=2,
message="Workload",
)
external_id: str = proto.Field(
proto.STRING,
number=3,
)
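# Request sketch (resource names are placeholders; the Workload fields shown
# are the required ones per the docstrings below):
#
#   req = CreateWorkloadRequest(
#       parent='organizations/123/locations/us-central1',
#       workload=Workload(
#           display_name='example-workload',
#           compliance_regime=Workload.ComplianceRegime.FEDRAMP_MODERATE,
#       ),
#   )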
class UpdateWorkloadRequest(proto.Message):
r"""Request for Updating a workload.
Attributes:
workload (google.cloud.assuredworkloads_v1.types.Workload):
Required. The workload to update. The workload's ``name``
field is used to identify the workload to be updated.
Format:
organizations/{org_id}/locations/{location_id}/workloads/{workload_id}
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The list of fields to be updated.
"""
workload: "Workload" = proto.Field(
proto.MESSAGE,
number=1,
message="Workload",
)
update_mask: field_mask_pb2.FieldMask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteWorkloadRequest(proto.Message):
r"""Request for deleting a Workload.
Attributes:
name (str):
Required. The ``name`` field is used to identify the
workload. Format:
organizations/{org_id}/locations/{location_id}/workloads/{workload_id}
etag (str):
Optional. The etag of the workload.
If this is provided, it must match the server's
etag.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
etag: str = proto.Field(
proto.STRING,
number=2,
)
class GetWorkloadRequest(proto.Message):
r"""Request for fetching a workload.
Attributes:
name (str):
Required. The resource name of the Workload to fetch. This
is the workload's relative path in the API, formatted as
"organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}".
For example,
"organizations/123/locations/us-east1/workloads/assured-workload-1".
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
class ListWorkloadsRequest(proto.Message):
r"""Request for fetching workloads in an organization.
Attributes:
parent (str):
Required. Parent Resource to list workloads from. Must be of
the form ``organizations/{org_id}/locations/{location}``.
page_size (int):
Page size.
page_token (str):
Page token returned from previous request.
Page token contains context from previous
request. Page token needs to be passed in the
second and following requests.
filter (str):
A custom filter for filtering by properties
of a workload. At this time, only filtering by
labels is supported.
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
page_size: int = proto.Field(
proto.INT32,
number=2,
)
page_token: str = proto.Field(
proto.STRING,
number=3,
)
filter: str = proto.Field(
proto.STRING,
number=4,
)
class ListWorkloadsResponse(proto.Message):
r"""Response of ListWorkloads endpoint.
Attributes:
workloads (MutableSequence[google.cloud.assuredworkloads_v1.types.Workload]):
List of Workloads under a given parent.
next_page_token (str):
The next page token. Return empty if reached
the last page.
"""
@property
def raw_page(self):
return self
workloads: MutableSequence["Workload"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Workload",
)
next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
class Workload(proto.Message):
r"""A Workload object for managing highly regulated workloads of
cloud customers.
Attributes:
name (str):
Optional. The resource name of the workload.
Format:
organizations/{organization}/locations/{location}/workloads/{workload}
Read-only.
display_name (str):
Required. The user-assigned display name of
the Workload. When present it must be between 4
to 30 characters. Allowed characters are:
lowercase and uppercase letters, numbers,
hyphen, and spaces.
Example: My Workload
resources (MutableSequence[google.cloud.assuredworkloads_v1.types.Workload.ResourceInfo]):
Output only. The resources associated with
this workload. These resources will be created
when creating the workload. If any of the
projects already exist, the workload creation
will fail. Always read only.
compliance_regime (google.cloud.assuredworkloads_v1.types.Workload.ComplianceRegime):
Required. Immutable. Compliance Regime
associated with this workload.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Immutable. The Workload creation
timestamp.
billing_account (str):
Optional. The billing account used for the resources which
are direct children of workload. This billing account is
initially associated with the resources created as part of
Workload creation. After the initial creation of these
resources, the customer can change the assigned billing
account. The resource name has the form
``billingAccounts/{billing_account_id}``. For example,
``billingAccounts/012345-567890-ABCDEF``.
etag (str):
Optional. ETag of the workload, it is
calculated on the basis of the Workload
contents. It will be used in Update & Delete
operations.
labels (MutableMapping[str, str]):
Optional. Labels applied to the workload.
provisioned_resources_parent (str):
Input only. The parent resource for the resources managed by
this Assured Workload. May be either empty or a folder
resource which is a child of the Workload parent. If not
specified all resources are created under the parent
organization. Format: folders/{folder_id}
kms_settings (google.cloud.assuredworkloads_v1.types.Workload.KMSSettings):
Input only. Settings used to create a CMEK crypto key. When
set, a project with a KMS CMEK key is provisioned. This
field is deprecated as of Feb 28, 2022. In order to create a
Keyring, callers should specify, ENCRYPTION_KEYS_PROJECT or
KEYRING in ResourceSettings.resource_type field.
resource_settings (MutableSequence[google.cloud.assuredworkloads_v1.types.Workload.ResourceSettings]):
Input only. Resource properties that are used
to customize workload resources. These
properties (such as custom project id) will be
used to create workload resources if possible.
This field is optional.
kaj_enrollment_state (google.cloud.assuredworkloads_v1.types.Workload.KajEnrollmentState):
Output only. Represents the KAJ enrollment
state of the given workload.
enable_sovereign_controls (bool):
Optional. Indicates the sovereignty status of
the given workload. Currently meant to be used
by Europe/Canada customers.
saa_enrollment_response (google.cloud.assuredworkloads_v1.types.Workload.SaaEnrollmentResponse):
Output only. Represents the SAA enrollment
response of the given workload. SAA enrollment
response is queried during GetWorkload call. In
failure cases, user friendly error message is
shown in SAA details page.
compliant_but_disallowed_services (MutableSequence[str]):
Output only. Urls for services which are
compliant for this Assured Workload, but which
are currently disallowed by the
ResourceUsageRestriction org policy. Invoke
RestrictAllowedResources endpoint to allow your
project developers to use these services in
            their environment.
partner (google.cloud.assuredworkloads_v1.types.Workload.Partner):
Optional. Compliance Regime associated with
this workload.
"""
class ComplianceRegime(proto.Enum):
r"""Supported Compliance Regimes."""
COMPLIANCE_REGIME_UNSPECIFIED = 0
IL4 = 1
CJIS = 2
FEDRAMP_HIGH = 3
FEDRAMP_MODERATE = 4
US_REGIONAL_ACCESS = 5
HIPAA = 6
HITRUST = 7
EU_REGIONS_AND_SUPPORT = 8
CA_REGIONS_AND_SUPPORT = 9
ITAR = 10
AU_REGIONS_AND_US_SUPPORT = 11
ASSURED_WORKLOADS_FOR_PARTNERS = 12
class KajEnrollmentState(proto.Enum):
r"""Key Access Justifications(KAJ) Enrollment State."""
KAJ_ENROLLMENT_STATE_UNSPECIFIED = 0
KAJ_ENROLLMENT_STATE_PENDING = 1
KAJ_ENROLLMENT_STATE_COMPLETE = 2
class Partner(proto.Enum):
r"""Supported Assured Workloads Partners."""
PARTNER_UNSPECIFIED = 0
LOCAL_CONTROLS_BY_S3NS = 1
class ResourceInfo(proto.Message):
r"""Represent the resources that are children of this Workload.
Attributes:
resource_id (int):
Resource identifier. For a project this represents
project_number.
resource_type (google.cloud.assuredworkloads_v1.types.Workload.ResourceInfo.ResourceType):
Indicates the type of resource.
"""
class ResourceType(proto.Enum):
r"""The type of resource."""
RESOURCE_TYPE_UNSPECIFIED = 0
CONSUMER_PROJECT = 1
CONSUMER_FOLDER = 4
ENCRYPTION_KEYS_PROJECT = 2
KEYRING = 3
resource_id: int = proto.Field(
proto.INT64,
number=1,
)
resource_type: "Workload.ResourceInfo.ResourceType" = proto.Field(
proto.ENUM,
number=2,
enum="Workload.ResourceInfo.ResourceType",
)
class KMSSettings(proto.Message):
r"""Settings specific to the Key Management Service. This message is
deprecated. In order to create a Keyring, callers should specify,
ENCRYPTION_KEYS_PROJECT or KEYRING in ResourceSettings.resource_type
field.
Attributes:
next_rotation_time (google.protobuf.timestamp_pb2.Timestamp):
Required. Input only. Immutable. The time at
which the Key Management Service will
automatically create a new version of the crypto
key and mark it as the primary.
rotation_period (google.protobuf.duration_pb2.Duration):
Required. Input only. Immutable. [next_rotation_time] will
be advanced by this period when the Key Management Service
automatically rotates a key. Must be at least 24 hours and
at most 876,000 hours.
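        Example (an illustrative sketch; the timestamp and rotation
        period values below are hypothetical)::
            kms = Workload.KMSSettings(
                next_rotation_time=timestamp_pb2.Timestamp(seconds=1893456000),
                rotation_period=duration_pb2.Duration(seconds=90 * 24 * 3600),
            )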
"""
next_rotation_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
rotation_period: duration_pb2.Duration = proto.Field(
proto.MESSAGE,
number=2,
message=duration_pb2.Duration,
)
class ResourceSettings(proto.Message):
r"""Represent the custom settings for the resources to be
created.
Attributes:
resource_id (str):
Resource identifier. For a project this represents
project_id. If the project is already taken, the workload
creation will fail. For KeyRing, this represents the
keyring_id. For a folder, don't set this value as folder_id
is assigned by Google.
resource_type (google.cloud.assuredworkloads_v1.types.Workload.ResourceInfo.ResourceType):
                Indicates the type of resource. This field should be
                specified to map the resource_id to the correct resource
                type (CONSUMER_FOLDER or ENCRYPTION_KEYS_PROJECT).
display_name (str):
User-assigned resource display name.
If not empty it will be used to create a
resource with the specified name.
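        Example (an illustrative sketch; the project id is hypothetical)::
            settings = Workload.ResourceSettings(
                resource_id='my-cmek-project',
                resource_type=(
                    Workload.ResourceInfo.ResourceType.ENCRYPTION_KEYS_PROJECT),
            )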
"""
resource_id: str = proto.Field(
proto.STRING,
number=1,
)
resource_type: "Workload.ResourceInfo.ResourceType" = proto.Field(
proto.ENUM,
number=2,
enum="Workload.ResourceInfo.ResourceType",
)
display_name: str = proto.Field(
proto.STRING,
number=3,
)
class SaaEnrollmentResponse(proto.Message):
r"""Signed Access Approvals (SAA) enrollment response.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
setup_status (google.cloud.assuredworkloads_v1.types.Workload.SaaEnrollmentResponse.SetupState):
Indicates SAA enrollment status of a given
workload.
This field is a member of `oneof`_ ``_setup_status``.
setup_errors (MutableSequence[google.cloud.assuredworkloads_v1.types.Workload.SaaEnrollmentResponse.SetupError]):
Indicates SAA enrollment setup error if any.
"""
class SetupState(proto.Enum):
r"""Setup state of SAA enrollment."""
SETUP_STATE_UNSPECIFIED = 0
STATUS_PENDING = 1
STATUS_COMPLETE = 2
class SetupError(proto.Enum):
r"""Setup error of SAA enrollment."""
SETUP_ERROR_UNSPECIFIED = 0
ERROR_INVALID_BASE_SETUP = 1
ERROR_MISSING_EXTERNAL_SIGNING_KEY = 2
ERROR_NOT_ALL_SERVICES_ENROLLED = 3
ERROR_SETUP_CHECK_FAILED = 4
setup_status: "Workload.SaaEnrollmentResponse.SetupState" = proto.Field(
proto.ENUM,
number=1,
optional=True,
enum="Workload.SaaEnrollmentResponse.SetupState",
)
setup_errors: MutableSequence[
"Workload.SaaEnrollmentResponse.SetupError"
] = proto.RepeatedField(
proto.ENUM,
number=2,
enum="Workload.SaaEnrollmentResponse.SetupError",
)
name: str = proto.Field(
proto.STRING,
number=1,
)
display_name: str = proto.Field(
proto.STRING,
number=2,
)
resources: MutableSequence[ResourceInfo] = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ResourceInfo,
)
compliance_regime: ComplianceRegime = proto.Field(
proto.ENUM,
number=4,
enum=ComplianceRegime,
)
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
billing_account: str = proto.Field(
proto.STRING,
number=6,
)
etag: str = proto.Field(
proto.STRING,
number=9,
)
labels: MutableMapping[str, str] = proto.MapField(
proto.STRING,
proto.STRING,
number=10,
)
provisioned_resources_parent: str = proto.Field(
proto.STRING,
number=13,
)
kms_settings: KMSSettings = proto.Field(
proto.MESSAGE,
number=14,
message=KMSSettings,
)
resource_settings: MutableSequence[ResourceSettings] = proto.RepeatedField(
proto.MESSAGE,
number=15,
message=ResourceSettings,
)
kaj_enrollment_state: KajEnrollmentState = proto.Field(
proto.ENUM,
number=17,
enum=KajEnrollmentState,
)
enable_sovereign_controls: bool = proto.Field(
proto.BOOL,
number=18,
)
saa_enrollment_response: SaaEnrollmentResponse = proto.Field(
proto.MESSAGE,
number=20,
message=SaaEnrollmentResponse,
)
compliant_but_disallowed_services: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=24,
)
partner: Partner = proto.Field(
proto.ENUM,
number=25,
enum=Partner,
)
class CreateWorkloadOperationMetadata(proto.Message):
r"""Operation metadata to give request details of CreateWorkload.
Attributes:
create_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. Time when the operation was
created.
display_name (str):
Optional. The display name of the workload.
parent (str):
Optional. The parent of the workload.
compliance_regime (google.cloud.assuredworkloads_v1.types.Workload.ComplianceRegime):
Optional. Compliance controls that should be
applied to the resources managed by the
workload.
"""
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
display_name: str = proto.Field(
proto.STRING,
number=2,
)
parent: str = proto.Field(
proto.STRING,
number=3,
)
compliance_regime: "Workload.ComplianceRegime" = proto.Field(
proto.ENUM,
number=4,
enum="Workload.ComplianceRegime",
)
class RestrictAllowedResourcesRequest(proto.Message):
r"""Request for restricting list of available resources in
Workload environment.
Attributes:
name (str):
Required. The resource name of the Workload. This is the
workloads's relative path in the API, formatted as
"organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}".
For example,
"organizations/123/locations/us-east1/workloads/assured-workload-1".
restriction_type (google.cloud.assuredworkloads_v1.types.RestrictAllowedResourcesRequest.RestrictionType):
            Required. The type of restriction for using
            GCP products in the Workload environment.
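    Example (a sketch reusing the resource name format documented above)::
        request = RestrictAllowedResourcesRequest(
            name='organizations/123/locations/us-east1/'
                 'workloads/assured-workload-1',
            restriction_type=(
                RestrictAllowedResourcesRequest.RestrictionType
                .ALLOW_COMPLIANT_RESOURCES),
        )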
"""
class RestrictionType(proto.Enum):
r"""The type of restriction."""
RESTRICTION_TYPE_UNSPECIFIED = 0
ALLOW_ALL_GCP_RESOURCES = 1
ALLOW_COMPLIANT_RESOURCES = 2
name: str = proto.Field(
proto.STRING,
number=1,
)
restriction_type: RestrictionType = proto.Field(
proto.ENUM,
number=2,
enum=RestrictionType,
)
class RestrictAllowedResourcesResponse(proto.Message):
r"""Response for restricting the list of allowed resources."""
class AcknowledgeViolationRequest(proto.Message):
r"""Request for acknowledging the violation
Next Id: 4
Attributes:
name (str):
Required. The resource name of the Violation
to acknowledge. Format:
organizations/{organization}/locations/{location}/workloads/{workload}/violations/{violation}
comment (str):
Required. Business justification explaining
the need for violation acknowledgement
non_compliant_org_policy (str):
Optional. This field is deprecated and will be removed in
future version of the API. Name of the OrgPolicy which was
            modified with a non-compliant change and resulted in this
violation. Format:
projects/{project_number}/policies/{constraint_name}
folders/{folder_id}/policies/{constraint_name}
organizations/{organization_id}/policies/{constraint_name}
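    Example (a sketch; every identifier below is hypothetical)::
        request = AcknowledgeViolationRequest(
            name='organizations/123/locations/us-east1/'
                 'workloads/wl-1/violations/v-1',
            comment='Business-approved exception, see ticket ABC-123',
        )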
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
comment: str = proto.Field(
proto.STRING,
number=2,
)
non_compliant_org_policy: str = proto.Field(
proto.STRING,
number=3,
)
class AcknowledgeViolationResponse(proto.Message):
r"""Response for violation acknowledgement"""
class TimeWindow(proto.Message):
r"""Interval defining a time window.
Attributes:
start_time (google.protobuf.timestamp_pb2.Timestamp):
The start of the time window.
end_time (google.protobuf.timestamp_pb2.Timestamp):
The end of the time window.
"""
start_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
end_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=2,
message=timestamp_pb2.Timestamp,
)
class ListViolationsRequest(proto.Message):
r"""Request for fetching violations in an organization.
Attributes:
parent (str):
Required. The Workload name. Format
``organizations/{org_id}/locations/{location}/workloads/{workload}``.
interval (google.cloud.assuredworkloads_v1.types.TimeWindow):
Optional. Specifies the time window for retrieving active
Violations. When specified, retrieves Violations that were
active between start_time and end_time.
page_size (int):
Optional. Page size.
page_token (str):
Optional. Page token returned from previous
request.
filter (str):
Optional. A custom filter for filtering by
the Violations properties.
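    Example (a sketch combining the fields above with a TimeWindow; the
    workload name and timestamps are hypothetical)::
        request = ListViolationsRequest(
            parent='organizations/123/locations/us-east1/workloads/wl-1',
            interval=TimeWindow(
                start_time=timestamp_pb2.Timestamp(seconds=1640995200),
                end_time=timestamp_pb2.Timestamp(seconds=1643673600),
            ),
            page_size=50,
        )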
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
interval: "TimeWindow" = proto.Field(
proto.MESSAGE,
number=2,
message="TimeWindow",
)
page_size: int = proto.Field(
proto.INT32,
number=3,
)
page_token: str = proto.Field(
proto.STRING,
number=4,
)
filter: str = proto.Field(
proto.STRING,
number=5,
)
class ListViolationsResponse(proto.Message):
r"""Response of ListViolations endpoint.
Attributes:
violations (MutableSequence[google.cloud.assuredworkloads_v1.types.Violation]):
List of Violations under a Workload.
next_page_token (str):
            The next page token. Returns empty if the
            last page has been reached.
"""
@property
def raw_page(self):
return self
violations: MutableSequence["Violation"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="Violation",
)
next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
class GetViolationRequest(proto.Message):
r"""Request for fetching a Workload Violation.
Attributes:
name (str):
Required. The resource name of the Violation
to fetch (ie. Violation.name). Format:
organizations/{organization}/locations/{location}/workloads/{workload}/violations/{violation}
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
class Violation(proto.Message):
r"""Workload monitoring Violation.
Attributes:
name (str):
Output only. Immutable. Name of the Violation. Format:
organizations/{organization}/locations/{location}/workloads/{workload_id}/violations/{violations_id}
description (str):
Output only. Description for the Violation.
            e.g., OrgPolicy gcp.resourceLocations has a
            non-compliant value.
begin_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time of the event which
triggered the Violation.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The last time when the Violation
record was updated.
resolve_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time of the event which fixed
the Violation. If the violation is ACTIVE this
will be empty.
category (str):
Output only. Category under which this
violation is mapped. e.g. Location, Service
Usage, Access, Encryption, etc.
state (google.cloud.assuredworkloads_v1.types.Violation.State):
Output only. State of the violation
org_policy_constraint (str):
Output only. Immutable. The
org-policy-constraint that was incorrectly
changed, which resulted in this violation.
audit_log_link (str):
Output only. Immutable. Audit Log Link for
violated resource Format:
https://console.cloud.google.com/logs/query;query={logName}{protoPayload.resourceName}{timeRange}{folder}
non_compliant_org_policy (str):
Output only. Immutable. Name of the OrgPolicy which was
            modified with a non-compliant change and resulted in this
violation. Format:
projects/{project_number}/policies/{constraint_name}
folders/{folder_id}/policies/{constraint_name}
organizations/{organization_id}/policies/{constraint_name}
remediation (google.cloud.assuredworkloads_v1.types.Violation.Remediation):
Output only. Compliance violation remediation
acknowledged (bool):
Output only. A boolean that indicates if the
violation is acknowledged
acknowledgement_time (google.protobuf.timestamp_pb2.Timestamp):
Optional. Timestamp when this violation was
acknowledged last. This will be absent when
acknowledged field is marked as false.
This field is a member of `oneof`_ ``_acknowledgement_time``.
exception_audit_log_link (str):
Output only. Immutable. Audit Log link to
find business justification provided for
violation exception. Format:
https://console.cloud.google.com/logs/query;query={logName}{protoPayload.resourceName}{protoPayload.methodName}{timeRange}{organization}
"""
class State(proto.Enum):
r"""Violation State Values"""
STATE_UNSPECIFIED = 0
RESOLVED = 2
UNRESOLVED = 3
EXCEPTION = 4
class Remediation(proto.Message):
r"""Represents remediation guidance to resolve compliance
violation for AssuredWorkload
Attributes:
instructions (google.cloud.assuredworkloads_v1.types.Violation.Remediation.Instructions):
Required. Remediation instructions to resolve
violations
compliant_values (MutableSequence[str]):
                Values that can resolve the violation.
                For example, for list org policy violations,
this will either be the list of allowed or
denied values
remediation_type (google.cloud.assuredworkloads_v1.types.Violation.Remediation.RemediationType):
                Output only. Remediation type based on the
type of org policy values violated
"""
class RemediationType(proto.Enum):
r"""Classifying remediation into various types based on the kind
of violation. For example, violations caused due to changes in
boolean org policy requires different remediation instructions
compared to violation caused due to changes in allowed values of
list org policy.
"""
REMEDIATION_TYPE_UNSPECIFIED = 0
REMEDIATION_BOOLEAN_ORG_POLICY_VIOLATION = 1
REMEDIATION_LIST_ALLOWED_VALUES_ORG_POLICY_VIOLATION = 2
REMEDIATION_LIST_DENIED_VALUES_ORG_POLICY_VIOLATION = 3
REMEDIATION_RESTRICT_CMEK_CRYPTO_KEY_PROJECTS_ORG_POLICY_VIOLATION = 4
class Instructions(proto.Message):
r"""Instructions to remediate violation
Attributes:
gcloud_instructions (google.cloud.assuredworkloads_v1.types.Violation.Remediation.Instructions.Gcloud):
Remediation instructions to resolve violation
via gcloud cli
console_instructions (google.cloud.assuredworkloads_v1.types.Violation.Remediation.Instructions.Console):
Remediation instructions to resolve violation
via cloud console
"""
class Gcloud(proto.Message):
r"""Remediation instructions to resolve violation via gcloud cli
Attributes:
gcloud_commands (MutableSequence[str]):
Gcloud command to resolve violation
steps (MutableSequence[str]):
Steps to resolve violation via gcloud cli
additional_links (MutableSequence[str]):
Additional urls for more information about
steps
"""
gcloud_commands: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
steps: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=2,
)
additional_links: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=3,
)
class Console(proto.Message):
r"""Remediation instructions to resolve violation via cloud
console
Attributes:
console_uris (MutableSequence[str]):
Link to console page where violations can be
resolved
steps (MutableSequence[str]):
Steps to resolve violation via cloud console
additional_links (MutableSequence[str]):
Additional urls for more information about
steps
"""
console_uris: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
steps: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=2,
)
additional_links: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=3,
)
gcloud_instructions: "Violation.Remediation.Instructions.Gcloud" = (
proto.Field(
proto.MESSAGE,
number=1,
message="Violation.Remediation.Instructions.Gcloud",
)
)
console_instructions: "Violation.Remediation.Instructions.Console" = (
proto.Field(
proto.MESSAGE,
number=2,
message="Violation.Remediation.Instructions.Console",
)
)
instructions: "Violation.Remediation.Instructions" = proto.Field(
proto.MESSAGE,
number=1,
message="Violation.Remediation.Instructions",
)
compliant_values: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=2,
)
remediation_type: "Violation.Remediation.RemediationType" = proto.Field(
proto.ENUM,
number=3,
enum="Violation.Remediation.RemediationType",
)
name: str = proto.Field(
proto.STRING,
number=1,
)
description: str = proto.Field(
proto.STRING,
number=2,
)
begin_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
resolve_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
category: str = proto.Field(
proto.STRING,
number=6,
)
state: State = proto.Field(
proto.ENUM,
number=7,
enum=State,
)
org_policy_constraint: str = proto.Field(
proto.STRING,
number=8,
)
audit_log_link: str = proto.Field(
proto.STRING,
number=11,
)
non_compliant_org_policy: str = proto.Field(
proto.STRING,
number=12,
)
remediation: Remediation = proto.Field(
proto.MESSAGE,
number=13,
message=Remediation,
)
acknowledged: bool = proto.Field(
proto.BOOL,
number=14,
)
acknowledgement_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=15,
optional=True,
message=timestamp_pb2.Timestamp,
)
exception_audit_log_link: str = proto.Field(
proto.STRING,
number=16,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "787eb92f9b186a1d26d7117de4186c74",
"timestamp": "",
"source": "github",
"line_count": 1007,
"max_line_length": 148,
"avg_line_length": 34.93545183714002,
"alnum_prop": 0.6049175667993177,
"repo_name": "googleapis/python-assured-workloads",
"id": "6581717f6e72cc9888636078fda3ff815bba2e1e",
"size": "35780",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/assuredworkloads_v1/types/assuredworkloads.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "768919"
},
{
"name": "Shell",
"bytes": "30693"
}
],
"symlink_target": ""
} |
"""Convert clasp & gringo outputs to ASP-GV visualization script JSON files.
This script can be used to convert clasp (monotonically) timestamped output, and optionally gringo text output, to JSON files which are accepted by the ASP-GV javascript package.
Usage:
./convert_outputs_to_json.py --edge-pred=<predicate> --clasp-out=<filename>
[(--cost-pred=<predicate> --gringo-out=<filename>) --directed]
--json-data=<filename> --json-time=<filename> --json-soln=<filename>
[--not-opt --not-timestamped]
Options:
-e <predicate> --edge-pred=<predicate> Name of the chosen edge/2
predicate.
-c <filename> --clasp-out=<filename> Name of the file where the
timestamped clasp output has
been saved.
-o <predicate> --cost-pred=<predicate> Name of the cost/3 predicate.
-g <filename> --gringo-out=<filename> Name of the file where the
output of "gringo -t" has
been saved.
-i --directed Indicates that the graph is a
directed graph. If not given,
the graph is assumed to be
undirected.
-d <filename> --json-data=<filename> Path where to save the JSON
file containing the graph data.
-t <filename> --json-time=<filename> Path where to save the JSON
file containing the timing
information.
-s <filename> --json-soln=<filename> Path where to save the JSON
file containing the solutions
of each optimization step.
--not-opt Flag to indicate that clasp
output is not from an
optimization, but still in
some "smart order" that can
be visualized.
--not-timestamped Flag to indicate that clasp
output is not prepended with
a timestamp. Time delta will
be 4.0 seconds.
Examples:
* ./convert_outputs_to_json.py -e cycle -c results/tsp-1.out \\
* -o cost -g ground-text/tsp-1.out -i \\
* -d json/tsp-1/data.json \\
* -t json/tsp-1/time.json \\
* -s json/tsp-1/soln.json
*
* ./convert_outputs_to_json.py -e edge -c results/econ0/out \\
* -d json/econ0/data.json \\
* -t json/econ0/time.json \\
* -s json/econ0/soln.json
*
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from docopt import docopt
from aspgvconvert import convert
# Parse arguments.
args = docopt(__doc__, version='0.1')
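# For example, the first invocation shown in the docstring produces a flat
# dict roughly like this (a sketch of docopt's output; not all keys shown):
#   args == {'--edge-pred': 'cycle', '--clasp-out': 'results/tsp-1.out',
#            '--cost-pred': 'cost', '--gringo-out': 'ground-text/tsp-1.out',
#            '--directed': True, '--not-opt': False,
#            '--not-timestamped': False, ...}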
# Run conversion
if convert.files_to_json(
args['--edge-pred'],
args['--clasp-out'],
not args['--not-timestamped'],
not args['--not-opt'],
args['--cost-pred'],
args['--gringo-out'],
args['--directed'],
args['--json-data'],
args['--json-time'],
args['--json-soln']
):
print('Success!')
else:
print('Conversion failed :(')
# EOF
| {
"content_hash": "7ea148b704e691e9eecc52efb1c70d77",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 178,
"avg_line_length": 45.588235294117645,
"alnum_prop": 0.45729032258064517,
"repo_name": "vukk/asp-gv-convert-py",
"id": "cea896d907ad2244529f72ecc1cf4ce31030ac0e",
"size": "3897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert_outputs_to_json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33590"
}
],
"symlink_target": ""
} |
"""
github3.github
==============
This module contains the main GitHub session object.
"""
from __future__ import unicode_literals
import json
from .auths import Authorization
from .decorators import (requires_auth, requires_basic_auth,
requires_app_credentials)
from .events import Event
from .gists import Gist
from .issues import Issue, issue_params
from .models import GitHubCore
from .orgs import Membership, Organization, Team
from .pulls import PullRequest
from .repos.repo import Repository, repo_issue_params
from .search import (CodeSearchResult, IssueSearchResult,
RepositorySearchResult, UserSearchResult)
from .structs import SearchIterator
from . import users
from .notifications import Thread
from .licenses import License
from uritemplate import URITemplate
class GitHub(GitHubCore):
"""Stores all the session information.
There are two ways to log into the GitHub API
::
from github3 import login
g = login(user, password)
g = login(token=token)
g = login(user, token=token)
or
::
from github3 import GitHub
g = GitHub(user, password)
g = GitHub(token=token)
g = GitHub(user, token=token)
    This is simply for backward compatibility, since originally there was no
    way to call the GitHub object with authentication parameters.
"""
def __init__(self, username='', password='', token=''):
super(GitHub, self).__init__({})
if token:
self.login(username, token=token)
elif username and password:
self.login(username, password)
def _repr(self):
if self.session.auth:
return '<GitHub [{0[0]}]>'.format(self.session.auth)
return '<GitHub at 0x{0:x}>'.format(id(self))
@requires_auth
def add_email_addresses(self, addresses=[]):
"""Add the email addresses in ``addresses`` to the authenticated
user's account.
:param list addresses: (optional), email addresses to be added
:returns: list of :class:`~github3.users.Email`
"""
json = []
if addresses:
url = self._build_url('user', 'emails')
json = self._json(self._post(url, data=addresses), 201)
return [users.Email(email) for email in json] if json else []
def all_events(self, number=-1, etag=None):
"""Iterate over public events.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Event <github3.events.Event>`\ s
"""
url = self._build_url('events')
return self._iter(int(number), url, Event, etag=etag)
def all_organizations(self, number=-1, since=None, etag=None,
per_page=None):
"""Iterate over every organization in the order they were created.
:param int number: (optional), number of organizations to return.
Default: -1, returns all of them
:param int since: (optional), last organization id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of organizations to list per
request
:returns: generator of :class:`Organization
<github3.orgs.Organization>`
"""
url = self._build_url('organizations')
return self._iter(int(number), url, Organization,
params={'since': since, 'per_page': per_page},
etag=etag)
def all_repositories(self, number=-1, since=None, etag=None,
per_page=None):
"""Iterate over every repository in the order they were created.
:param int number: (optional), number of repositories to return.
Default: -1, returns all of them
:param int since: (optional), last repository id seen (allows
restarting this iteration)
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of repositories to list per
request
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
url = self._build_url('repositories')
return self._iter(int(number), url, Repository,
params={'since': since, 'per_page': per_page},
etag=etag)
def all_users(self, number=-1, etag=None, per_page=None, since=None):
"""Iterate over every user in the order they signed up for GitHub.
.. versionchanged:: 1.0.0
Inserted the ``since`` parameter after the ``number`` parameter.
:param int number: (optional), number of users to return. Default: -1,
returns all of them
:param int since: (optional), ID of the last user that you've seen.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of users to list per request
:returns: generator of :class:`~github3.users.ShortUser`
"""
url = self._build_url('users')
return self._iter(int(number), url, users.ShortUser, etag=etag,
params={'per_page': per_page, 'since': since})
@requires_basic_auth
def authorization(self, id_num):
"""Get information about authorization ``id``.
:param int id_num: (required), unique id of the authorization
:returns: :class:`Authorization <Authorization>`
"""
json = None
if int(id_num) > 0:
url = self._build_url('authorizations', str(id_num))
json = self._json(self._get(url), 200)
return self._instance_or_null(Authorization, json)
@requires_basic_auth
def authorizations(self, number=-1, etag=None):
"""Iterate over authorizations for the authenticated user. This will
return a 404 if you are using a token for authentication.
:param int number: (optional), number of authorizations to return.
Default: -1 returns all available authorizations
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Authorization <Authorization>`\ s
"""
url = self._build_url('authorizations')
return self._iter(int(number), url, Authorization, etag=etag)
def authorize(self, username, password, scopes=None, note='', note_url='',
client_id='', client_secret=''):
"""Obtain an authorization token.
The retrieved token will allow future consumers to use the API without
a username and password.
:param str username: (required)
:param str password: (required)
:param list scopes: (optional), areas you want this token to apply to,
i.e., 'gist', 'user'
:param str note: (optional), note about the authorization
:param str note_url: (optional), url for the application
:param str client_id: (optional), 20 character OAuth client key for
which to create a token
:param str client_secret: (optional), 40 character OAuth client secret
for which to create the token
:returns: :class:`Authorization <Authorization>`
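        Example (a sketch; the credentials, scopes and note are
        illustrative)::
            g = GitHub()
            auth = g.authorize('username', 'password',
                               scopes=['repo', 'gist'],
                               note='token for my script')
            token = auth.token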
"""
json = None
if username and password:
url = self._build_url('authorizations')
data = {'note': note, 'note_url': note_url,
'client_id': client_id, 'client_secret': client_secret}
if scopes:
data['scopes'] = scopes
with self.session.temporary_basic_auth(username, password):
json = self._json(self._post(url, data=data), 201)
return self._instance_or_null(Authorization, json)
def check_authorization(self, access_token):
"""Check an authorization created by a registered application.
OAuth applications can use this method to check token validity
without hitting normal rate limits because of failed login attempts.
If the token is valid, it will return True, otherwise it will return
False.
:returns: bool
"""
p = self.session.params
auth = (p.get('client_id'), p.get('client_secret'))
if access_token and auth:
url = self._build_url('applications', str(auth[0]), 'tokens',
str(access_token))
resp = self._get(url, auth=auth, params={
'client_id': None, 'client_secret': None
})
return self._boolean(resp, 200, 404)
return False
def create_gist(self, description, files, public=True):
"""Create a new gist.
If no login was provided, it will be anonymous.
:param str description: (required), description of gist
:param dict files: (required), file names with associated dictionaries
for content, e.g. ``{'spam.txt': {'content': 'File contents
...'}}``
:param bool public: (optional), make the gist public if True
:returns: :class:`Gist <github3.gists.Gist>`
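        Example (a minimal sketch reusing the ``files`` format described
        above; ``g`` is a :class:`GitHub` instance)::
            files = {'spam.txt': {'content': 'File contents ...'}}
            gist = g.create_gist('example gist', files, public=False)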
"""
new_gist = {'description': description, 'public': public,
'files': files}
url = self._build_url('gists')
json = self._json(self._post(url, data=new_gist), 201)
return self._instance_or_null(Gist, json)
@requires_auth
def create_issue(self, owner, repository, title, body=None, assignee=None,
milestone=None, labels=[]):
"""Create an issue on the project 'repository' owned by 'owner'
with title 'title'.
``body``, ``assignee``, ``milestone``, ``labels`` are all optional.
.. warning::
This method retrieves the repository first and then uses it to
create an issue. If you're making several issues, you should use
:py:meth:`repository <github3.github.GitHub.repository>` and then
use :py:meth:`create_issue
<github3.repos.repo.Repository.create_issue>`
:param str owner: (required), login of the owner
:param str repository: (required), repository name
:param str title: (required), Title of issue to be created
:param str body: (optional), The text of the issue, markdown
formatted
:param str assignee: (optional), Login of person to assign
the issue to
:param int milestone: (optional), id number of the milestone to
attribute this issue to (e.g. ``m`` is a :class:`Milestone
<github3.issues.Milestone>` object, ``m.number`` is what you pass
here.)
:param list labels: (optional), List of label names.
:returns: :class:`Issue <github3.issues.Issue>` if successful
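        Example (a sketch; assumes ``g`` is a logged-in :class:`GitHub`
        instance and the repository is illustrative)::
            issue = g.create_issue('octocat', 'Hello-World', 'Found a bug',
                                   body='Steps to reproduce: ...')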
"""
repo = None
if owner and repository and title:
repo = self.repository(owner, repository)
if repo is not None:
return repo.create_issue(title, body, assignee, milestone, labels)
return self._instance_or_null(Issue, None)
@requires_auth
def create_key(self, title, key, read_only=False):
"""Create a new key for the authenticated user.
:param str title: (required), key title
:param str key: (required), actual key contents, accepts path
as a string or file-like object
:param bool read_only: (optional), restrict key access to read-only,
default to False
:returns: :class:`Key <github3.users.Key>`
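        Example (a sketch; the key file path is hypothetical)::
            with open('id_rsa.pub') as fd:
                key = g.create_key('Laptop key', fd.read())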
"""
json = None
if title and key:
data = {'title': title, 'key': key, 'read_only': read_only}
url = self._build_url('user', 'keys')
req = self._post(url, data=data)
json = self._json(req, 201)
return self._instance_or_null(users.Key, json)
@requires_auth
def create_repository(self, name, description='', homepage='',
private=False, has_issues=True, has_wiki=True,
auto_init=False, gitignore_template=''):
"""Create a repository for the authenticated user.
:param str name: (required), name of the repository
:param str description: (optional)
:param str homepage: (optional)
:param str private: (optional), If ``True``, create a
private repository. API default: ``False``
:param bool has_issues: (optional), If ``True``, enable
issues for this repository. API default: ``True``
:param bool has_wiki: (optional), If ``True``, enable the
wiki for this repository. API default: ``True``
:param bool auto_init: (optional), auto initialize the repository
:param str gitignore_template: (optional), name of the git template to
use; ignored if auto_init = False.
:returns: :class:`Repository <github3.repos.Repository>`
.. warning: ``name`` should be no longer than 100 characters
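        Example (a sketch; the repository name is illustrative)::
            repo = g.create_repository('my-new-repo',
                                       description='Demo repository',
                                       private=True)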
"""
url = self._build_url('user', 'repos')
data = {'name': name, 'description': description,
'homepage': homepage, 'private': private,
'has_issues': has_issues, 'has_wiki': has_wiki,
'auto_init': auto_init,
'gitignore_template': gitignore_template}
json = self._json(self._post(url, data=data), 201)
return self._instance_or_null(Repository, json)
@requires_auth
def delete_email_addresses(self, addresses=[]):
"""Delete the email addresses in ``addresses`` from the
authenticated user's account.
:param list addresses: (optional), email addresses to be removed
:returns: bool
"""
url = self._build_url('user', 'emails')
return self._boolean(self._delete(url, data=json.dumps(addresses)),
204, 404)
@requires_auth
def emails(self, number=-1, etag=None):
"""Iterate over email addresses for the authenticated user.
:param int number: (optional), number of email addresses to return.
Default: -1 returns all available email addresses
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of dicts
"""
url = self._build_url('user', 'emails')
return self._iter(int(number), url, users.Email, etag=etag)
def emojis(self):
"""Retrieves a dictionary of all of the emojis that GitHub supports.
:returns: dictionary where the key is what would be in between the
colons and the value is the URL to the image, e.g., ::
{
'+1': 'https://github.global.ssl.fastly.net/images/...',
# ...
}
"""
url = self._build_url('emojis')
return self._json(self._get(url), 200, include_cache_info=False)
@requires_basic_auth
def feeds(self):
"""List GitHub's timeline resources in Atom format.
:returns: dictionary parsed to include URITemplates
"""
def replace_href(feed_dict):
if not feed_dict:
return feed_dict
ret_dict = {}
# Let's pluck out what we're most interested in, the href value
href = feed_dict.pop('href', None)
# Then we update the return dictionary with the rest of the values
ret_dict.update(feed_dict)
if href is not None:
# So long as there is something to template, let's template it
ret_dict['href'] = URITemplate(href)
return ret_dict
url = self._build_url('feeds')
json = self._json(self._get(url), 200, include_cache_info=False)
if json is None: # If something went wrong, get out early
return None
# We have a response body to parse
feeds = {}
# Let's pop out the old links so we don't have to skip them below
old_links = json.pop('_links', {})
_links = {}
# If _links is in the response JSON, iterate over that and recreate it
# so that any templates contained inside can be turned into
# URITemplates
for key, value in old_links.items():
if isinstance(value, list):
# If it's an array/list of links, let's replace that with a
# new list of links
_links[key] = [replace_href(d) for d in value]
else:
# Otherwise, just use the new value
_links[key] = replace_href(value)
# Start building up our return dictionary
feeds['_links'] = _links
for key, value in json.items():
# This should roughly be the same logic as above.
if isinstance(value, list):
feeds[key] = [URITemplate(v) for v in value]
else:
feeds[key] = URITemplate(value)
return feeds
@requires_auth
def follow(self, username):
"""Make the authenticated user follow the provided username.
:param str username: (required), user to follow
:returns: bool
"""
resp = False
if username:
url = self._build_url('user', 'following', username)
resp = self._boolean(self._put(url), 204, 404)
return resp
def followed_by(self, username, number=-1, etag=None):
r"""Iterate over users being followed by ``username``.
.. versionadded:: 1.0.0
This replaces iter_following('sigmavirus24').
:param str username: (required), login of the user to check
:param int number: (optional), number of people to return. Default: -1
returns all people you follow
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`~github3.users.ShortUser`\ s
"""
url = self._build_url('users', username, 'following')
return self._iter(int(number), url, users.ShortUser, etag=etag)
@requires_auth
def followers(self, number=-1, etag=None):
r"""Iterate over followers of the authenticated user.
.. versionadded:: 1.0.0
This replaces iter_followers().
:param int number: (optional), number of followers to return. Default:
-1 returns all followers
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`~github3.users.ShortUser`\ s
"""
url = self._build_url('user', 'followers')
return self._iter(int(number), url, users.ShortUser, etag=etag)
def followers_of(self, username, number=-1, etag=None):
r"""Iterate over followers of ``username``.
.. versionadded:: 1.0.0
This replaces iter_followers('sigmavirus24').
:param str username: (required), login of the user to check
:param int number: (optional), number of followers to return. Default:
-1 returns all followers
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`~github3.users.ShortUser`\ s
"""
url = self._build_url('users', username, 'followers')
return self._iter(int(number), url, users.ShortUser, etag=etag)
@requires_auth
def following(self, number=-1, etag=None):
r"""Iterate over users the authenticated user is following.
.. versionadded:: 1.0.0
This replaces iter_following().
:param int number: (optional), number of people to return. Default: -1
returns all people you follow
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`~github3.users.ShortUser`\ s
"""
url = self._build_url('user', 'following')
return self._iter(int(number), url, users.ShortUser, etag=etag)
def gist(self, id_num):
"""Retrieve the gist using the specified id number.
:param int id_num: (required), unique id of the gist
:returns: :class:`Gist <github3.gists.Gist>`
"""
url = self._build_url('gists', str(id_num))
json = self._json(self._get(url), 200)
return self._instance_or_null(Gist, json)
@requires_auth
def gists(self, number=-1, etag=None):
"""Retrieve the authenticated user's gists.
.. versionadded:: 1.0
:param int number: (optional), number of gists to return. Default: -1,
returns all available gists
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`\ s
"""
url = self._build_url('gists')
return self._iter(int(number), url, Gist, etag=etag)
def gists_by(self, username, number=-1, etag=None):
"""Iterate over the gists owned by a user.
.. versionadded:: 1.0
:param str username: login of the user who owns the gists
:param int number: (optional), number of gists to return. Default: -1
returns all available gists
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`\ s
"""
url = self._build_url('users', username, 'gists')
return self._iter(int(number), url, Gist, etag=etag)
def gitignore_template(self, language):
"""Return the template for language.
:returns: str
"""
url = self._build_url('gitignore', 'templates', language)
json = self._json(self._get(url), 200)
if not json:
return ''
return json.get('source', '')
def gitignore_templates(self):
"""Return the list of available templates.
:returns: list of template names
"""
url = self._build_url('gitignore', 'templates')
return self._json(self._get(url), 200) or []
@requires_auth
def is_following(self, username):
"""Check if the authenticated user is following login.
        :param str username: (required), login of the user to check whether
            the authenticated user is following them
:returns: bool
"""
json = False
if username:
url = self._build_url('user', 'following', username)
json = self._boolean(self._get(url), 204, 404)
return json
@requires_auth
def is_starred(self, username, repo):
"""Check if the authenticated user starred username/repo.
:param str username: (required), owner of repository
:param str repo: (required), name of repository
:returns: bool
"""
json = False
if username and repo:
url = self._build_url('user', 'starred', username, repo)
json = self._boolean(self._get(url), 204, 404)
return json
def issue(self, username, repository, number):
"""Fetch issue from owner/repository.
:param str username: (required), owner of the repository
:param str repository: (required), name of the repository
:param int number: (required), issue number
:return: :class:`Issue <github3.issues.Issue>`
"""
json = None
if username and repository and int(number) > 0:
url = self._build_url('repos', username, repository, 'issues',
str(number))
json = self._json(self._get(url), 200)
return self._instance_or_null(Issue, json)
@requires_auth
def issues(self, filter='', state='', labels='', sort='', direction='',
since=None, number=-1, etag=None):
"""List all of the authenticated user's (and organization's) issues.
.. versionchanged:: 0.9.0
- The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
:param str filter: accepted values:
('assigned', 'created', 'mentioned', 'subscribed')
api-default: 'assigned'
:param str state: accepted values: ('all', 'open', 'closed')
api-default: 'open'
:param str labels: comma-separated list of label names, e.g.,
'bug,ui,@high'
:param str sort: accepted values: ('created', 'updated', 'comments')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
api-default: desc
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), number of issues to return.
Default: -1 returns all issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.Issue>`
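        Example (a sketch iterating over your open assigned issues)::
            for issue in g.issues(filter='assigned', state='open',
                                  sort='updated', direction='desc'):
                print(issue.title)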
"""
url = self._build_url('issues')
# issue_params will handle the since parameter
params = issue_params(filter, state, labels, sort, direction, since)
return self._iter(int(number), url, Issue, params, etag)
def issues_on(self, username, repository, milestone=None, state=None,
assignee=None, mentioned=None, labels=None, sort=None,
direction=None, since=None, number=-1, etag=None):
"""List issues on owner/repository. Only owner and repository are
required.
.. versionchanged:: 0.9.0
- The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
:param str username: login of the owner of the repository
:param str repository: name of the repository
:param int milestone: None, '*', or ID of milestone
:param str state: accepted values: ('all', 'open', 'closed')
api-default: 'open'
:param str assignee: '*' or login of the user
:param str mentioned: login of the user
:param str labels: comma-separated list of label names, e.g.,
'bug,ui,@high'
:param str sort: accepted values: ('created', 'updated', 'comments')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
api-default: desc
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), number of issues to return.
Default: -1 returns all issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.Issue>`\ s
"""
if username and repository:
url = self._build_url('repos', username, repository, 'issues')
params = repo_issue_params(milestone, state, assignee, mentioned,
labels, sort, direction, since)
return self._iter(int(number), url, Issue, params=params,
etag=etag)
return iter([])
@requires_auth
def key(self, id_num):
"""Gets the authenticated user's key specified by id_num.
:param int id_num: (required), unique id of the key
:returns: :class:`Key <github3.users.Key>`
"""
json = None
if int(id_num) > 0:
url = self._build_url('user', 'keys', str(id_num))
json = self._json(self._get(url), 200)
return self._instance_or_null(users.Key, json)
@requires_auth
def keys(self, number=-1, etag=None):
"""Iterate over public keys for the authenticated user.
:param int number: (optional), number of keys to return. Default: -1
returns all your keys
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Key <github3.users.Key>`\ s
"""
url = self._build_url('user', 'keys')
return self._iter(int(number), url, users.Key, etag=etag)
def license(self, name):
"""Retrieve the license specified by the name.
:param string name: (required), name of license
:returns: :class:`License <github3.licenses.License>`
"""
url = self._build_url('licenses', name)
json = self._json(self._get(url, headers=License.CUSTOM_HEADERS), 200)
return self._instance_or_null(License, json)
def licenses(self, number=-1, etag=None):
"""Iterate over open source licenses.
:returns: generator of :class:`License <github3.licenses.License>`
"""
url = self._build_url('licenses')
return self._iter(int(number), url, License, etag=etag,
headers=License.CUSTOM_HEADERS)
def login(self, username=None, password=None, token=None,
two_factor_callback=None):
"""Logs the user into GitHub for protected API calls.
:param str username: login name
:param str password: password for the login
:param str token: OAuth token
:param func two_factor_callback: (optional), function you implement to
provide the Two Factor Authentication code to GitHub when necessary
"""
if username and password:
self.session.basic_auth(username, password)
elif token:
self.session.token_auth(token)
# The Session method handles None for free.
self.session.two_factor_auth_callback(two_factor_callback)
def markdown(self, text, mode='', context='', raw=False):
"""Render an arbitrary markdown document.
:param str text: (required), the text of the document to render
:param str mode: (optional), 'markdown' or 'gfm'
:param str context: (optional), only important when using mode 'gfm',
this is the repository to use as the context for the rendering
:param bool raw: (optional), renders a document like a README.md, no
gfm, no context
:returns: str (or unicode on Python 2) -- HTML formatted text
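        Example (a sketch; the repository used as ``context`` is
        illustrative)::
            html = g.markdown('**Hello**')
            html = g.markdown('#1 will be linked', mode='gfm',
                              context='octocat/Hello-World')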
"""
data = None
json = False
headers = {}
if raw:
url = self._build_url('markdown', 'raw')
data = text
headers['content-type'] = 'text/plain'
else:
url = self._build_url('markdown')
data = {}
if text:
data['text'] = text
if mode in ('markdown', 'gfm'):
data['mode'] = mode
if context:
data['context'] = context
json = True
html = ''
if data:
req = self._post(url, data=data, json=json, headers=headers)
if req.ok:
html = req.text
return html
@requires_auth
def me(self):
"""Retrieve the info for the authenticated user.
.. versionadded:: 1.0
This was separated from the ``user`` method.
:returns: The representation of the authenticated user.
:rtype: :class:`~github3.users.AuthenticatedUser`
"""
url = self._build_url('user')
json = self._json(self._get(url), 200)
return self._instance_or_null(users.AuthenticatedUser, json)
@requires_auth
def membership_in(self, organization):
"""Retrieve the user's membership in the specified organization."""
url = self._build_url('user', 'memberships', 'orgs',
str(organization))
json = self._json(self._get(url), 200)
return self._instance_or_null(Membership, json)
def meta(self):
"""Returns a dictionary with arrays of addresses in CIDR format
        specifying the addresses that the incoming service hooks will
        originate from.
.. versionadded:: 0.5
"""
url = self._build_url('meta')
return self._json(self._get(url), 200) or {}
@requires_auth
def notifications(self, all=False, participating=False, number=-1,
etag=None):
"""Iterate over the user's notification.
:param bool all: (optional), iterate over all notifications
:param bool participating: (optional), only iterate over notifications
in which the user is participating
:param int number: (optional), how many notifications to return
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Thread <github3.notifications.Thread>`
"""
params = None
if all is True:
params = {'all': 'true'}
elif participating is True:
params = {'participating': 'true'}
url = self._build_url('notifications')
return self._iter(int(number), url, Thread, params, etag=etag)
def octocat(self, say=None):
"""Returns an easter egg of the API.
        :param str say: (optional), pass in what you'd like Octocat to say
:returns: ascii art of Octocat
        :rtype: str (or unicode on Python 2)
"""
url = self._build_url('octocat')
req = self._get(url, params={'s': say})
return req.text if req.ok else ''
def organization(self, username):
"""Returns a Organization object for the login name
:param str username: (required), login name of the org
:returns: :class:`Organization <github3.orgs.Organization>`
"""
url = self._build_url('orgs', username)
json = self._json(self._get(url), 200)
return self._instance_or_null(Organization, json)
@requires_auth
def organization_issues(self, name, filter='', state='', labels='',
sort='', direction='', since=None, number=-1,
etag=None):
"""Iterate over the organization's issues if the authenticated user
belongs to it.
:param str name: (required), name of the organization
:param str filter: accepted values:
('assigned', 'created', 'mentioned', 'subscribed')
api-default: 'assigned'
:param str state: accepted values: ('open', 'closed')
api-default: 'open'
:param str labels: comma-separated list of label names, e.g.,
'bug,ui,@high'
:param str sort: accepted values: ('created', 'updated', 'comments')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
api-default: desc
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), number of issues to return. Default:
-1, returns all available issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.Issue>`
"""
url = self._build_url('orgs', name, 'issues')
# issue_params will handle the since parameter
params = issue_params(filter, state, labels, sort, direction, since)
return self._iter(int(number), url, Issue, params, etag)
@requires_auth
def organizations(self, number=-1, etag=None):
"""Iterate over all organizations the authenticated user belongs to.
This will display both the private memberships and the publicized
memberships.
:param int number: (optional), number of organizations to return.
Default: -1 returns all available organizations
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Organization <github3.orgs.Organization>`\ s
"""
url = self._build_url('user', 'orgs')
return self._iter(int(number), url, Organization, etag=etag)
def organizations_with(self, username, number=-1, etag=None):
"""Iterate over organizations with ``username`` as a public member.
.. versionadded:: 1.0.0
Replaces ``iter_orgs('sigmavirus24')``.
:param str username: (optional), user whose orgs you wish to list
:param int number: (optional), number of organizations to return.
Default: -1 returns all available organizations
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of
:class:`Organization <github3.orgs.Organization>`\ s
"""
if username:
url = self._build_url('users', username, 'orgs')
return self._iter(int(number), url, Organization, etag=etag)
return iter([])
def public_gists(self, number=-1, etag=None):
"""Retrieve all public gists and iterate over them.
.. versionadded:: 1.0
:param int number: (optional), number of gists to return. Default: -1
returns all available gists
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Gist <github3.gists.Gist>`\ s
"""
url = self._build_url('gists', 'public')
return self._iter(int(number), url, Gist, etag=etag)
@requires_auth
def organization_memberships(self, state=None, number=-1, etag=None):
"""List organizations of which the user is a current or pending member.
        :param str state: (optional), state of the membership, i.e., active,
pending
:returns: iterator of :class:`Membership <github3.orgs.Membership>`
"""
params = None
url = self._build_url('user', 'memberships', 'orgs')
if state is not None and state.lower() in ('active', 'pending'):
params = {'state': state.lower()}
return self._iter(int(number), url, Membership,
params=params,
etag=etag)
@requires_auth
def pubsubhubbub(self, mode, topic, callback, secret=''):
"""Create/update a pubsubhubbub hook.
:param str mode: (required), accepted values: ('subscribe',
'unsubscribe')
:param str topic: (required), form:
https://github.com/:user/:repo/events/:event
:param str callback: (required), the URI that receives the updates
:param str secret: (optional), shared secret key that generates a
SHA1 HMAC of the payload content.
:returns: bool
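        Example (a sketch; the topic follows the form documented above and
        the callback URL is hypothetical)::
            g.pubsubhubbub(
                'subscribe',
                'https://github.com/octocat/Hello-World/events/push',
                'https://example.com/hooks/github')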
"""
from re import match
        m = match(r'https?://[\w\d\-\.\:]+/\w[\w-]+\w/[\w\._-]+/events/\w+',
                  topic)
status = False
if mode and topic and callback and m:
data = [('hub.mode', mode), ('hub.topic', topic),
('hub.callback', callback)]
if secret:
data.append(('hub.secret', secret))
url = self._build_url('hub')
# This is not JSON data. It is meant to be form data
# application/x-www-form-urlencoded works fine here, no need for
# multipart/form-data
status = self._boolean(self._post(url, data=data, json=False,
headers={
'Content-Type':
'application/x-www-form-urlencoded'
}), 204, 404)
return status
def pull_request(self, owner, repository, number):
"""Fetch pull_request #:number: from :owner:/:repository
:param str owner: (required), owner of the repository
:param str repository: (required), name of the repository
:param int number: (required), issue number
        :return: :class:`~github3.pulls.PullRequest`
"""
json = None
if int(number) > 0:
url = self._build_url('repos', owner, repository, 'pulls',
str(number))
json = self._json(self._get(url), 200)
return self._instance_or_null(PullRequest, json)
def rate_limit(self):
"""Returns a dictionary with information from /rate_limit.
The dictionary has two keys: ``resources`` and ``rate``. In
``resources`` you can access information about ``core`` or ``search``.
Note: the ``rate`` key will be deprecated before version 3 of the
GitHub API is finalized. Do not rely on that key. Instead, make your
code future-proof by using ``core`` in ``resources``, e.g.,
::
rates = g.rate_limit()
rates['resources']['core'] # => your normal ratelimit info
rates['resources']['search'] # => your search ratelimit info
.. versionadded:: 0.8
:returns: dict
"""
url = self._build_url('rate_limit')
return self._json(self._get(url), 200)
@requires_auth
def repositories(self, type=None, sort=None, direction=None, number=-1,
etag=None):
"""List repositories for the authenticated user, filterable by ``type``.
.. versionchanged:: 0.6
Removed the login parameter for correctness. Use repositories_by
instead
:param str type: (optional), accepted values:
('all', 'owner', 'public', 'private', 'member')
API default: 'all'
:param str sort: (optional), accepted values:
('created', 'updated', 'pushed', 'full_name')
API default: 'created'
:param str direction: (optional), accepted values:
('asc', 'desc'), API default: 'asc' when using 'full_name',
'desc' otherwise
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
objects
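        Example (a sketch listing the repositories you own)::
            for repo in g.repositories(type='owner', sort='updated'):
                print(repo.full_name)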
"""
url = self._build_url('user', 'repos')
params = {}
if type in ('all', 'owner', 'public', 'private', 'member'):
params.update(type=type)
if sort in ('created', 'updated', 'pushed', 'full_name'):
params.update(sort=sort)
if direction in ('asc', 'desc'):
params.update(direction=direction)
return self._iter(int(number), url, Repository, params, etag)
def repositories_by(self, username, type=None, sort=None, direction=None,
number=-1, etag=None):
"""List public repositories for the specified ``username``.
.. versionadded:: 0.6
:param str username: (required), username
:param str type: (optional), accepted values:
('all', 'owner', 'member')
API default: 'all'
:param str sort: (optional), accepted values:
('created', 'updated', 'pushed', 'full_name')
API default: 'created'
:param str direction: (optional), accepted values:
('asc', 'desc'), API default: 'asc' when using 'full_name',
'desc' otherwise
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
objects
"""
url = self._build_url('users', username, 'repos')
params = {}
if type in ('all', 'owner', 'member'):
params.update(type=type)
if sort in ('created', 'updated', 'pushed', 'full_name'):
params.update(sort=sort)
if direction in ('asc', 'desc'):
params.update(direction=direction)
return self._iter(int(number), url, Repository, params, etag)
def repository(self, owner, repository):
"""Returns a Repository object for the specified combination of
owner and repository
:param str owner: (required)
:param str repository: (required)
:returns: :class:`Repository <github3.repos.Repository>`
"""
json = None
if owner and repository:
url = self._build_url('repos', owner, repository)
json = self._json(self._get(url, headers=License.CUSTOM_HEADERS),
200)
return self._instance_or_null(Repository, json)
def repository_with_id(self, number):
"""Returns the Repository with id ``number``.
:param int number: id of the repository
:returns: :class:`Repository <github3.repos.Repository>`
"""
number = int(number)
json = None
if number > 0:
url = self._build_url('repositories', str(number))
json = self._json(self._get(url), 200)
return self._instance_or_null(Repository, json)
@requires_app_credentials
def revoke_authorization(self, access_token):
"""Revoke specified authorization for an OAuth application.
        Revoke the specified authorization token created by your
        application. This will only work if you have already called
        ``set_client_id``.
:param str access_token: (required), the access_token to revoke
:returns: bool -- True if successful, False otherwise
"""
client_id, client_secret = self.session.retrieve_client_credentials()
url = self._build_url('applications', str(client_id), 'tokens',
access_token)
with self.session.temporary_basic_auth(client_id, client_secret):
response = self._delete(url, params={'client_id': None,
'client_secret': None})
return self._boolean(response, 204, 404)
@requires_app_credentials
def revoke_authorizations(self):
"""Revoke all authorizations for an OAuth application.
Revoke all authorization tokens created by your application. This will
only work if you have already called ``set_client_id``.
:returns: bool -- True if successful, False otherwise
"""
client_id, client_secret = self.session.retrieve_client_credentials()
url = self._build_url('applications', str(client_id), 'tokens')
with self.session.temporary_basic_auth(client_id, client_secret):
response = self._delete(url, params={'client_id': None,
'client_secret': None})
return self._boolean(response, 204, 404)
def search_code(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find code via the code search API.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the file contents, the file path, or
both.
- ``language`` Searches code based on the language it’s written in.
- ``fork`` Specifies that code from forked repositories should be
searched. Repository forks will not be searchable unless the fork
has more stars than the parent repository.
- ``size`` Finds files that match a certain size (in bytes).
- ``path`` Specifies the path that the resulting file must be at.
- ``extension`` Matches files with a certain extension.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/-DvAuA
:param str query: (required), a valid query as described above, e.g.,
``addClass in:file language:js repo:jquery/jquery``
:param str sort: (optional), how the results should be sorted;
option(s): ``indexed``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/iRmJxg for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`CodeSearchResult
<github3.search.CodeSearchResult>`
"""
params = {'q': query}
headers = {}
if sort == 'indexed':
params['sort'] = sort
if sort and order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'code')
return SearchIterator(number, url, CodeSearchResult, self, params,
etag, headers)
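    # A minimal sketch of a code search (assumes an authenticated ``gh``
    # instance; the query and attribute access are illustrative):
    #
    #   for result in gh.search_code('addClass in:file language:js '
    #                                'repo:jquery/jquery', number=5):
    #       print(result.path)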
def search_issues(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find issues by state and keyword
The query can contain any combination of the following supported
        qualifiers:
- ``type`` With this qualifier you can restrict the search to issues
or pull request only.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the title, body, comments, or any
combination of these.
- ``author`` Finds issues created by a certain user.
- ``assignee`` Finds issues that are assigned to a certain user.
- ``mentions`` Finds issues that mention a certain user.
- ``commenter`` Finds issues that a certain user commented on.
- ``involves`` Finds issues that were either created by a certain user,
assigned to that user, mention that user, or were commented on by
that user.
- ``state`` Filter issues based on whether they’re open or closed.
- ``labels`` Filters issues based on their labels.
- ``language`` Searches for issues within repositories that match a
certain language.
- ``created`` or ``updated`` Filters issues based on times of creation,
or when they were last updated.
- ``comments`` Filters issues based on the quantity of comments.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/d1oELA
:param str query: (required), a valid query as described above, e.g.,
``windows label:bug``
:param str sort: (optional), how the results should be sorted;
options: ``created``, ``comments``, ``updated``;
default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/QLQuSQ for more information
:param int number: (optional), number of issues to return.
Default: -1, returns all available issues
:param str etag: (optional), previous ETag header value
:return: generator of :class:`IssueSearchResult
<github3.search.IssueSearchResult>`
"""
params = {'q': query}
headers = {}
if sort in ('comments', 'created', 'updated'):
params['sort'] = sort
if order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'issues')
return SearchIterator(number, url, IssueSearchResult, self, params,
etag, headers)
def search_repositories(self, query, sort=None, order=None,
per_page=None, text_match=False, number=-1,
etag=None):
"""Find repositories via various criteria.
The query can contain any combination of the following supported
        qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the repository name, description,
readme, or any combination of these.
- ``size`` Finds repositories that match a certain size (in
kilobytes).
- ``forks`` Filters repositories based on the number of forks, and/or
whether forked repositories should be included in the results at
all.
- ``created`` or ``pushed`` Filters repositories based on times of
creation, or when they were last updated. Format: ``YYYY-MM-DD``.
Examples: ``created:<2011``, ``pushed:<2013-02``,
``pushed:>=2013-03-06``
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
- ``language`` Searches repositories based on the language they're
written in.
- ``stars`` Searches repositories based on the number of stars.
For more information about these qualifiers, see: http://git.io/4Z8AkA
:param str query: (required), a valid query as described above, e.g.,
``tetris language:assembly``
:param str sort: (optional), how the results should be sorted;
options: ``stars``, ``forks``, ``updated``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/4ct1eQ for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`Repository <github3.repos.Repository>`
"""
params = {'q': query}
headers = {}
if sort in ('stars', 'forks', 'updated'):
params['sort'] = sort
if order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'repositories')
return SearchIterator(number, url, RepositorySearchResult, self,
params, etag, headers)
def search_users(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find users via the Search API.
The query can contain any combination of the following supported
        qualifiers:
- ``type`` With this qualifier you can restrict the search to just
personal accounts or just organization accounts.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the username, public email, full
name, or any combination of these.
- ``repos`` Filters users based on the number of repositories they
have.
- ``location`` Filter users by the location indicated in their
profile.
- ``language`` Search for users that have repositories that match a
certain language.
- ``created`` Filter users based on when they joined.
- ``followers`` Filter users based on the number of followers they
have.
For more information about these qualifiers see: http://git.io/wjVYJw
:param str query: (required), a valid query as described above, e.g.,
``tom repos:>42 followers:>1000``
:param str sort: (optional), how the results should be sorted;
options: ``followers``, ``repositories``, or ``joined``; default:
best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/_V1zRwa for more information
:param int number: (optional), number of search results to return;
Default: -1 returns all available
:param str etag: (optional), ETag header value of the last request.
:return: generator of :class:`UserSearchResult
<github3.search.UserSearchResult>`
"""
params = {'q': query}
headers = {}
if sort in ('followers', 'repositories', 'joined'):
params['sort'] = sort
if order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'users')
return SearchIterator(number, url, UserSearchResult, self, params,
etag, headers)
def set_client_id(self, id, secret):
"""Allows the developer to set their client_id and client_secret for
their OAuth application.
        :param str id: 20-character hexadecimal client_id provided by GitHub
        :param str secret: 40-character hexadecimal client_secret provided by
GitHub
"""
self.session.params = {'client_id': id, 'client_secret': secret}
def set_user_agent(self, user_agent):
"""Allows the user to set their own user agent string to identify with
the API.
:param str user_agent: String used to identify your application.
Library default: "github3.py/{version}", e.g., "github3.py/0.5"
"""
if not user_agent:
return
self.session.headers.update({'User-Agent': user_agent})
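    # Hedged example of registering OAuth application credentials and a
    # custom User-Agent (the id/secret values are placeholders):
    #
    #   gh = GitHub()
    #   gh.set_client_id('0123456789abcdef0123',
    #                    '0123456789abcdef0123456789abcdef01234567')
    #   gh.set_user_agent('my-app/1.0')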
@requires_auth
def star(self, username, repo):
"""Star to username/repo
:param str username: (required), owner of the repo
:param str repo: (required), name of the repo
:return: bool
"""
resp = False
if username and repo:
url = self._build_url('user', 'starred', username, repo)
resp = self._boolean(self._put(url), 204, 404)
return resp
@requires_auth
def starred(self, sort=None, direction=None, number=-1, etag=None):
"""Iterate over repositories starred by the authenticated user.
.. versionchanged:: 1.0
This was split from ``iter_starred`` and requires authentication.
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
params = {'sort': sort, 'direction': direction}
self._remove_none(params)
url = self._build_url('user', 'starred')
return self._iter(int(number), url, Repository, params, etag)
def starred_by(self, username, sort=None, direction=None, number=-1,
etag=None):
"""Iterate over repositories starred by ``username``.
.. versionadded:: 1.0
This was split from ``iter_starred`` and requires the login
parameter.
:param str username: name of user whose stars you want to see
:param str sort: (optional), either 'created' (when the star was
created) or 'updated' (when the repository was last pushed to)
:param str direction: (optional), either 'asc' or 'desc'. Default:
'desc'
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
params = {'sort': sort, 'direction': direction}
self._remove_none(params)
url = self._build_url('users', str(username), 'starred')
return self._iter(int(number), url, Repository, params, etag)
@requires_auth
def subscriptions(self, number=-1, etag=None):
"""Iterate over repositories subscribed to by the authenticated user.
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
url = self._build_url('user', 'subscriptions')
return self._iter(int(number), url, Repository, etag=etag)
def subscriptions_for(self, username, number=-1, etag=None):
"""Iterate over repositories subscribed to by ``username``.
        :param str username: (required), name of user whose subscriptions you
            want to see
:param int number: (optional), number of repositories to return.
Default: -1 returns all repositories
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Repository <github3.repos.Repository>`
"""
url = self._build_url('users', str(username), 'subscriptions')
return self._iter(int(number), url, Repository, etag=etag)
@requires_auth
def unfollow(self, username):
"""Make the authenticated user stop following username
:param str username: (required)
:returns: bool
"""
resp = False
if username:
url = self._build_url('user', 'following', username)
resp = self._boolean(self._delete(url), 204, 404)
return resp
@requires_auth
def unstar(self, username, repo):
"""Unstar username/repo.
:param str username: (required), owner of the repo
:param str repo: (required), name of the repo
:return: bool
"""
resp = False
if username and repo:
url = self._build_url('user', 'starred', username, repo)
resp = self._boolean(self._delete(url), 204, 404)
return resp
@requires_auth
def update_me(self, name=None, email=None, blog=None, company=None,
location=None, hireable=False, bio=None):
"""Update the profile of the authenticated user.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company:
:param str location:
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: whether the operation was successful or not
:rtype: bool
"""
user = {'name': name, 'email': email, 'blog': blog,
'company': company, 'location': location,
'hireable': hireable, 'bio': bio}
self._remove_none(user)
url = self._build_url('user')
_json = self._json(self._patch(url, data=json.dumps(user)), 200)
if _json:
self._update_attributes(_json)
return True
return False
def user(self, username):
"""Retrieve a User object for the specified user name.
:param str username: name of the user
:returns: :class:`~github3.users.User`
"""
url = self._build_url('users', username)
json = self._json(self._get(url), 200)
return self._instance_or_null(users.User, json)
@requires_auth
def user_issues(self, filter='', state='', labels='', sort='',
direction='', since=None, per_page=None, number=-1,
etag=None):
"""List only the authenticated user's issues. Will not list
organization's issues
.. versionchanged:: 1.0
``per_page`` parameter added before ``number``
.. versionchanged:: 0.9.0
- The ``state`` parameter now accepts 'all' in addition to 'open'
and 'closed'.
:param str filter: accepted values:
('assigned', 'created', 'mentioned', 'subscribed')
api-default: 'assigned'
:param str state: accepted values: ('all', 'open', 'closed')
api-default: 'open'
:param str labels: comma-separated list of label names, e.g.,
'bug,ui,@high'
:param str sort: accepted values: ('created', 'updated', 'comments')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
api-default: desc
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:param int number: (optional), number of issues to return.
Default: -1 returns all issues
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Issue <github3.issues.Issue>`
"""
url = self._build_url('user', 'issues')
# issue_params will handle the since parameter
params = issue_params(filter, state, labels, sort, direction, since)
params.update(per_page=per_page)
return self._iter(int(number), url, Issue, params, etag)
@requires_auth
def user_teams(self, number=-1, etag=None):
"""Gets the authenticated user's teams across all of organizations.
List all of the teams across all of the organizations to which the
authenticated user belongs. This method requires user or repo scope
when authenticating via OAuth.
        :param int number: (optional), number of teams to return. Default: -1
            returns all teams
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of :class:`Team <github3.orgs.Team>` objects
"""
url = self._build_url('user', 'teams')
return self._iter(int(number), url, Team, etag=etag)
def user_with_id(self, number):
"""Get the user's information with id ``number``.
:param int number: the user's id number
:returns: :class:`~github3.users.User`
"""
number = int(number)
json = None
if number > 0:
url = self._build_url('user', str(number))
json = self._json(self._get(url), 200)
return self._instance_or_null(users.User, json)
def zen(self):
"""Returns a quote from the Zen of GitHub. Yet another API Easter Egg
:returns: str (on Python 3, unicode on Python 2)
"""
url = self._build_url('zen')
resp = self._get(url)
return resp.text if resp.status_code == 200 else b''.decode('utf-8')
class GitHubEnterprise(GitHub):
"""For GitHub Enterprise users, this object will act as the public API to
your instance. You must provide the URL to your instance upon
initialization and can provide the rest of the login details just like in
the :class:`GitHub <GitHub>` object.
    There is no need to provide the end of the URL (e.g., /api/v3/); that
    is taken care of for you.
    If your local GitHub Enterprise instance uses a self-signed SSL
    certificate, you can skip certificate validation by passing
    `verify=False`.
"""
def __init__(self, url, username='', password='', token='', verify=True):
super(GitHubEnterprise, self).__init__(username, password, token)
self.session.base_url = url.rstrip('/') + '/api/v3'
self.session.verify = verify
self.url = url
def _repr(self):
return '<GitHub Enterprise [{0.url}]>'.format(self)
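    # A minimal connection sketch (the URL and token are placeholders):
    #
    #   ghe = GitHubEnterprise('https://github.example.com',
    #                          token='<personal-access-token>')
    #   user = ghe.user('octocat')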
@requires_auth
def create_user(self, login, email):
"""Create a new user.
This is only available for administrators of the instance.
:param str login: (required), The user's username.
:param str email: (required), The user's email address.
:returns: :class:`User <github3.users.User>`, if successful
"""
url = self._build_url('admin', 'users')
payload = {'login': login, 'email': email}
json_data = self._json(self._post(url, data=payload), 201)
return self._instance_or_null(users.User, json_data)
@requires_auth
def admin_stats(self, option):
"""This is a simple way to get statistics about your system.
:param str option: (required), accepted values: ('all', 'repos',
'hooks', 'pages', 'orgs', 'users', 'pulls', 'issues',
'milestones', 'gists', 'comments')
:returns: dict
"""
stats = {}
if option.lower() in ('all', 'repos', 'hooks', 'pages', 'orgs',
'users', 'pulls', 'issues', 'milestones',
'gists', 'comments'):
url = self._build_url('enterprise', 'stats', option.lower())
stats = self._json(self._get(url), 200)
return stats
class GitHubStatus(GitHubCore):
"""A sleek interface to the GitHub System Status API. This will only ever
return the JSON objects returned by the API.
"""
def __init__(self):
super(GitHubStatus, self).__init__({})
self.session.base_url = 'https://status.github.com'
def _repr(self):
return '<GitHub Status>'
def _recipe(self, *args):
url = self._build_url(*args)
resp = self._get(url)
return resp.json() if self._boolean(resp, 200, 404) else {}
def api(self):
"""GET /api.json"""
return self._recipe('api.json')
def status(self):
"""GET /api/status.json"""
return self._recipe('api', 'status.json')
def last_message(self):
"""GET /api/last-message.json"""
return self._recipe('api', 'last-message.json')
def messages(self):
"""GET /api/messages.json"""
return self._recipe('api', 'messages.json')
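# Hedged usage sketch -- the status API requires no authentication:
#
#   status = GitHubStatus()
#   status.status()        # e.g. {'status': 'good', 'last_updated': ...}
#   status.last_message()  # most recent status message as a dict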
| {
"content_hash": "732c651b7c261ca9229f17829d5d5e4f",
"timestamp": "",
"source": "github",
"line_count": 1762,
"max_line_length": 80,
"avg_line_length": 40.74971623155505,
"alnum_prop": 0.5905628055319564,
"repo_name": "balloob/github3.py",
"id": "d411c2e40f926c0989f9e28ef3451654fb19bb9c",
"size": "71829",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "github3/github.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "101335"
},
{
"name": "Makefile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "799157"
}
],
"symlink_target": ""
} |
from django import template
from events.models import EventRSVP
register = template.Library()
@register.simple_tag
def get_community_event_rsvp(user, event):
if not event or not user:
return None
    # use filter() + first() to return None when there is no RSVP
return EventRSVP.objects.filter(user=user, event=event).first()
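# A hedged usage sketch in a template (variable names are illustrative):
#
#   {% load event_tags %}
#   {% get_community_event_rsvp request.user event as rsvp %}
#   {% if rsvp %}You are attending.{% endif %}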
| {
"content_hash": "3ac7c990ab6607ba838505f85f74d984",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 26.76923076923077,
"alnum_prop": 0.7385057471264368,
"repo_name": "letsmeet-click/letsmeet.click",
"id": "d26afde25f5ecad91a0871fea814b8a4c5822593",
"size": "348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letsmeet/events/templatetags/event_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "425"
},
{
"name": "Dockerfile",
"bytes": "766"
},
{
"name": "HTML",
"bytes": "67033"
},
{
"name": "JavaScript",
"bytes": "3716"
},
{
"name": "Makefile",
"bytes": "74"
},
{
"name": "Python",
"bytes": "98881"
},
{
"name": "Shell",
"bytes": "1027"
}
],
"symlink_target": ""
} |
import threading
import asyncio
import weakref
from functools import partial
import inspect
import collections
import rethinkdb as r
r.set_loop_type("asyncio")
from .errors import IllegalAccessError, AlreadyExistsError
from .registry import registry
__all__ = [ "db_conn", "init_app_db", "configure_db_connection", "aiter_changes" ]
###############################################################################
# DB connections
###############################################################################
class _OneConnPerThreadPool:
"""Keeps track of one RethinkDB connection per thread.
Get (or create) the current thread's connection with get() or just
    __await__. close() closes and discards the current thread's connection
so that a subsequent __await__ or get opens a new connection.
"""
def __init__(self):
self._tl = threading.local()
self._connect_kwargs = None
def configure_db_connection(self, **connect_kwargs):
        if self._connect_kwargs is not None:
raise AlreadyExistsError("Can not re-configure DB connection(s)")
self._connect_kwargs = connect_kwargs
def __await__(self):
return self.get().__await__()
async def get(self):
"""Gets or opens the thread's DB connection.
"""
        if self._connect_kwargs is None:
raise IllegalAccessError("DB connection parameters not set yet")
if not hasattr(self._tl, "conn"):
self._tl.conn = await r.connect(**self._connect_kwargs)
return self._tl.conn
async def close(self, noreply_wait = True):
"""Closes the thread's DB connection.
"""
if hasattr(self._tl, "conn"):
if self._tl.conn.is_open():
await self._tl.conn.close(noreply_wait)
del self._tl.conn
db_conn = _OneConnPerThreadPool()
def configure_db_connection(db, **kwargs_for_rethink_connect):
"""Sets DB connection parameters. This function should be called exactly
once, before init_app_db is called or db_conn is first used.
"""
db_conn.configure_db_connection(db = db, **kwargs_for_rethink_connect)
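# A minimal setup sketch (db/host/port values are placeholders):
#
#   configure_db_connection(db="myapp", host="localhost", port=28015)
#   conn = await db_conn  # inside a coroutine; one connection per thread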
###############################################################################
# DB setup (tables and such)
###############################################################################
async def init_app_db(reconfigure_db = False, conn = None):
cn = conn or await db_conn
# create DB if it doesn't exist
our_db = db_conn._connect_kwargs["db"]
dbs = await r.db_list().run(cn)
if our_db not in dbs:
await r.db_create(our_db).run(cn)
# (re)configure DB tables
for doc_class in registry.values():
if not await doc_class.table_exists(cn):
await doc_class._create_table(cn)
elif reconfigure_db:
await doc_class._reconfigure_table(cn)
###############################################################################
# DB query helpers
###############################################################################
async def _run_query(query, conn = None):
"""`run()`s query if caller hasn't already done so, then awaits and returns
its result.
If run() has already been called, then the query (strictly speaking, the
awaitable) is just awaited. This gives the caller the opportunity to
customize the run() call.
If run() has not been called, then the query is run on the given connection
(or the default connection). This is more convenient for the caller than
the other version.
"""
# run() it if caller didn't do that already
if not inspect.isawaitable(query):
if not isinstance(query, r.RqlQuery):
raise TypeError("query is neither awaitable nor a RqlQuery")
cn = conn or await db_conn
query = query.run(cn)
return await query
async def aiter_changes(query, value_type, conn = None):
"""Runs any changes() query, and from its result stream constructs "Python
world" objects as determined by value_type (which may equal None when
data is deleted from the DB).
The function returns an asynchronous iterator (a ``ChangesAsyncMap``),
which yields `(constructed python object, changefeed message)` tuples.
Note that `constructed python object` might well be None.
The `query` might or might not already have called `run()`, but it should
not have been awaited on yet (check ``_run_query`` for details).
"""
feed = await _run_query(query, conn)
mapper = value_type.dbval_to_pyval
return ChangesAsyncMap(feed, mapper)
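# Hedged usage sketch (``MyDoc`` is a hypothetical registered Document class
# exposing ``dbval_to_pyval``; the table name is illustrative):
#
#   feed = await aiter_changes(r.table("mydocs").changes(), MyDoc)
#   async for obj, message in feed:
#       ...  # obj may be None, e.g. when a document was deleted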
###############################################################################
# Asynchronous iterators over cursors and changefeeds
###############################################################################
class CursorAsyncIterator(collections.abc.AsyncIterator):
"""Async iterator that iterates over a RethinkDB cursor until it's empty.
"""
def __init__(self, cursor):
self.cursor = cursor
    def __aiter__(self):
        # __aiter__ must return the async iterator directly; returning a
        # coroutine from __aiter__ was removed in Python 3.8.
        return self
async def __anext__(self):
try:
return await self.cursor.next()
except r.ReqlCursorEmpty:
raise StopAsyncIteration
async def as_list(self):
"""Turns the asynchronous iterator into a list by doing the iteration
and collecting the resulting items into a list.
"""
l = []
async for item in self:
l.append(item)
return l
class CursorAsyncMap(CursorAsyncIterator):
"""Async iterator that iterates through a RethinkDB cursor, mapping each
object coming out of the cursor to a supplied mapper function.
Example: Document.from_cursor(cursor) returns a CursorAsyncMap that maps
each object from the cursor to Document.from_doc().
The ``as_list()`` coroutine creates a list out of the iterated items.
"""
def __init__(self, cursor, mapper):
"""cursor is a RethinkDB cursor. mapper is a function accepting one
parameter: whatever comes out of cursor.next().
"""
super().__init__(cursor)
self.mapper = mapper
async def __anext__(self):
item = await super().__anext__()
mapped = self.mapper(item)
return mapped
class ChangesAsyncMap(CursorAsyncIterator):
"""Async iterator that iterates over a RethinkDB changefeed, mapping each
new_val coming in to a supplied mapper function (that typically makes some
Python object out of it). On each iteration, a tuple (mapped object,
changefeed message) is yielded. Note that the mapped object might well be
None, for instance when documents are deleted from the DB.
Changefeed messages that do not contain a `new_val` (status messages) are
ignored.
Example: ``Document.aiter_changes()`` returns a ChangesAsyncMap that maps
each new_val (i.e., changed and inserted documents) to Document.from_doc().
"""
def __init__(self, changefeed, mapper):
"""`changefeed` is a RethinkDB changes stream (technically, a RethinkDB
cursor). `mapper` is a function accepting one parameter: a `new_val`
from a changefeed message.
"""
super().__init__(changefeed)
self.mapper = mapper
async def __anext__(self):
# process and yield next message from changefeed that carries a
# "new_val"
while True:
message = await super().__anext__()
if "new_val" not in message:
continue
mapped = self.mapper(message["new_val"])
return mapped, message
async def as_list(self):
"""This is verboten on changefeeds as they have infinite length.
"""
raise NotImplementedError("as_list makes no sense on changefeeds")
| {
"content_hash": "3ae48e00b993fcef5b4d4834e6bec548",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 82,
"avg_line_length": 33.050632911392405,
"alnum_prop": 0.5979828928890591,
"repo_name": "lars-tiede/aiorethink",
"id": "42611a973db18e74fdfd08afea850dfe4289f0d7",
"size": "7833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiorethink/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125701"
},
{
"name": "Shell",
"bytes": "274"
}
],
"symlink_target": ""
} |
import wx
import armid
from InterfaceListPanel import InterfaceListPanel
class InterfaceListDialog(wx.Dialog):
def __init__(self,parent,ifName = '',ifType = '',arName = '',pName = ''):
wx.Dialog.__init__(self,parent,armid.INTERFACELISTDIALOG_ID,'Add Interface',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(300,200))
self.theInterfaceName = ifName
self.theInterfaceType = ifType
self.theAccessRight = arName
self.thePrivilege = pName
self.buildControls()
self.load(ifName,ifType,arName,pName)
def buildControls(self):
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.panel = InterfaceListPanel(self)
mainSizer.Add(self.panel,1,wx.EXPAND)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,wx.ID_OK,self.onCommit)
def load(self,ifName,ifType,arName,pName):
self.panel.load(ifName,ifType,arName,pName)
def onCommit(self,evt):
nameCtrl = self.FindWindowById(armid.INTERFACELISTDIALOG_COMBONAME_ID)
typeCtrl = self.FindWindowById(armid.INTERFACELISTDIALOG_COMBOTYPE_ID)
arCtrl = self.FindWindowById(armid.INTERFACELISTDIALOG_COMBOACCESSRIGHT_ID)
pCtrl = self.FindWindowById(armid.INTERFACELISTDIALOG_COMBOPRIVILEGE_ID)
self.theInterfaceName = nameCtrl.GetValue()
self.theInterfaceType = typeCtrl.GetValue()
self.theAccessRight = arCtrl.GetValue()
self.thePrivilege = pCtrl.GetValue()
self.EndModal(wx.ID_OK)
def interface(self): return self.theInterfaceName
def interfaceType(self): return self.theInterfaceType
def accessRight(self): return self.theAccessRight
def privilege(self): return self.thePrivilege
| {
"content_hash": "995aa94cf0112f8831f021817bfa2583",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 173,
"avg_line_length": 39.97560975609756,
"alnum_prop": 0.7553386211104332,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "f6eda325e9aa17c09b75cb346908477f646ab982",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/InterfaceListDialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
} |
"""
Tests for L{imaginary.action.LookAt} and L{imaginary.action.LookAround}.
"""
from __future__ import print_function
from textwrap import dedent
from twisted.trial.unittest import TestCase
from zope.interface import implementer
from characteristic import attributes as has_attributes
from axiom import store, item, attributes
from imaginary import iimaginary, objects, language, action, events
from imaginary.enhancement import Enhancement
from imaginary.world import ImaginaryWorld
from imaginary.test.commandutils import (
CommandTestCaseMixin, E, createLocation, flatten)
class TestIntelligence(object):
def __init__(self):
self.observedConcepts = []
def prepare(self, concept):
return lambda: self.observedConcepts.append(concept)
class LookContext(object):
def __init__(self):
self.store = store.Store()
locContainer = createLocation(
self.store, name=u"Test Location",
description=u"Location for testing.")
self.location = locContainer.thing
self.world = ImaginaryWorld(store=self.store)
self.player = self.world.create(u"Test Player", gender=language.Gender.FEMALE)
locContainer.add(self.player)
self.actor = iimaginary.IActor(self.player)
self.actor.setEphemeralIntelligence(TestIntelligence())
class LookAroundTranscriptTests(CommandTestCaseMixin, TestCase):
"""
Transcript-style tests for I{look}.
"""
def test_emptyLocation(self):
iimaginary.IContainer(self.location).remove(self.observer)
self._test(
u"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
])
def test_siblingObject(self):
self._test(
"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
u"Here, you see Observer Player."])
def test_cousinObject(self):
o = objects.Thing(store=self.store, name=u"foo")
iimaginary.IContainer(self.observer).add(o)
self._test(
"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
u"Here, you see Observer Player."])
def test_childObject(self):
o = objects.Thing(store=self.store, name=u"foo")
self.playerContainer.add(o)
self._test(
"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
u"Here, you see Observer Player."])
def test_equipment(self):
self.observer.moveTo(None)
self._test(u"create a shirt named t-shirt", [u"You create a t-shirt."])
self._test(u"wear t-shirt", [u"You put on the t-shirt."])
self._test(
u"look",
[E(u"[ Test Location ]"),
E(u"Location for testing.")])
@implementer(iimaginary.ILitLink)
@has_attributes(["bear"])
class BearsHiddenBeyondThisLink(object):
"""
An annotation for a link implementing L{BearBlindness}.
"""
def isItLit(self, path):
"""
Any path that passes through a L{BearsHiddenBeyondThisLink} link and
terminates in a bear is shrouded in darkness. The bear lies in wait.
"""
schroedingerBear = path.targetAs(iimaginary.IThing)
actualBear = self.bear
if schroedingerBear == actualBear:
return False
else:
return True
def whyNotLit(self):
"""
The reason that a bear is obscured is L{BearsWhyNot}.
"""
return BearsWhyNot()
def applyLighting(self, litThing, it, interface):
"""
L{iimaginary.ILitLink.applyLighting} can modify a target that has had
lighting applied to it; in the case of this annotation things are
either completely not lit at all (bears) or fully lit and appear normal
(everything else) so we just always return the thing itself and don't
modify it.
"""
return it
class BearsWhyNot(object):
"""
A reason you can't see something: it's a bear, and you're blind to bears,
that's why you can't see it.
"""
def tellMeWhyNot(self):
"""
An evocative message that the user probably won't see (since they can't
in fact see this bear).
"""
return u"IT'S A BEAR"
interfaces = [iimaginary.ILinkAnnotator]
@implementer(*interfaces)
class BearBlindness(item.Item, Enhancement):
"""
An enhancement for an actor which causes that actor to become unable to see
bears.
(This could be installed on something other than an actor, which would
cause all bears on the other side of whatever link it was to become
invisible to all.)
"""
powerupInterfaces = interfaces
thing = attributes.reference(
"""
This is a reference to a Thing which is blind to bears.
"""
)
bear = attributes.reference(
"""
This is a reference to a Thing which is the one and only bear in the
universe, which you cannot see.
THERE CAN ONLY BE ONE.
"""
)
def annotationsFor(self, link, idea):
"""
Yield an annotation for all links which causes bears on the opposite
side of you to be invisible to you.
"""
yield BearsHiddenBeyondThisLink(bear=self.bear)
class LookAtTranscriptTests(CommandTestCaseMixin, TestCase):
def test_bearBlindness(self):
"""
If I cast a spell on you which makes you unable to see bears, you
should not see a bear in the room with you when you look at the room
around you.
"""
bear = objects.Thing(store=self.store,
name=u"Bear",
location=self.location)
BearBlindness(store=self.store,
thing=self.player,
bear=bear).applyEnhancement()
self._test(
"look here",
[E("[ Test Location ]"),
E("Location for testing."),
"Here, you see Observer Player."])
def test_exits(self):
objects.Exit.link(self.location, self.location, u"north")
self._test(
"look here",
[E("[ Test Location ]"),
E("( north south )"),
E("Location for testing."),
"Here, you see Observer Player."])
def test_lookMe(self):
self._test(
"look me",
[E("[ Test Player ]"),
"Test Player is great.",
"She is naked."])
def test_lookAtMe(self):
self._test(
"look at me",
[E("[ Test Player ]"),
"Test Player is great.",
"She is naked."])
def test_lookAtAnother(self):
self._test(
"look at Observer Player",
[E("[ Observer Player ]"),
"Observer Player is great.",
"She is naked."],
["Test Player looks at you."])
def test_lookAtThing(self):
o = objects.Thing(store=self.store, name=u"foo")
iimaginary.IContainer(self.location).add(o)
self._test(
"look at foo",
[E("[ foo ]")])
def test_lookAtMissing(self):
self._test(
"look at bar",
["You don't see that."])
class LookAroundTests(TestCase):
"""
Tests for L{imaginary.action.LookAround}.
"""
def setUp(self):
self.context = LookContext()
def test_eventBroadcasting(self):
"""
The L{LookAround} action broadcasts an L{events.Success} to the actor.
"""
action.LookAround().runEventTransaction(
self.context.player, u"look", {})
[event] = self.context.actor.getIntelligence().observedConcepts
self.assertIsInstance(event, events.Success)
class LookAtTests(TestCase):
"""
Tests for L{imaginary.action.LookAt}.
"""
def setUp(self):
self.context = LookContext()
def test_exitNameEventBroadcasting(self):
target = objects.Thing(
store=self.context.store,
name=u"Visible Location",
description=u"Description of visible location.",
proper=True)
objects.Container.createFor(target, capacity=1000)
objects.Exit.link(self.context.location, target, u"south")
action.LookAt().runEventTransaction(
self.context.player, u"look", {"target": u"south"})
evts = self.context.actor.getIntelligence().observedConcepts
self.assertEqual(1, len(evts))
self.assertIsInstance(evts[0], events.Success)
self.assertEqual(
dedent(u"""
[ Visible Location ]
( north )
Description of visible location.
""").lstrip(),
flatten(evts[0].actorMessage.plaintext(self.context.actor.thing)))
| {
"content_hash": "cc309baebe2d3262dd072bab776704af",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 86,
"avg_line_length": 28.560897435897434,
"alnum_prop": 0.5902816743350915,
"repo_name": "glyph/imaginary",
"id": "64883304c7847fd99388ec2272edfede04ff9035",
"size": "8911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imaginary/test/test_look.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "615840"
}
],
"symlink_target": ""
} |
from hashlib import sha1
import inspect
import re
import collections
from . import compat
def coerce_string_conf(d):
result = {}
for k, v in d.items():
if not isinstance(v, compat.string_types):
result[k] = v
continue
v = v.strip()
if re.match(r'^[-+]?\d+$', v):
result[k] = int(v)
elif re.match(r'^[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?$', v):
result[k] = float(v)
elif v.lower() in ('false', 'true'):
result[k] = v.lower() == 'true'
elif v == 'None':
result[k] = None
else:
result[k] = v
return result
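# A sketch of the coercion rules (keys and values are illustrative):
#
#   coerce_string_conf({'retries': '3', 'rate': '0.5',
#                       'debug': 'true', 'name': 'cache'})
#   # => {'retries': 3, 'rate': 0.5, 'debug': True, 'name': 'cache'}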
class PluginLoader(object):
def __init__(self, group):
self.group = group
self.impls = {}
def load(self, name):
if name in self.impls:
return self.impls[name]()
else: # pragma NO COVERAGE
import pkg_resources
for impl in pkg_resources.iter_entry_points(
self.group, name):
self.impls[name] = impl.load
return impl.load()
            else:
                # for/else: runs only if no matching entry point was found
raise Exception(
"Can't load plugin %s %s" %
(self.group, name))
def register(self, name, modulepath, objname):
def load():
mod = __import__(modulepath, fromlist=[objname])
return getattr(mod, objname)
self.impls[name] = load
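    # Hedged example of registering and loading a backend implementation
    # (the group name and dotted paths are illustrative):
    #
    #   loader = PluginLoader('yosai_dpcache.backends')
    #   loader.register('redis', 'yosai_dpcache.cache.backends.redis',
    #                   'RedisBackend')
    #   backend_cls = loader.load('redis')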
def function_key_generator(namespace, fn, to_str=compat.string_type):
"""Return a function that generates a string
key, based on a given function as well as
arguments to the returned function itself.
This is used by :meth:`.CacheRegion.cache_on_arguments`
to generate a cache key from a decorated function.
It can be replaced using the ``function_key_generator``
argument passed to :func:`.make_region`.
"""
if namespace is None:
namespace = '%s:%s' % (fn.__module__, fn.__name__)
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
args = inspect.getargspec(fn)
has_self = args[0] and args[0][0] in ('self', 'cls')
def generate_key(*args, **kw):
if kw:
raise ValueError(
"yosai_dpcache.cache's default key creation "
"function does not accept keyword arguments.")
if has_self:
args = args[1:]
return namespace + "|" + " ".join(map(to_str, args))
return generate_key
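# A sketch of the keys this produces (names and values are illustrative):
#
#   def lookup(user_id): ...
#   keygen = function_key_generator(None, lookup)
#   keygen(42)  # => '<module>:lookup|42'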
def function_multi_key_generator(namespace, fn, to_str=compat.string_type):
if namespace is None:
namespace = '%s:%s' % (fn.__module__, fn.__name__)
else:
namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)
args = inspect.getargspec(fn)
has_self = args[0] and args[0][0] in ('self', 'cls')
def generate_keys(*args, **kw):
if kw:
raise ValueError(
"yosai_dpcache.cache's default key creation "
"function does not accept keyword arguments.")
if has_self:
args = args[1:]
return [namespace + "|" + key for key in map(to_str, args)]
return generate_keys
def sha1_mangle_key(key):
"""a SHA1 key mangler."""
return sha1(key).hexdigest()
def length_conditional_mangler(length, mangler):
"""a key mangler that mangles if the length of the key is
past a certain threshold.
"""
def mangle(key):
if len(key) >= length:
return mangler(key)
else:
return key
return mangle
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
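# Hedged usage sketch -- the decorated body runs once per instance:
#
#   class Config(object):
#       @memoized_property
#       def settings(self):
#           return load_settings()  # hypothetical expensive call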
def to_list(x, default=None):
"""Coerce to a list."""
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
class KeyReentrantMutex(object):
def __init__(self, key, mutex, keys):
self.key = key
self.mutex = mutex
self.keys = keys
@classmethod
def factory(cls, mutex):
# this collection holds zero or one
# thread idents as the key; a set of
# keynames held as the value.
keystore = collections.defaultdict(set)
def fac(key):
return KeyReentrantMutex(key, mutex, keystore)
return fac
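    # A sketch of the factory in use (``base_lock`` is a hypothetical mutex
    # exposing acquire(wait=...) and release()):
    #
    #   make_mutex = KeyReentrantMutex.factory(base_lock)
    #   m = make_mutex('cache-key')
    #   if m.acquire(wait=True):
    #       try:
    #           ...  # critical section
    #       finally:
    #           m.release()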
def acquire(self, wait=True):
current_thread = compat.threading.current_thread().ident
keys = self.keys.get(current_thread)
if keys is not None and \
self.key not in keys:
# current lockholder, new key. add it in
keys.add(self.key)
return True
elif self.mutex.acquire(wait=wait):
# after acquire, create new set and add our key
self.keys[current_thread].add(self.key)
return True
else:
return False
def release(self):
current_thread = compat.threading.current_thread().ident
keys = self.keys.get(current_thread)
assert keys is not None, "this thread didn't do the acquire"
assert self.key in keys, "No acquire held for key '%s'" % self.key
keys.remove(self.key)
if not keys:
# when list of keys empty, remove
# the thread ident and unlock.
del self.keys[current_thread]
self.mutex.release()
| {
"content_hash": "2a3e1499a85039d6eb786f4830284e9a",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 77,
"avg_line_length": 29.184615384615384,
"alnum_prop": 0.5512212264979792,
"repo_name": "YosaiProject/yosai_dpcache",
"id": "767780e15aa5340c17c39eb6afe921c7ed1984a8",
"size": "5691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yosai_dpcache/cache/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19656"
}
],
"symlink_target": ""
} |